Merge branch 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Apr 2011 05:35:16 +0000 (22:35 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Apr 2011 05:35:16 +0000 (22:35 -0700)
* 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  PM: Add missing syscore_suspend() and syscore_resume() calls
  PM: Fix error code paths executed after failing syscore_suspend()

226 files changed:
Documentation/hwmon/max16064 [new file with mode: 0644]
Documentation/hwmon/max34440 [new file with mode: 0644]
Documentation/hwmon/max8688 [new file with mode: 0644]
Documentation/hwmon/pmbus
Documentation/hwmon/smm665
Documentation/hwmon/submitting-patches [new file with mode: 0644]
Documentation/input/event-codes.txt [new file with mode: 0644]
Documentation/md.txt
Documentation/sound/alsa/SB-Live-mixer.txt
MAINTAINERS
Makefile
arch/arm/include/asm/cputype.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-msm/timer.c
arch/arm/mach-s3c2440/mach-gta02.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/pte-common.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/fsl_pci.c
arch/x86/include/asm/gart.h
arch/x86/include/asm/numa.h
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/smpboot.c
arch/x86/mm/numa.c
arch/x86/mm/numa_emulation.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
arch/xtensa/kernel/irq.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-settings.c
block/blk-sysfs.c
block/cfq-iosched.c
block/elevator.c
block/genhd.c
drivers/char/agp/generic.c
drivers/char/virtio_console.c
drivers/connector/connector.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nv50_vm.c
drivers/gpu/drm/nouveau/nvc0_vm.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/hwmon/pmbus_core.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-gd.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/misc/xen-kbdfront.c
drivers/input/touchscreen/h3600_ts_input.c
drivers/md/dm-raid.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/video/videobuf-dma-contig.c
drivers/net/bna/bfa_ioc.c
drivers/net/bna/bfa_ioc.h
drivers/net/bna/bfa_ioc_ct.c
drivers/net/bna/bfi.h
drivers/net/bna/bnad.c
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/loopback.c
drivers/net/natsemi.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_main.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_main.c
drivers/net/sfc/efx.c
drivers/net/sfc/io.h
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/selftest.c
drivers/net/sfc/tx.c
drivers/net/sis900.c
drivers/net/stmmac/dwmac_lib.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tokenring/3c359.c
drivers/net/tokenring/lanstreamer.c
drivers/net/tokenring/olympic.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/regd_common.h
drivers/net/wireless/iwlegacy/Kconfig
drivers/net/wireless/iwlegacy/iwl-3945-hw.h
drivers/net/wireless/iwlegacy/iwl-4965-hw.h
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlegacy/iwl-eeprom.c
drivers/net/wireless/iwlegacy/iwl3945-base.c
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/p54/txrx.c
drivers/parport/parport_pc.c
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/intel-iommu.c
drivers/rtc/rtc-omap.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/tty/n_gsm.c
drivers/tty/serial/imx.c
drivers/virtio/virtio_pci.c
drivers/virtio/virtio_ring.c
fs/btrfs/acl.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/xattr.c
fs/dcache.c
fs/gfs2/aops.c
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/ops_fstype.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/nfsd/nfs4state.c
fs/nfsd/vfs.c
fs/proc/base.c
fs/xattr.c
fs/xfs/linux-2.6/xfs_message.c
include/linux/blkdev.h
include/linux/device-mapper.h
include/linux/input.h
include/linux/input/mt.h
include/linux/pid.h
include/linux/posix-clock.h
include/linux/security.h
include/linux/usb/usbnet.h
kernel/pid.c
kernel/time/posix-clock.c
net/bridge/br_netfilter.c
net/caif/cfdgml.c
net/caif/cfmuxl.c
net/core/dev.c
net/ieee802154/Makefile
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_options.c
net/ipv4/sysctl_net_ipv4.c
net/ipv6/inet6_connection_sock.c
net/irda/af_irda.c
net/llc/llc_input.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/xt_set.c
net/sctp/associola.c
security/capability.c
security/security.c
security/selinux/hooks.c
security/smack/smack_lsm.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/jz4740.c
sound/soc/codecs/sn95031.c
sound/soc/codecs/wm8903.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm_hubs.c
sound/soc/mid-x86/sst_platform.c
sound/soc/samsung/pcm.c
sound/soc/sh/fsi.c
sound/soc/soc-core.c
sound/soc/tegra/harmony.c
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/python.c
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c

diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
new file mode 100644 (file)
index 0000000..4172899
--- /dev/null
@@ -0,0 +1,62 @@
+Kernel driver max16064
+======================
+
+Supported chips:
+  * Maxim MAX16064
+    Prefix: 'max16064'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for the Maxim MAX16064 Quad Power-Supply
+Controller with Active-Voltage Output Control and PMBus Interface.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-4]_label          "vout[1-4]"
+in[1-4]_input          Measured voltage. From READ_VOUT register.
+in[1-4]_min            Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-4]_max            Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-4]_lcrit          Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-4]_crit           Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-4]_min_alarm      Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-4]_max_alarm      Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-4]_lcrit_alarm    Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-4]_crit_alarm     Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+temp1_input            Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max              Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit             Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm                Chip temperature high alarm. Set by comparing
+                       READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+                       status is set.
+temp1_crit_alarm       Chip temperature critical high alarm. Set by comparing
+                       READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+                       status is set.
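
The usage notes above apply to all of the PMBus client drivers added here:
the devices must be instantiated explicitly. As a minimal sketch only (the
bus number and slave address below are assumptions for illustration, not
values taken from this commit), a board file could declare the chip like
this:

    #include <linux/kernel.h>
    #include <linux/i2c.h>

    /* Hypothetical board wiring: a MAX16064 at address 0x40 on bus 0. */
    static struct i2c_board_info board_i2c_devs[] __initdata = {
        { I2C_BOARD_INFO("max16064", 0x40) },
    };

    /* Called from early board setup, before the i2c adapter for bus 0
     * registers; the i2c core then instantiates the client itself. */
    static void __init board_register_i2c(void)
    {
        i2c_register_board_info(0, board_i2c_devs,
                                ARRAY_SIZE(board_i2c_devs));
    }

Alternatively, a device can be created at run time by writing the driver
name and address to the adapter's new_device file in sysfs, as described in
Documentation/i2c/instantiating-devices.
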
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
new file mode 100644 (file)
index 0000000..6c525dd
--- /dev/null
@@ -0,0 +1,79 @@
+Kernel driver max34440
+======================
+
+Supported chips:
+  * Maxim MAX34440
+    Prefixes: 'max34440'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+  * Maxim MAX34441
+    PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+    Prefixes: 'max34441'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for the Maxim MAX34440 PMBus 6-Channel
+Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager
+and Intelligent Fan Controller.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-6]_label          "vout[1-6]".
+in[1-6]_input          Measured voltage. From READ_VOUT register.
+in[1-6]_min            Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-6]_max            Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-6]_lcrit          Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-6]_crit           Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-6]_min_alarm      Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-6]_max_alarm      Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-6]_lcrit_alarm    Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-6]_crit_alarm     Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr[1-6]_label                "iout[1-6]".
+curr[1-6]_input                Measured current. From READ_IOUT register.
+curr[1-6]_max          Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr[1-6]_crit         Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr[1-6]_max_alarm    Current high alarm. From IOUT_OC_WARNING status.
+curr[1-6]_crit_alarm   Current critical high alarm. From IOUT_OC_FAULT status.
+
+                       in6 and curr6 attributes only exist for MAX34440.
+
+temp[1-8]_input                Measured temperatures. From READ_TEMPERATURE_1 register.
+                       temp1 is the chip's internal temperature. temp2..temp5
+                       are remote I2C temperature sensors. For MAX34441, temp6
+                       is a remote thermal-diode sensor. For MAX34440, temp6..8
+                       are remote I2C temperature sensors.
+temp[1-8]_max          Maximum temperature. From OT_WARN_LIMIT register.
+temp[1-8]_crit         Critical high temperature. From OT_FAULT_LIMIT register.
+temp[1-8]_max_alarm    Temperature high alarm.
+temp[1-8]_crit_alarm   Temperature critical high alarm.
+
+                       temp7 and temp8 attributes only exist for MAX34440.
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
new file mode 100644 (file)
index 0000000..0ddd3a4
--- /dev/null
@@ -0,0 +1,69 @@
+Kernel driver max8688
+=====================
+
+Supported chips:
+  * Maxim MAX8688
+    Prefix: 'max8688'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for the Maxim MAX8688 Digital Power-Supply
+Controller/Monitor with PMBus Interface.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in1_label              "vout1"
+in1_input              Measured voltage. From READ_VOUT register.
+in1_min                        Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in1_max                        Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in1_lcrit              Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in1_crit               Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in1_min_alarm          Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in1_max_alarm          Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in1_lcrit_alarm                Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in1_crit_alarm         Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr1_label            "iout1"
+curr1_input            Measured current. From READ_IOUT register.
+curr1_max              Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr1_crit             Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_max_alarm                Current high alarm. From IOUT_OC_WARNING status.
+curr1_crit_alarm       Current critical high alarm. From IOUT_OC_FAULT status.
+
+temp1_input            Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max              Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit             Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm                Chip temperature high alarm. Set by comparing
+                       READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+                       status is set.
+temp1_crit_alarm       Chip temperature critical high alarm. Set by comparing
+                       READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+                       status is set.
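
All three documents above describe clients of the core PMBus driver. As a
hedged sketch only (assuming the 2.6.39-era interface in
drivers/hwmon/pmbus.h, and omitting the i2c_driver registration
boilerplate), such a client can be little more than a pmbus_driver_info
declaring which attribute classes the chip provides:

    #include <linux/module.h>
    #include <linux/i2c.h>
    #include "pmbus.h"

    static struct pmbus_driver_info max8688_info = {
        .pages = 1,
        .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
                 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
                 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
    };

    static int max8688_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
    {
        /* The core PMBus driver creates all sysfs attributes. */
        return pmbus_do_probe(client, id, &max8688_info);
    }

The core driver maps these functionality flags to the sysfs attributes
listed above, which is why the three documents read so similarly.
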
index dc4933e..5e462fc 100644 (file)
@@ -13,26 +13,6 @@ Supported chips:
     Prefix: 'ltc2978'
     Addresses scanned: -
     Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
-  * Maxim MAX16064
-    Quad Power-Supply Controller
-    Prefix: 'max16064'
-    Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
-  * Maxim MAX34440
-    PMBus 6-Channel Power-Supply Manager
-    Prefixes: 'max34440'
-    Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
-  * Maxim MAX34441
-    PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
-    Prefixes: 'max34441'
-    Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
-  * Maxim MAX8688
-    Digital Power-Supply Controller/Monitor
-    Prefix: 'max8688'
-    Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
   * Generic PMBus devices
     Prefix: 'pmbus'
     Addresses scanned: -
@@ -175,11 +155,13 @@ currX_crit                Critical maximum current.
                        From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
 currX_alarm            Current high alarm.
                        From IIN_OC_WARNING or IOUT_OC_WARNING status.
+currX_max_alarm                Current high alarm.
+                       From IIN_OC_WARNING or IOUT_OC_WARNING status.
 currX_lcrit_alarm      Output current critical low alarm.
                        From IOUT_UC_FAULT status.
 currX_crit_alarm       Current critical high alarm.
                        From IIN_OC_FAULT or IOUT_OC_FAULT status.
-currX_label            "iin" or "vinY"
+currX_label            "iin" or "ioutY"
 
 powerX_input           Measured power. From READ_PIN or READ_POUT register.
 powerX_cap             Output power cap. From POUT_MAX register.
@@ -193,13 +175,13 @@ powerX_crit_alarm Output power critical high alarm.
                        From POUT_OP_FAULT status.
 powerX_label           "pin" or "poutY"
 
-tempX_input            Measured tempererature.
+tempX_input            Measured temperature.
                        From READ_TEMPERATURE_X register.
-tempX_min              Mimimum tempererature. From UT_WARN_LIMIT register.
-tempX_max              Maximum tempererature. From OT_WARN_LIMIT register.
-tempX_lcrit            Critical low tempererature.
+tempX_min              Minimum temperature. From UT_WARN_LIMIT register.
+tempX_max              Maximum temperature. From OT_WARN_LIMIT register.
+tempX_lcrit            Critical low temperature.
                        From UT_FAULT_LIMIT register.
-tempX_crit             Critical high tempererature.
+tempX_crit             Critical high temperature.
                        From OT_FAULT_LIMIT register.
 tempX_min_alarm                Chip temperature low alarm. Set by comparing
                        READ_TEMPERATURE_X with UT_WARN_LIMIT if
index 3820fc9..59e3161 100644 (file)
@@ -150,8 +150,8 @@ in8_crit_alarm              Channel F critical alarm
 in9_crit_alarm         AIN1 critical alarm
 in10_crit_alarm                AIN2 critical alarm
 
-temp1_input            Chip tempererature
-temp1_min              Mimimum chip tempererature
-temp1_max              Maximum chip tempererature
-temp1_crit             Critical chip tempererature
+temp1_input            Chip temperature
+temp1_min              Minimum chip temperature
+temp1_max              Maximum chip temperature
+temp1_crit             Critical chip temperature
 temp1_crit_alarm       Temperature critical alarm
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches
new file mode 100644 (file)
index 0000000..86f42e8
--- /dev/null
@@ -0,0 +1,109 @@
+       How to Get Your Patch Accepted Into the Hwmon Subsystem
+       -------------------------------------------------------
+
+This text is a collection of suggestions for people writing patches or
+drivers for the hwmon subsystem. Following these suggestions will greatly
+increase the chances of your change being accepted.
+
+
+1. General
+----------
+
+* It should be unnecessary to mention, but please read and follow
+    Documentation/SubmitChecklist
+    Documentation/SubmittingDrivers
+    Documentation/SubmittingPatches
+    Documentation/CodingStyle
+
+* If your patch generates checkpatch warnings, please refrain from explanations
+  such as "I don't like that coding style". Keep in mind that each unnecessary
+  warning helps hide a real problem. If you don't like the kernel coding
+  style, don't write kernel drivers.
+
+* Please test your patch thoroughly. We are not your test group.
+  Sometimes a patch can not be tested, or can not be tested completely,
+  because of missing hardware. In such cases, you should test-build the code
+  on at least one architecture. If run-time testing was not possible, say so
+  explicitly below the patch header.
+
+* If your patch (or the driver) is affected by configuration options such as
+  CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration
+  variants.
+
+
+2. Adding functionality to existing drivers
+-------------------------------------------
+
+* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
+  date.
+
+* Make sure the information in Kconfig is up to date.
+
+* If the added functionality requires some cleanup or structural changes, split
+  your patch into a cleanup part and the actual addition. This makes it easier
+  to review your changes, and to bisect any resulting problems.
+
+* Never mix bug fixes, cleanup, and functional enhancements in a single patch.
+
+
+3. New drivers
+--------------
+
+* Running your patch or driver file(s) through checkpatch does not mean its
+  formatting is clean. If unsure about formatting in your new driver, run it
+  through Lindent. Lindent is not perfect, and you may have to do some minor
+  cleanup, but it is a good start.
+
+* Consider adding yourself to MAINTAINERS.
+
+* Document the driver in Documentation/hwmon/<driver_name>.
+
+* Add the driver to Kconfig and Makefile in alphabetical order.
+
+* Make sure that all dependencies are listed in Kconfig. For new drivers, it
+  is most likely prudent to add a dependency on EXPERIMENTAL.
+
+* Avoid forward declarations if you can. Rearrange the code if necessary.
+
+* Avoid calculations in macros and macro-generated functions. While such macros
+  may save a line or so in the source, they obfuscate the code and make code
+  review more difficult. They may also result in code which is more complicated
+  than necessary. Use inline functions or just regular functions instead.
+
+* If the driver has a detect function, make sure it is silent. Debug messages
+  and messages printed after a successful detection are acceptable, but it
+  must not print messages such as "Chip XXX not found/supported".
+
+  Keep in mind that the detect function will run for all drivers supporting an
+  address if a chip is detected on that address. Unnecessary messages will just
+  pollute the kernel log and not provide any value.
+
+* Provide a detect function if and only if a chip can be detected reliably.
+
+* Avoid writing to chip registers in the detect function. If you have to write,
+  only do it after you have already gathered enough data to be certain that the
+  detection is going to be successful.
+
+  Keep in mind that the chip might not be what your driver believes it is, and
+  writing to it might cause a bad misconfiguration.
+
+* Make sure there are no race conditions in the probe function. Specifically,
+  completely initialize your chip first, then create sysfs entries and register
+  with the hwmon subsystem.
+
+* Do not provide support for deprecated sysfs attributes.
+
+* Do not create non-standard attributes unless really needed. If you have to use
+  non-standard attributes, or you believe you do, discuss it on the mailing list
+  first. In either case, provide a detailed explanation of why you need the
+  non-standard attribute(s).
+  Standard attributes are specified in Documentation/hwmon/sysfs-interface.
+
+* When deciding which sysfs attributes to support, look at the chip's
+  capabilities. While we do not expect your driver to support everything the
+  chip may offer, it should at least support all limits and alarms.
+
+* Last but not least, please check if a driver for your chip already exists
+  before starting to write a new driver. Especially for temperature sensors,
+  new chips are often variants of previously released chips. In some cases,
+  a presumably new chip may simply have been relabeled.
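
The probe-ordering rule above ("completely initialize your chip first, then
create sysfs entries and register with the hwmon subsystem") is easiest to
see in code. A hedged sketch, with hypothetical names (foo_data,
foo_init_chip, and foo_group are placeholders, not from this commit):

    static int __devinit foo_probe(struct i2c_client *client,
                                   const struct i2c_device_id *id)
    {
        struct foo_data *data;
        int err;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
            return -ENOMEM;
        i2c_set_clientdata(client, data);

        /* 1. Fully initialize the chip first ... */
        err = foo_init_chip(client);
        if (err)
            goto exit_free;

        /* 2. ... then create the sysfs attributes ... */
        err = sysfs_create_group(&client->dev.kobj, &foo_group);
        if (err)
            goto exit_free;

        /* 3. ... and register with the hwmon subsystem last, so no
         * attribute is visible before the chip is ready. */
        data->hwmon_dev = hwmon_device_register(&client->dev);
        if (IS_ERR(data->hwmon_dev)) {
            err = PTR_ERR(data->hwmon_dev);
            goto exit_remove;
        }
        return 0;

    exit_remove:
        sysfs_remove_group(&client->dev.kobj, &foo_group);
    exit_free:
        kfree(data);
        return err;
    }
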
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
new file mode 100644 (file)
index 0000000..23fcb05
--- /dev/null
@@ -0,0 +1,262 @@
+The input protocol uses a map of types and codes to express input device values
+to userspace. This document describes the types and codes and how and when they
+may be used.
+
+A single hardware event generates multiple input events. Each input event
+contains the new value of a single data item. A special event type, EV_SYN, is
+used to separate input events into packets of input data changes occurring at
+the same moment in time. In the following, the term "event" refers to a single
+input event encompassing a type, code, and value.
+
+The input protocol is a stateful protocol. Events are emitted only when values
+of event codes have changed. However, the state is maintained within the Linux
+input subsystem; drivers do not need to maintain the state and may attempt to
+emit unchanged values without harm. Userspace may obtain the current state of
+event code values using the EVIOCG* ioctls defined in linux/input.h. The event
+reports supported by a device are also provided by sysfs in
+class/input/event*/device/capabilities/, and the properties of a device are
+provided in class/input/event*/device/properties.
+
+Types:
+==========
+Types are groupings of codes under a logical input construct. Each type has a
+set of applicable codes to be used in generating events. See the Codes section
+for details on valid codes for each type.
+
+* EV_SYN:
+  - Used as markers to separate events. Events may be separated in time or in
+    space, such as with the multitouch protocol.
+
+* EV_KEY:
+  - Used to describe state changes of keyboards, buttons, or other key-like
+    devices.
+
+* EV_REL:
+  - Used to describe relative axis value changes, e.g. moving the mouse 5 units
+    to the left.
+
+* EV_ABS:
+  - Used to describe absolute axis value changes, e.g. describing the
+    coordinates of a touch on a touchscreen.
+
+* EV_MSC:
+  - Used to describe miscellaneous input data that do not fit into other types.
+
+* EV_SW:
+  - Used to describe binary state input switches.
+
+* EV_LED:
+  - Used to turn LEDs on devices on and off.
+
+* EV_SND:
+  - Used to output sound to devices.
+
+* EV_REP:
+  - Used for autorepeating devices.
+
+* EV_FF:
+  - Used to send force feedback commands to an input device.
+
+* EV_PWR:
+  - A special type for power button and switch input.
+
+* EV_FF_STATUS:
+  - Used to receive force feedback device status.
+
+Codes:
+==========
+Codes define the precise type of event.
+
+EV_SYN:
+----------
+EV_SYN event values are undefined. Their usage is defined only by when they are
+sent in the evdev event stream.
+
+* SYN_REPORT:
+  - Used to synchronize and separate events into packets of input data changes
+    occurring at the same moment in time. For example, motion of a mouse may set
+    the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
+    motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
+
+* SYN_CONFIG:
+  - TBD
+
+* SYN_MT_REPORT:
+  - Used to synchronize and separate touch events. See the
+    multi-touch-protocol.txt document for more information.
+
+* SYN_DROPPED:
+  - Used to indicate buffer overrun in the evdev client's event queue.
+    Client should ignore all events up to and including next SYN_REPORT
+    event and query the device (using EVIOCG* ioctls) to obtain its
+    current state.
+
+EV_KEY:
+----------
+EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
+to represent the 'A' key on a keyboard. When a key is depressed, an event with
+the key's code is emitted with value 1. When the key is released, an event is
+emitted with value 0. Some hardware sends events when a key is repeated. These
+events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
+BTN_<name> is used for other types of momentary switch events.
+
+A few EV_KEY codes have special meanings:
+
+* BTN_TOOL_<name>:
+  - These codes are used in conjunction with input trackpads, tablets, and
+    touchscreens. These devices may be used with fingers, pens, or other tools.
+    When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
+    code should be set to a value of 1. When the tool is no longer interacting
+    with the input device, the BTN_TOOL_<name> code should be reset to 0. All
+    trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
+    code when events are generated.
+
+* BTN_TOUCH:
+  - BTN_TOUCH is used for touch contact. While an input tool is determined to be
+    within meaningful physical contact, the value of this property must be set
+    to 1. Meaningful physical contact may mean any contact, or it may mean
+    contact conditioned by an implementation defined property. For example, a
+    touchpad may set the value to 1 only when the touch pressure rises above a
+    certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
+    example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
+    pen is hovering over but not touching the tablet surface.
+
+Note: For appropriate function of the legacy mousedev emulation driver,
+BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
+
+Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
+interpreted as a touchpad by userspace, while a similar device without
+BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
+with current userspace it is recommended to follow this distinction. In the
+future, this distinction will be deprecated and the device properties ioctl
+EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
+
+* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
+  - These codes denote one, two, three, and four finger interaction on a
+    trackpad or touchscreen. For example, if the user uses two fingers and moves
+    them on the touchpad in an effort to scroll content on screen,
+    BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
+    Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
+    purpose. A trackpad event generated by finger touches should generate events
+    for one code from each group. At most one of these BTN_TOOL_<name>
+    codes should have a value of 1 during any synchronization frame.
+
+Note: Historically some drivers emitted multiple of the finger count codes with
+a value of 1 in the same synchronization frame. This usage is deprecated.
+
+Note: In multitouch drivers, the input_mt_report_finger_count() function should
+be used to emit these codes. Please see multi-touch-protocol.txt for details.
+
+EV_REL:
+----------
+EV_REL events describe relative changes in a property. For example, a mouse may
+move to the left by a certain number of units, but its absolute position in
+space is unknown. If the absolute position is known, EV_ABS codes should be used
+instead of EV_REL codes.
+
+A few EV_REL codes have special meanings:
+
+* REL_WHEEL, REL_HWHEEL:
+  - These codes are used for vertical and horizontal scroll wheels,
+    respectively.
+
+EV_ABS:
+----------
+EV_ABS events describe absolute changes in a property. For example, a touchpad
+may emit coordinates for a touch location.
+
+A few EV_ABS codes have special meanings:
+
+* ABS_DISTANCE:
+  - Used to describe the distance of a tool from an interaction surface. This
+    event should only be emitted while the tool is hovering, meaning in close
+    proximity to the device and while the value of the BTN_TOUCH code is 0. If
+    the input device may be used freely in three dimensions, consider ABS_Z
+    instead.
+
+* ABS_MT_<name>:
+  - Used to describe multitouch input events. Please see
+    multi-touch-protocol.txt for details.
+
+EV_SW:
+----------
+EV_SW events describe stateful binary switches. For example, the SW_LID code is
+used to denote when a laptop lid is closed.
+
+Upon binding to a device or resuming from suspend, a driver must report
+the current switch state. This ensures that the device, kernel, and userspace
+state is in sync.
+
+Upon resume, if the switch state is the same as before suspend, then the input
+subsystem will filter out the duplicate switch state reports. The driver does
+not need to keep track of the switch state itself.
+
+EV_MSC:
+----------
+EV_MSC events are used for input and output events that do not fall under other
+categories.
+
+EV_LED:
+----------
+EV_LED events are used for input and output to set and query the state of
+various LEDs on devices.
+
+EV_REP:
+----------
+EV_REP events are used for specifying autorepeating events.
+
+EV_SND:
+----------
+EV_SND events are used for sending sound commands to simple sound output
+devices.
+
+EV_FF:
+----------
+EV_FF events are used to initialize a force feedback capable device and to
+cause such a device to play force feedback effects.
+
+EV_PWR:
+----------
+EV_PWR events are a special type of event used specifically for power
+management. Their usage is not well defined; it will be addressed later.
+
+Guidelines:
+==========
+The guidelines below ensure proper single-touch and multi-finger functionality.
+For multi-touch functionality, see the multi-touch-protocol.txt document for
+more information.
+
+Mice:
+----------
+REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
+the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
+further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
+scroll wheel events where available.
+
+Touchscreens:
+----------
+ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
+used to report when a touch is active on the screen.
+BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
+contact. BTN_TOOL_<name> events should be reported where possible.
+
+Trackpads:
+----------
+Legacy trackpads that only provide relative position information must report
+events like mice described above.
+
+Trackpads that provide absolute touch position must report ABS_{X,Y} for the
+location of the touch. BTN_TOUCH should be used to report when a touch is active
+on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
+be used to report the number of touches active on the trackpad.
+
+Tablets:
+----------
+BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
+the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
+should be used to report when the tool is in contact with the tablet.
+BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
+button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
+BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
+meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
+purpose on the device.
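
To make the protocol concrete, here is a hedged userspace sketch of an
evdev reader that honors the SYN_DROPPED rule described above. The device
node is an assumption, and the state re-query after an overrun (via the
EVIOCG* ioctls) is left as a comment:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>

    int main(void)
    {
        struct input_event ev;
        int dropped = 0;
        int fd = open("/dev/input/event0", O_RDONLY);  /* assumed node */

        if (fd < 0)
            return 1;
        while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
            if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
                /* Buffer overrun: ignore everything up to and
                 * including the next SYN_REPORT, then query the
                 * current state with EVIOCG* ioctls (not shown). */
                dropped = 1;
                continue;
            }
            if (ev.type == EV_SYN && ev.code == SYN_REPORT) {
                dropped = 0;  /* end of one packet of changes */
                continue;
            }
            if (!dropped)
                printf("type %u code %u value %d\n",
                       (unsigned)ev.type, (unsigned)ev.code, ev.value);
        }
        close(fd);
        return 0;
    }
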
index a81c7b4..2366b1c 100644 (file)
@@ -552,6 +552,16 @@ also have
      within the array where IO will be blocked.  This is currently
      only supported for raid4/5/6.
 
+   sync_min
+   sync_max
+     The two values, given as numbers of sectors, indicate a range
+     within the array where 'check'/'repair' will operate. Each must
+     be a multiple of chunk_size. When the resync reaches "sync_max"
+     it will pause, rather than complete.
+     You can use 'select' or 'poll' on "sync_completed" to wait for
+     that number to reach sync_max.  Then you can either increase
+     "sync_max", or write 'idle' to "sync_action".
+
 
 Each active md device may also have attributes specific to the
 personality module that manages it.
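
The select/poll usage that the new sync_min/sync_max text describes can be
sketched in C as follows. The md device name is an assumption, and note
that sysfs wants a fresh read (after an lseek back to offset 0) following
each poll() wakeup:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[64];
        ssize_t n;
        struct pollfd pfd;
        int fd = open("/sys/block/md0/md/sync_completed", O_RDONLY);

        if (fd < 0)
            return 1;
        pfd.fd = fd;
        pfd.events = POLLPRI;
        for (;;) {
            /* Re-read from the start; md updates this file and
             * signals readers via sysfs_notify(). */
            lseek(fd, 0, SEEK_SET);
            n = read(fd, buf, sizeof(buf) - 1);
            if (n <= 0)
                break;
            buf[n] = '\0';
            printf("sync_completed: %s", buf);
            if (poll(&pfd, 1, -1) < 0)
                break;
        }
        close(fd);
        return 0;
    }
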
index f5639d4..f4b5988 100644 (file)
@@ -87,14 +87,14 @@ accumulator. ALSA uses accumulators 0 and 1 for left and right PCM.
 The result is forwarded to the ADC capture FIFO (thus to the standard capture
 PCM device).
 
-name='Music Playback Volume',index=0
+name='Synth Playback Volume',index=0
 
 This control is used to attenuate samples for left and right MIDI FX-bus
 accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
 The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
 
-name='Music Capture Volume',index=0
-name='Music Capture Switch',index=0
+name='Synth Capture Volume',index=0
+name='Synth Capture Switch',index=0
 
 These controls are used to attenuate samples for left and right MIDI FX-bus
 accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
index ec36003..1380312 100644 (file)
@@ -151,6 +151,7 @@ S:  Maintained
 F:     drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -5395,7 +5396,7 @@ F:        drivers/media/video/*7146*
 F:     include/media/*7146*
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Jassi Brar <jassi.brar@samsung.com>
+M:     Jassi Brar <jassisinghbrar@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/samsung
index 322e733..b967b96 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index ed5bc9e..cd4458f 100644 (file)
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CPUTYPE_H
 
 #include <linux/stringify.h>
+#include <linux/kernel.h>
 
 #define CPUID_ID       0
 #define CPUID_CACHETYPE        1
index c891eb7..87dbe3e 100644 (file)
 #define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
 #define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
 #define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
+#define __NR_name_to_handle_at         (__NR_SYSCALL_BASE+370)
+#define __NR_open_by_handle_at         (__NR_SYSCALL_BASE+371)
+#define __NR_clock_adjtime             (__NR_SYSCALL_BASE+372)
+#define __NR_syncfs                    (__NR_SYSCALL_BASE+373)
 
 /*
  * The following SWIs are ARM private.
index 5c26ecc..7fbf28c 100644 (file)
                CALL(sys_fanotify_init)
                CALL(sys_fanotify_mark)
                CALL(sys_prlimit64)
+/* 370 */      CALL(sys_name_to_handle_at)
+               CALL(sys_open_by_handle_at)
+               CALL(sys_clock_adjtime)
+               CALL(sys_syncfs)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
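
Until C libraries grow wrappers, the four syscalls wired up above can be
invoked through syscall(2). A sketch for syncfs; the fallback number below
matches the ARM EABI table in this patch (where __NR_SYSCALL_BASE is 0)
and differs on other architectures:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_syncfs
    #define __NR_syncfs 373  /* ARM EABI, per the table above */
    #endif

    int main(void)
    {
        /* Any open fd on the target filesystem will do. */
        int fd = open("/mnt", O_RDONLY);
        long ret;

        if (fd < 0)
            return 1;
        /* Unlike sync(), this flushes only the fs containing fd. */
        ret = syscall(__NR_syncfs, fd);
        close(fd);
        return ret ? 1 : 0;
    }
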
index 7f56861..6a96911 100644 (file)
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
 
 static void __init qsd8x50_init_mmc(void)
 {
-       if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
-               vreg_mmc = vreg_get(NULL, "gp6");
-       else
-               vreg_mmc = vreg_get(NULL, "gp5");
+       vreg_mmc = vreg_get(NULL, "gp5");
 
        if (IS_ERR(vreg_mmc)) {
                pr_err("vreg get for vreg_mmc failed (%ld)\n",
index 56f920c..38b95e9 100644 (file)
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
        /* Use existing clock_event for cpu 0 */
        if (!smp_processor_id())
-               return;
+               return 0;
 
        writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
index 0db2411..7166620 100644 (file)
@@ -409,6 +409,10 @@ struct platform_device s3c24xx_pwm_device = {
        .num_resources  = 0,
 };
 
+static struct platform_device gta02_dfbmcs320_device = {
+       .name = "dfbmcs320",
+};
+
 static struct i2c_board_info gta02_i2c_devs[] __initdata = {
        {
                I2C_BOARD_INFO("pcf50633", 0x73),
@@ -523,6 +527,7 @@ static struct platform_device *gta02_devices[] __initdata = {
        &s3c_device_iis,
        &samsung_asoc_dma,
        &s3c_device_i2c0,
+       &gta02_dfbmcs320_device,
        &gta02_buttons_device,
        &s3c_device_adc,
        &s3c_device_ts,
index b6ff882..8f4d50b 100644 (file)
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
        depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-                  PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+                  (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
        bool
index be3cdf9..1833d1a 100644 (file)
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
            CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
            CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC        (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
            CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+           CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+           CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32    (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_COMPATIBLE    (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE      (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE      \
            (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |        \
            CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |       \
            CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T |           \
            CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
        CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
 #endif
 #ifdef CONFIG_E500
            CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+           CPU_FTRS_E5500 |
 #endif
            0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS                (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS                \
            (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &        \
            CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 &       \
            CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
        CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
 #endif
 #ifdef CONFIG_E500
            CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+           CPU_FTRS_E5500 &
 #endif
            CPU_FTRS_POSSIBLE,
 };
index 811f04a..8d1569c 100644 (file)
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * on platforms where such control is possible.
  */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-       defined(CONFIG_KPROBES)
+       defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT       PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT       PAGE_KERNEL_ROX
index c9b68d0..b9602ee 100644 (file)
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80240000,
                .cpu_name               = "e5500",
-               .cpu_features           = CPU_FTRS_E500MC,
+               .cpu_features           = CPU_FTRS_E5500,
                .cpu_user_features      = COMMON_USER_BOOKE,
                .mmu_features           = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
                        MMU_FTR_USE_TLBILX,
index 3d3d416..5b5e1f0 100644 (file)
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
        unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
        }
        mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
        crash_ipi_callback(regs);
 }
 
-#else
+#else  /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
        /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
        cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
index c834757..2b97b80 100644 (file)
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
                if (!parent)
                        continue;
                if (of_match_node(legacy_serial_parents, parent) != NULL) {
-                       index = add_legacy_soc_port(np, np);
-                       if (index >= 0 && np == stdout)
-                               legacy_serial_console = index;
+                       if (of_device_is_available(np)) {
+                               index = add_legacy_soc_port(np, np);
+                               if (index >= 0 && np == stdout)
+                                       legacy_serial_console = index;
+                       }
                }
                of_node_put(parent);
        }
index c4063b7..822f630 100644 (file)
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
        return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+       u64 delta = (val - prev) & 0xfffffffful;
+
+       /*
+        * POWER7 can roll back counter values; if the new value is smaller
+        * than the previous value it will cause the delta and the counter to
+        * have bogus values unless we rolled a counter over.  If a counter is
+        * rolled back, it will be smaller, but within 256, which is the maximum
+        * number of events to roll back at once.  If we detect a rollback
+        * return 0.  This can lead to a small lack of precision in the
+        * counters.
+        */
+       if (prev > val && (prev - val) < 256)
+               delta = 0;
+
+       return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
+               delta = check_and_compute_delta(prev, val);
+               if (!delta)
+                       return;
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-       /* The counters are only 32 bits wide */
-       delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
                prev = local64_read(&event->hw.prev_count);
                event->hw.idx = 0;
-               delta = (val - prev) & 0xfffffffful;
-               local64_add(delta, &event->count);
+               delta = check_and_compute_delta(prev, val);
+               if (delta)
+                       local64_add(delta, &event->count);
        }
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                                  unsigned long pmc5, unsigned long pmc6)
 {
        struct perf_event *event;
-       u64 val;
+       u64 val, prev;
        int i;
 
        for (i = 0; i < cpuhw->n_limited; ++i) {
                event = cpuhw->limited_counter[i];
                event->hw.idx = cpuhw->limited_hwidx[i];
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
-               local64_set(&event->hw.prev_count, val);
+               prev = local64_read(&event->hw.prev_count);
+               if (check_and_compute_delta(prev, val))
+                       local64_set(&event->hw.prev_count, val);
                perf_event_update_userpage(event);
        }
 }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
-       delta = (val - prev) & 0xfffffffful;
+       delta = check_and_compute_delta(prev, val);
        local64_add(delta, &event->count);
 
        /*
index 375480c..f33acfd 100644 (file)
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
        u64 stolen = 0;
        u64 dtb;
 
+       if (!dtl)
+               return 0;
+
        if (i == vpa->dtl_idx)
                return 0;
        while (i < vpa->dtl_idx) {
index a830c5e..bc5f0dc 100644 (file)
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
        mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
        extern void g5_phy_disable_cpu1(void);
 
        /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
                set_cpu_present(1, false);
                g5_phy_disable_cpu1();
        }
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
        register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
        if (ppc_md.progress)
                ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
        .message_pass   = smp_mpic_message_pass,
        .probe          = smp_core99_probe,
+#ifdef CONFIG_PPC64
        .bringup_done   = smp_core99_bringup_done,
+#endif
        .kick_cpu       = smp_core99_kick_cpu,
        .setup_cpu      = smp_core99_setup_cpu,
        .give_timebase  = smp_core99_give_timebase,
index 0007241..6c42cfd 100644 (file)
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
        int cpu, ret;
        struct paca_struct *pp;
        struct dtl_entry *dtl;
+       struct kmem_cache *dtl_cache;
 
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return 0;
 
+       dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+                                               DISPATCH_LOG_BYTES, 0, NULL);
+       if (!dtl_cache) {
+               pr_warn("Failed to create dispatch trace log buffer cache\n");
+               pr_warn("Stolen time statistics will be unreliable\n");
+               return 0;
+       }
+
        for_each_possible_cpu(cpu) {
                pp = &paca[cpu];
-               dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
-                                  cpu_to_node(cpu));
+               dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
                if (!dtl) {
                        pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
                                cpu);
index f8f7f28..68ca929 100644 (file)
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
        struct resource rsrc;
        const int *bus_range;
 
+       if (!of_device_is_available(dev)) {
+               pr_warning("%s: disabled\n", dev->full_name);
+               return -ENODEV;
+       }
+
        pr_debug("Adding PCI host bridge %s\n", dev->full_name);
 
        /* Fetch host bridge registers address */
index 43085bf..156cd5d 100644 (file)
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
         * Don't enable translation but enable GART IO and CPU accesses.
         * Also, set DISTLBWALKPRB since GART tables memory is UC.
         */
-       ctl = DISTLBWALKPRB | order << 1;
+       ctl = order << 1;
 
        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
        u32 tmp, ctl;
 
-        /* address of the mappings table */
-        addr >>= 12;
-        tmp = (u32) addr<<4;
-        tmp &= ~0xf;
-        pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
-
-        /* Enable GART translation for this hammer. */
-        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
-        ctl |= GARTEN;
-        ctl &= ~(DISGARTCPU | DISGARTIO);
-        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+       /* address of the mappings table */
+       addr >>= 12;
+       tmp = (u32) addr<<4;
+       tmp &= ~0xf;
+       pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+       /* Enable GART translation for this hammer. */
+       pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+       ctl |= GARTEN | DISTLBWALKPRB;
+       ctl &= ~(DISGARTCPU | DISGARTIO);
+       pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
 
 static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
index 3d4dab4..a50fc9f 100644 (file)
@@ -51,7 +51,7 @@ static inline void numa_remove_cpu(int cpu)           { }
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
 #endif
 
 #endif /* _ASM_X86_NUMA_H */
index 86d1ad4..73fb469 100644 (file)
@@ -499,7 +499,7 @@ out:
                 * Don't enable translation yet but enable GART IO and CPU
                 * accesses and set DISTLBWALKPRB since GART table memory is UC.
                 */
-               u32 ctl = DISTLBWALKPRB | aper_order << 1;
+               u32 ctl = aper_order << 1;
 
                bus = amd_nb_bus_dev_ranges[i].bus;
                dev_base = amd_nb_bus_dev_ranges[i].dev_base;
index eed3673..632e5dc 100644 (file)
@@ -586,8 +586,12 @@ static int x86_setup_perfctr(struct perf_event *event)
                        return -EOPNOTSUPP;
        }
 
+       /*
+        * Do not allow config1 (extended registers) to propagate,
+        * there's no sane user-space generalization yet:
+        */
        if (attr->type == PERF_TYPE_RAW)
-               return x86_pmu_extra_regs(event->attr.config, event);
+               return 0;
 
        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);
index 461f62b..cf4e369 100644 (file)
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
-               [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
+               [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = {
  *
  * Exceptions:
  *
+ * 0x000       FP      PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x003       FP      PERF_CTL[3]
+ * 0x004       FP      PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x00B       FP      PERF_CTL[3]
  * 0x00D       FP      PERF_CTL[3]
  * 0x023       DE      PERF_CTL[2:0]
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DF       LS      PERF_CTL[5:0]
  * 0x1D6       EX      PERF_CTL[5:0]
  * 0x1D8       EX      PERF_CTL[5:0]
+ *
+ * (*) depending on the umask all FPU counters may be used
  */
 
 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-       unsigned int event_code = amd_get_event_code(&event->hw);
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int event_code = amd_get_event_code(hwc);
 
        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
+               case 0x000:
+                       if (!(hwc->config & 0x0000F000ULL))
+                               break;
+                       if (!(hwc->config & 0x00000F00ULL))
+                               break;
+                       return &amd_f15_PMC3;
+               case 0x004:
+                       if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+                               break;
+                       return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
-               default:
-                       return &amd_f15_PMC53;
                }
+               return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
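
Restating the new FP constraint tests in plain C may help: event 0x000 is
confined to PERF_CTL[3] only when both tested umask nibbles are populated,
and event 0x004 only when more than one umask bit is set; otherwise both
fall through to the PERF_CTL[5:3] constraint.  A sketch mirroring the masks
above outside kernel context (__builtin_popcountll stands in for
hweight_long):

#include <stdint.h>

#define EVENTSEL_UMASK	0x0000ff00ULL	/* umask byte of the event select */

/* Return 1 if the FP event is confined to PERF_CTL[3], 0 if it may use
 * PERF_CTL[5:3]. */
static int fp_event_on_pmc3_only(unsigned int event_code, uint64_t config)
{
	switch (event_code) {
	case 0x000:
		return (config & 0x0000f000ULL) && (config & 0x00000f00ULL);
	case 0x004:
		return __builtin_popcountll(config & EVENTSEL_UMASK) > 1;
	case 0x003:
	case 0x00B:
	case 0x00D:
		return 1;
	}
	return 0;
}
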
index 8fc2b2c..43fa20b 100644 (file)
@@ -391,12 +391,12 @@ static __initconst const u64 nehalem_hw_cache_event_ids
 {
  [ C(L1D) ] = {
        [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
-               [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
+               [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
+               [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
        },
        [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
-               [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
+               [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
+               [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
@@ -1425,6 +1425,7 @@ static __init int intel_pmu_init(void)
 
        case 37: /* 32 nm nehalem, "Clarkdale" */
        case 44: /* 32 nm nehalem, "Gulftown" */
+       case 47: /* 32 nm Xeon E7 */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
index c2520e1..d1f77e2 100644 (file)
@@ -947,7 +947,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                if (!x86_perf_event_set_period(event))
                        continue;
                if (perf_event_overflow(event, 1, &data, regs))
-                       p4_pmu_disable_event(event);
+                       x86_pmu_stop(event, 0);
        }
 
        if (handled) {
index 82ada01..b117efd 100644 (file)
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
 #define AGPEXTERN
 #endif
 
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR     (1ULL << 40)
+
 /* backdoor interface to AGP driver */
 AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir, unsigned long align_mask)
 {
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
-       unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+       unsigned long iommu_page;
        int i;
 
+       if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+               return bad_dma_addr;
+
+       iommu_page = alloc_iommu(dev, npages, align_mask);
        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
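
The new test above rejects any mapping that would reach past the GART's
1TB limit before an aperture slot is allocated.  A minimal userspace
sketch of just that check (BAD_DMA_ADDR stands in for the kernel's
bad_dma_addr cookie):

#include <stdint.h>

#define GART_MAX_PHYS_ADDR	(1ULL << 40)	/* GART remaps below 1TB only */
#define BAD_DMA_ADDR		(~0ULL)

static uint64_t gart_check_range(uint64_t phys_mem, uint64_t size)
{
	if (phys_mem + size > GART_MAX_PHYS_ADDR)
		return BAD_DMA_ADDR;	/* fail before touching the aperture */
	return phys_mem;		/* caller proceeds to alloc_iommu() */
}
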
index 8ed8908..c2871d3 100644 (file)
@@ -312,26 +312,6 @@ void __cpuinit smp_store_cpu_info(int id)
                identify_secondary_cpu(c);
 }
 
-static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
-{
-       int node1 = early_cpu_to_node(cpu1);
-       int node2 = early_cpu_to_node(cpu2);
-
-       /*
-        * Our CPU scheduler assumes all logical cpus in the same physical cpu
-        * share the same node. But, buggy ACPI or NUMA emulation might assign
-        * them to different node. Fix it.
-        */
-       if (node1 != node2) {
-               pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
-                          cpu1, node1, cpu2, node2, node2);
-
-               numa_remove_cpu(cpu1);
-               numa_set_node(cpu1, node2);
-               numa_add_cpu(cpu1);
-       }
-}
-
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
        cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -340,7 +320,6 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
        cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
        cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
-       check_cpu_siblings_on_same_node(cpu1, cpu2);
 }
 
 
@@ -382,12 +361,10 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
-                       check_cpu_siblings_on_same_node(cpu, i);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
-                       check_cpu_siblings_on_same_node(cpu, i);
                        /*
                         *  Does this new cpu bringup a new core?
                         */
index 9559d36..745258d 100644 (file)
@@ -213,53 +213,48 @@ int early_cpu_to_node(int cpu)
        return per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+void debug_cpumask_set_cpu(int cpu, int node, bool enable)
 {
-       int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        char buf[64];
 
        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
-               return NULL;
+               return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
-               return NULL;
+               return;
        }
 
+       if (enable)
+               cpumask_set_cpu(cpu, mask);
+       else
+               cpumask_clear_cpu(cpu, mask);
+
        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
-       return mask;
+       return;
 }
 
 # ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 {
-       struct cpumask *mask;
-
-       mask = debug_cpumask_set_cpu(cpu, enable);
-       if (!mask)
-               return;
-
-       if (enable)
-               cpumask_set_cpu(cpu, mask);
-       else
-               cpumask_clear_cpu(cpu, mask);
+       debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
 }
 
 void __cpuinit numa_add_cpu(int cpu)
 {
-       numa_set_cpumask(cpu, 1);
+       numa_set_cpumask(cpu, true);
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-       numa_set_cpumask(cpu, 0);
+       numa_set_cpumask(cpu, false);
 }
 # endif        /* !CONFIG_NUMA_EMU */
 
index ad091e4..de84cc1 100644 (file)
@@ -454,10 +454,9 @@ void __cpuinit numa_remove_cpu(int cpu)
                cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
 }
 #else  /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 {
-       struct cpumask *mask;
-       int nid, physnid, i;
+       int nid, physnid;
 
        nid = early_cpu_to_node(cpu);
        if (nid == NUMA_NO_NODE) {
@@ -467,28 +466,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 
        physnid = emu_nid_to_phys[nid];
 
-       for_each_online_node(i) {
+       for_each_online_node(nid) {
                if (emu_nid_to_phys[nid] != physnid)
                        continue;
 
-               mask = debug_cpumask_set_cpu(cpu, enable);
-               if (!mask)
-                       return;
-
-               if (enable)
-                       cpumask_set_cpu(cpu, mask);
-               else
-                       cpumask_clear_cpu(cpu, mask);
+               debug_cpumask_set_cpu(cpu, nid, enable);
        }
 }
 
 void __cpuinit numa_add_cpu(int cpu)
 {
-       numa_set_cpumask(cpu, 1);
+       numa_set_cpumask(cpu, true);
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-       numa_set_cpumask(cpu, 0);
+       numa_set_cpumask(cpu, false);
 }
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
index a991b57..aef7af9 100644 (file)
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-       unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
        if (pte_val_ma(*ptep) & _PAGE_PRESENT)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));
-#endif
+
+       return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+       unsigned long pfn = pte_pfn(pte);
 
        /*
         * If the new pfn is within the range of the newly allocated
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 
        return pte;
 }
+#endif /* CONFIG_X86_64 */
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
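
The 32-bit variant above relies on a compact masking identity worth
spelling out: ((old & RW) | ~RW) & new leaves every bit of the new PTE
intact except RW, which survives only if the existing PTE already had it.
A plain-C sketch (the _PAGE_RW position is illustrative):

#include <stdint.h>

#define PAGE_RW	0x2ULL	/* stand-in for _PAGE_RW */

/* Clear RW in new_pte unless old_pte already had it; every other bit of
 * new_pte passes through unchanged. */
static uint64_t mask_rw(uint64_t old_pte, uint64_t new_pte)
{
	return ((old_pte & PAGE_RW) | ~PAGE_RW) & new_pte;
}
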
index fa0269a..90bac0a 100644 (file)
@@ -227,7 +227,7 @@ char * __init xen_memory_setup(void)
 
        memcpy(map_raw, map, sizeof(map));
        e820.nr_map = 0;
-       xen_extra_mem_start = mem_end;
+       xen_extra_mem_start = max((1ULL << 32), mem_end);
        for (i = 0; i < memmap.nr_entries; i++) {
                unsigned long long end;
 
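
The max() above pins the start of Xen's extra-memory region to the 4GB
boundary or the end of the E820 map, whichever is higher, presumably to
keep the region clear of the 32-bit hole below 4GB.  A userspace sketch of
the placement rule:

#include <stdint.h>
#include <stdio.h>

static uint64_t extra_mem_start(uint64_t mem_end)
{
	uint64_t four_gb = 1ULL << 32;

	return mem_end > four_gb ? mem_end : four_gb;	/* max(4GB, mem_end) */
}

int main(void)
{
	/* E820 ends at 3GB: extra memory still starts at 4GB */
	printf("%#llx\n", (unsigned long long)extra_mem_start(0xc0000000ULL));
	return 0;
}
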
index d77089d..4340ee0 100644 (file)
@@ -64,47 +64,41 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
-       int j;
-
-       seq_printf(p, "%*s: ", prec, "NMI");
-       for_each_online_cpu(j)
-               seq_printf(p, "%10u ", nmi_count(j));
-       seq_putc(p, '\n');
        seq_printf(p, "%*s: ", prec, "ERR");
        seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
        return 0;
 }
 
-static void xtensa_irq_mask(struct irq_chip *d)
+static void xtensa_irq_mask(struct irq_data *d)
 {
        cached_irq_mask &= ~(1 << d->irq);
        set_sr (cached_irq_mask, INTENABLE);
 }
 
-static void xtensa_irq_unmask(struct irq_chip *d)
+static void xtensa_irq_unmask(struct irq_data *d)
 {
        cached_irq_mask |= 1 << d->irq;
        set_sr (cached_irq_mask, INTENABLE);
 }
 
-static void xtensa_irq_enable(struct irq_chip *d)
+static void xtensa_irq_enable(struct irq_data *d)
 {
        variant_irq_enable(d->irq);
        xtensa_irq_unmask(d);
 }
 
-static void xtensa_irq_disable(struct irq_chip *d)
+static void xtensa_irq_disable(struct irq_data *d)
 {
        xtensa_irq_mask(d);
        variant_irq_disable(d->irq);
 }
 
-static void xtensa_irq_ack(struct irq_chip *d)
+static void xtensa_irq_ack(struct irq_data *d)
 {
        set_sr(1 << d->irq, INTCLEAR);
 }
 
-static int xtensa_irq_retrigger(struct irq_chip *d)
+static int xtensa_irq_retrigger(struct irq_data *d)
 {
        set_sr (1 << d->irq, INTSET);
        return 1;
index 78b7b0c..a2e58ee 100644 (file)
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
 }
 
@@ -220,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+       queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                               msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -238,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -291,30 +292,35 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
 
-       /*
-        * Only recurse once to avoid overrunning the stack, let the unplug
-        * handling reinvoke the handler shortly if we already got there.
-        */
-       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else
-               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+       q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+       if (likely(!blk_queue_stopped(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+EXPORT_SYMBOL(blk_run_queue_async);
+
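
The split gives callers two explicit choices: __blk_run_queue() now always
calls q->request_fn() directly (queue lock held, no recursion guard), while
blk_run_queue_async() always defers to kblockd.  A kernel-style sketch of
the decision a caller makes (the helper is illustrative, not from this
patch):

#include <linux/blkdev.h>

/* Completion and other atomic paths must not re-enter the driver's
 * request_fn, so they punt to kblockd; everyone else runs directly. */
static void kick_queue(struct request_queue *q, bool in_completion_path)
{
	if (in_completion_path)
		blk_run_queue_async(q);	/* deferred via kblockd workqueue */
	else
		__blk_run_queue(q);	/* direct; q->queue_lock must be held */
}
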
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -328,7 +334,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -977,7 +983,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                blk_queue_end_tag(q, rq);
 
        add_acct_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1321,7 +1327,7 @@ get_rq:
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
 out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
@@ -2638,6 +2644,7 @@ void blk_start_plug(struct blk_plug *plug)
 
        plug->magic = PLUG_MAGIC;
        INIT_LIST_HEAD(&plug->list);
+       INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
 
        /*
@@ -2670,12 +2677,41 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
                            bool from_schedule)
+       __releases(q->queue_lock)
 {
        trace_block_unplug(q, depth, !from_schedule);
-       __blk_run_queue(q, from_schedule);
 
-       if (q->unplugged_fn)
-               q->unplugged_fn(q);
+       /*
+        * If we are punting this to kblockd, then we can safely drop
+        * the queue_lock before waking kblockd (which needs to take
+        * this lock).
+        */
+       if (from_schedule) {
+               spin_unlock(q->queue_lock);
+               blk_run_queue_async(q);
+       } else {
+               __blk_run_queue(q);
+               spin_unlock(q->queue_lock);
+       }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+       LIST_HEAD(callbacks);
+
+       if (list_empty(&plug->cb_list))
+               return;
+
+       list_splice_init(&plug->cb_list, &callbacks);
+
+       while (!list_empty(&callbacks)) {
+               struct blk_plug_cb *cb = list_first_entry(&callbacks,
+                                                         struct blk_plug_cb,
+                                                         list);
+               list_del(&cb->list);
+               cb->callback(cb);
+       }
 }
 
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
@@ -2688,6 +2724,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
+       flush_plug_callbacks(plug);
        if (list_empty(&plug->list))
                return;
 
@@ -2712,10 +2749,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                BUG_ON(!rq->q);
                if (rq->q != q) {
-                       if (q) {
+                       /*
+                        * This drops the queue lock
+                        */
+                       if (q)
                                queue_unplugged(q, depth, from_schedule);
-                               spin_unlock(q->queue_lock);
-                       }
                        q = rq->q;
                        depth = 0;
                        spin_lock(q->queue_lock);
@@ -2733,14 +2771,14 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                depth++;
        }
 
-       if (q) {
+       /*
+        * This drops the queue lock
+        */
+       if (q)
                queue_unplugged(q, depth, from_schedule);
-               spin_unlock(q->queue_lock);
-       }
 
        local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
index 7482b7f..81e3181 100644 (file)
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
        __elv_add_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        /* the queue is stopped so it won't be plugged+unplugged */
        if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                q->request_fn(q);
index eba4a27..6c9b5e1 100644 (file)
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
         * request_fn may confuse the driver.  Always use kblockd.
         */
        if (queued)
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
index eb94904..1fa7692 100644 (file)
@@ -790,22 +790,6 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);
 
-/**
- * blk_queue_unplugged - register a callback for an unplug event
- * @q:         the request queue for the device
- * @fn:                the function to call
- *
- * Some stacked drivers may need to know when IO is dispatched on an
- * unplug event. By registrering a callback here, they will be notified
- * when someone flushes their on-stack queue plug. The function will be
- * called with the queue lock held.
- */
-void blk_queue_unplugged(struct request_queue *q, unplugged_fn *fn)
-{
-       q->unplugged_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_unplugged);
-
 static int __init blk_settings_init(void)
 {
        blk_max_low_pfn = max_low_pfn - 1;
index 6d73512..bd23631 100644 (file)
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
        if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_SYNC);
-       } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+       } else {
                blk_clear_queue_full(q, BLK_RW_SYNC);
                wake_up(&rl->wait[BLK_RW_SYNC]);
        }
 
        if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_ASYNC);
-       } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+       } else {
                blk_clear_queue_full(q, BLK_RW_ASYNC);
                wake_up(&rl->wait[BLK_RW_ASYNC]);
        }
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
                return ret;
 
        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-       if (ret < 0)
+       if (ret < 0) {
+               blk_trace_remove_sysfs(dev);
                return ret;
+       }
 
        kobject_uevent(&q->kobj, KOBJ_ADD);
 
index 3be881e..5b52011 100644 (file)
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
-                   void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+                 void (*func)(struct io_context *, struct cfq_io_context *))
 {
        struct cfq_io_context *cic;
        struct hlist_node *n;
 
+       rcu_read_lock();
+
        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
                func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-                 void (*func)(struct io_context *, struct cfq_io_context *))
-{
-       rcu_read_lock();
-       __call_for_each_cic(ioc, func);
        rcu_read_unlock();
 }
 
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
         * should be ok to iterate over the known list, we will see all cic's
         * since no new ones are added.
         */
-       __call_for_each_cic(ioc, cic_free_func);
+       call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
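
The refactor folds the RCU read-side lock into the iterator so that no
caller can traverse the cic list unlocked.  A generic kernel-style sketch
of the rule, with hypothetical types (note the 2.6.39-era
hlist_for_each_entry_rcu() signature, which still takes a node cursor):

#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct item {
	struct hlist_node node;
};

/* RCU list traversal must run between rcu_read_lock() and
 * rcu_read_unlock(); taking the lock inside the helper removes the
 * unlocked variant that callers could misuse. */
static void for_each_item(struct hlist_head *head, void (*fn)(struct item *))
{
	struct item *it;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, n, head, node)
		fn(it);
	rcu_read_unlock();
}
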
@@ -3368,7 +3360,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue, false);
+                               __blk_run_queue(cfqd->queue);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3383,7 +3375,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue, false);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -3743,7 +3735,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue, false);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
index 0cdb4e7..45ca1e3 100644 (file)
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-                   where == ELEVATOR_INSERT_SORT)
+                   (where == ELEVATOR_INSERT_SORT ||
+                    where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;
 
        switch (where) {
@@ -695,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT_MERGE:
index b364bd0..2dd9887 100644 (file)
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
 
        spin_unlock_irq(&ev->lock);
 
-       /* tell userland about new events */
+       /*
+        * Tell userland about new events.  Only the events listed in
+        * @disk->events are reported.  Unlisted events are processed the
+        * same internally but never get reported to userland.
+        */
        for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
-               if (events & (1 << i))
+               if (events & disk->events & (1 << i))
                        envp[nr_events++] = disk_uevents[i];
 
        if (nr_events)
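
Only event bits the driver advertised in disk->events now reach userland;
unlisted events are still handled internally but never generate a uevent.
A one-line userspace sketch of the filter:

#include <stdio.h>

/* events & disk->events & (1 << i), folded over all bits at once */
static unsigned int reportable(unsigned int pending, unsigned int advertised)
{
	return pending & advertised;
}

int main(void)
{
	/* media-change pending (bit 0) but the driver advertises nothing */
	printf("%u\n", reportable(0x1, 0x0));	/* -> 0: no uevent */
	return 0;
}
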
index 012cba0..b072648 100644 (file)
@@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
        struct agp_memory *new;
        unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
 
+       if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+               return NULL;
+
        new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
        if (new == NULL)
                return NULL;
@@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
        int scratch_pages;
        struct agp_memory *new;
        size_t i;
+       int cur_memory;
 
        if (!bridge)
                return NULL;
 
-       if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+       cur_memory = atomic_read(&bridge->current_memory_agp);
+       if ((cur_memory + page_count > bridge->max_memory_agp) ||
+           (cur_memory + page_count < page_count))
                return NULL;
 
        if (type >= AGP_USER_TYPES) {
@@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
                return -EINVAL;
        }
 
-       /* AK: could wrap */
-       if ((pg_start + mem->page_count) > num_entries)
+       if (((pg_start + mem->page_count) > num_entries) ||
+           ((pg_start + mem->page_count) < pg_start))
                return -EINVAL;
 
        j = pg_start;
@@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
        size_t i;
        struct agp_bridge_data *bridge;
-       int mask_type;
+       int mask_type, num_entries;
 
        bridge = mem->bridge;
        if (!bridge)
@@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        if (type != mem->type)
                return -EINVAL;
 
+       num_entries = agp_num_entries();
+       if (((pg_start + mem->page_count) > num_entries) ||
+           ((pg_start + mem->page_count) < pg_start))
+               return -EINVAL;
+
        mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
        if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
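
All three hunks in this file guard the same class of bug: pg_start +
page_count and num_agp_pages * sizeof(struct page *) come from userspace
and can wrap.  A userspace sketch of the checks (sizeof(void *) stands in
for sizeof(struct page *)):

#include <limits.h>
#include <stddef.h>

static int agp_range_ok(size_t pg_start, size_t page_count, size_t num_entries)
{
	if (pg_start + page_count > num_entries)	/* past end of GATT */
		return 0;
	if (pg_start + page_count < pg_start)		/* sum wrapped */
		return 0;
	return 1;
}

static int agp_alloc_size_ok(unsigned long num_agp_pages)
{
	/* mirror of the INT_MAX guard before the kzalloc() above */
	return num_agp_pages <= INT_MAX / sizeof(void *);
}
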
index 84b164d..838568a 100644 (file)
@@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port)
                spin_lock_irq(&pdrvdata_lock);
                list_del(&port->cons.list);
                spin_unlock_irq(&pdrvdata_lock);
-#if 0
-               /*
-                * hvc_remove() not called as removing one hvc port
-                * results in other hvc ports getting frozen.
-                *
-                * Once this is resolved in hvc, this functionality
-                * will be enabled.  Till that is done, the -EPIPE
-                * return from get_chars() above will help
-                * hvc_console.c to clean up on ports we remove here.
-                */
                hvc_remove(port->cons.hvc);
-#endif
        }
 
        /* Remove unused data this port might have received. */
index d770058..219d88a 100644 (file)
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
+               err = 0;
        }
 
        return err;
index 432fc04..e522c70 100644 (file)
@@ -3771,8 +3771,11 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !crtc->enabled)
+       if (crtc->fb == NULL || !crtc->enabled) {
+               *cursor_wm = cursor->guard_size;
+               *plane_wm = display->guard_size;
                return false;
+       }
 
        htotal = crtc->mode.htotal;
        hdisplay = crtc->mode.hdisplay;
@@ -6215,36 +6218,6 @@ cleanup_work:
        return ret;
 }
 
-static void intel_crtc_reset(struct drm_crtc *crtc)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       /* Reset flags back to the 'unknown' status so that they
-        * will be correctly set on the initial modeset.
-        */
-       intel_crtc->dpms_mode = -1;
-}
-
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
-       .dpms = intel_crtc_dpms,
-       .mode_fixup = intel_crtc_mode_fixup,
-       .mode_set = intel_crtc_mode_set,
-       .mode_set_base = intel_pipe_set_base,
-       .mode_set_base_atomic = intel_pipe_set_base_atomic,
-       .load_lut = intel_crtc_load_lut,
-       .disable = intel_crtc_disable,
-};
-
-static const struct drm_crtc_funcs intel_crtc_funcs = {
-       .reset = intel_crtc_reset,
-       .cursor_set = intel_crtc_cursor_set,
-       .cursor_move = intel_crtc_cursor_move,
-       .gamma_set = intel_crtc_gamma_set,
-       .set_config = drm_crtc_helper_set_config,
-       .destroy = intel_crtc_destroy,
-       .page_flip = intel_crtc_page_flip,
-};
-
 static void intel_sanitize_modesetting(struct drm_device *dev,
                                       int pipe, int plane)
 {
@@ -6281,6 +6254,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
        intel_disable_pipe(dev_priv, pipe);
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       /* Reset flags back to the 'unknown' status so that they
+        * will be correctly set on the initial modeset.
+        */
+       intel_crtc->dpms_mode = -1;
+
+       /* We need to fix up any BIOS configuration that conflicts with
+        * our expectations.
+        */
+       intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+       .dpms = intel_crtc_dpms,
+       .mode_fixup = intel_crtc_mode_fixup,
+       .mode_set = intel_crtc_mode_set,
+       .mode_set_base = intel_pipe_set_base,
+       .mode_set_base_atomic = intel_pipe_set_base_atomic,
+       .load_lut = intel_crtc_load_lut,
+       .disable = intel_crtc_disable,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+       .reset = intel_crtc_reset,
+       .cursor_set = intel_crtc_cursor_set,
+       .cursor_move = intel_crtc_cursor_move,
+       .gamma_set = intel_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = intel_crtc_destroy,
+       .page_flip = intel_crtc_page_flip,
+};
+
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -6330,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
        setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
                    (unsigned long)intel_crtc);
-
-       intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
index 4256b8e..6b22c1d 100644 (file)
@@ -1151,10 +1151,10 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                            (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
        {
                int pipeconf_reg = PIPECONF(pipe);
-               int dspcntr_reg = DSPCNTR(pipe);
+               int dspcntr_reg = DSPCNTR(intel_crtc->plane);
                int pipeconf = I915_READ(pipeconf_reg);
                int dspcntr = I915_READ(dspcntr_reg);
-               int dspbase_reg = DSPADDR(pipe);
+               int dspbase_reg = DSPADDR(intel_crtc->plane);
                int xpos = 0x0, ypos = 0x0;
                unsigned int xsize, ysize;
                /* Pipe must be off here */
@@ -1378,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
        if (type < 0)
                return connector_status_disconnected;
 
+       intel_tv->type = type;
        intel_tv_find_better_format(connector);
+
        return connector_status_connected;
 }
 
@@ -1670,8 +1672,7 @@ intel_tv_init(struct drm_device *dev)
         *
         * More recent chipsets favour HDMI rather than integrated S-Video.
         */
-       connector->polled =
-               DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
        drm_connector_init(dev, connector, &intel_tv_connector_funcs,
                           DRM_MODE_CONNECTOR_SVIDEO);
index ce38e97..568caed 100644 (file)
@@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
                return ret;
 
        /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-       ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+       ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
                                     &chan->m2mf_ntfy);
        if (ret)
                return ret;
index 856d56a..a76514a 100644 (file)
@@ -682,6 +682,9 @@ struct drm_nouveau_private {
        /* For PFIFO and PGRAPH. */
        spinlock_t context_switch_lock;
 
+       /* VM/PRAMIN flush, legacy PRAMIN aperture */
+       spinlock_t vm_lock;
+
        /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
        struct nouveau_ramht  *ramht;
        struct nouveau_gpuobj *ramfc;
index 889c445..39aee6d 100644 (file)
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
                OUT_RING  (chan, 0);
        }
 
-       nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+       nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
        FIRE_RING(chan);
        mutex_unlock(&chan->mutex);
 
        ret = -EBUSY;
        for (i = 0; i < 100000; i++) {
-               if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+               if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
                        ret = 0;
                        break;
                }
index 78f467f..5045f8b 100644 (file)
@@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
                        dma_bits = 40;
        } else
        if (drm_pci_device_is_pcie(dev) &&
-           dev_priv->chipset != 0x40 &&
+           dev_priv->chipset  > 0x40 &&
            dev_priv->chipset != 0x45) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
                        dma_bits = 39;
index 7ba3fc0..5b39718 100644 (file)
@@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *ntfy = NULL;
-       uint32_t flags;
+       uint32_t flags, ttmpl;
        int ret;
 
-       if (nouveau_vram_notify)
+       if (nouveau_vram_notify) {
                flags = NOUVEAU_GEM_DOMAIN_VRAM;
-       else
+               ttmpl = TTM_PL_FLAG_VRAM;
+       } else {
                flags = NOUVEAU_GEM_DOMAIN_GART;
+               ttmpl = TTM_PL_FLAG_TT;
+       }
 
        ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
        if (ret)
                return ret;
 
-       ret = nouveau_bo_pin(ntfy, flags);
+       ret = nouveau_bo_pin(ntfy, ttmpl);
        if (ret)
                goto out_err;
 
index 4f00c87..67a16e0 100644 (file)
@@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
 {
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
+       unsigned long flags;
 
        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32  val;
 
-               spin_lock(&dev_priv->ramin_lock);
+               spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
-               spin_unlock(&dev_priv->ramin_lock);
+               spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return val;
        }
 
@@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
 {
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
+       unsigned long flags;
 
        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
 
-               spin_lock(&dev_priv->ramin_lock);
+               spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
-               spin_unlock(&dev_priv->ramin_lock);
+               spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return;
        }
 
index a33fe40..4bce801 100644 (file)
@@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                                be->func->clear(be);
                                return -EFAULT;
                        }
+                       nvbe->ttm_alloced[nvbe->nr_pages] = false;
                }
 
                nvbe->nr_pages++;
@@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev)
        u32 aper_size, align;
        int ret;
 
-       if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+       if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 64 * 1024 * 1024;
@@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (drm_pci_device_is_pcie(dev) &&
-           dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+           dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
index 6e2b1a6..a30adec 100644 (file)
@@ -608,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
        spin_lock_init(&dev_priv->channels.lock);
        spin_lock_init(&dev_priv->tile.lock);
        spin_lock_init(&dev_priv->context_switch_lock);
+       spin_lock_init(&dev_priv->vm_lock);
 
        /* Make the CRTCs and I2C buses accessible */
        ret = engine->display.early_init(dev);
index a6f8aa6..4f95a1e 100644 (file)
@@ -404,23 +404,25 @@ void
 nv50_instmem_flush(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       unsigned long flags;
 
-       spin_lock(&dev_priv->ramin_lock);
+       spin_lock_irqsave(&dev_priv->vm_lock, flags);
        nv_wr32(dev, 0x00330c, 0x00000001);
        if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
-       spin_unlock(&dev_priv->ramin_lock);
+       spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       unsigned long flags;
 
-       spin_lock(&dev_priv->ramin_lock);
+       spin_lock_irqsave(&dev_priv->vm_lock, flags);
        nv_wr32(dev, 0x070000, 0x00000001);
        if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
-       spin_unlock(&dev_priv->ramin_lock);
+       spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
index 4fd3432..6c26944 100644 (file)
@@ -174,10 +174,11 @@ void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       unsigned long flags;
 
-       spin_lock(&dev_priv->ramin_lock);
+       spin_lock_irqsave(&dev_priv->vm_lock, flags);
        nv_wr32(dev, 0x100c80, (engine << 16) | 1);
        if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
                NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-       spin_unlock(&dev_priv->ramin_lock);
+       spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
index a0a2a02..a179e6c 100644 (file)
@@ -104,11 +104,12 @@ nvc0_vm_flush(struct nouveau_vm *vm)
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct drm_device *dev = vm->dev;
        struct nouveau_vm_pgd *vpgd;
+       unsigned long flags;
        u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
        pinstmem->flush(vm->dev);
 
-       spin_lock(&dev_priv->ramin_lock);
+       spin_lock_irqsave(&dev_priv->vm_lock, flags);
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                /* looks like maybe a "free flush slots" counter, the
                 * faster you write to 0x100cbc the more it decreases
@@ -125,5 +126,5 @@ nvc0_vm_flush(struct nouveau_vm *vm)
                                 nv_rd32(dev, 0x100c80), engine);
                }
        }
-       spin_unlock(&dev_priv->ramin_lock);
+       spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
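
Every nouveau hunk that replaces ramin_lock with the new vm_lock also
switches from spin_lock() to spin_lock_irqsave(): these flush paths can be
entered from interrupt context, where taking a lock that is also held in
process context with a plain spin_lock() could deadlock.  A generic
kernel-style sketch of the pattern:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(vm_lock);	/* stand-in for dev_priv->vm_lock */

static void flush_locked(void (*do_flush)(void))
{
	unsigned long flags;

	spin_lock_irqsave(&vm_lock, flags);	/* IRQ-safe critical section */
	do_flush();
	spin_unlock_irqrestore(&vm_lock, flags);
}
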
index d71d375..7bd7456 100644 (file)
@@ -135,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                case ATOM_IIO_MOVE_INDEX:
                        temp &=
                            ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-                             CU8(base + 2));
+                             CU8(base + 3));
                        temp |=
                            ((index >> CU8(base + 2)) &
                             (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -145,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                case ATOM_IIO_MOVE_DATA:
                        temp &=
                            ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-                             CU8(base + 2));
+                             CU8(base + 3));
                        temp |=
                            ((data >> CU8(base + 2)) &
                             (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -155,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                case ATOM_IIO_MOVE_ATTR:
                        temp &=
                            ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-                             CU8(base + 2));
+                             CU8(base + 3));
                        temp |=
                            ((ctx->
                              io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
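
The three one-byte fixes above correct the same decoding slip in the
ATOM_IIO_MOVE_* ops: a field of width CU8(base + 1) is read from the
source at bit CU8(base + 2) but written to the destination at bit
CU8(base + 3), and the destination mask was wrongly built with the source
shift.  A plain-C sketch of the corrected move:

#include <stdint.h>

/* Move a width-bit field from bit src_shift of src into bit dst_shift of
 * temp, clearing the destination field first (the fixed behavior). */
static uint32_t iio_move(uint32_t temp, uint32_t src, unsigned int width,
			 unsigned int src_shift, unsigned int dst_shift)
{
	uint32_t mask = 0xFFFFFFFFu >> (32 - width);

	temp &= ~(mask << dst_shift);
	temp |= ((src >> src_shift) & mask) << dst_shift;
	return temp;
}
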
index 9d516a8..529a3a7 100644 (file)
@@ -532,10 +532,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                else
                        pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-               if ((rdev->family == CHIP_R600) ||
-                   (rdev->family == CHIP_RV610) ||
-                   (rdev->family == CHIP_RV630) ||
-                   (rdev->family == CHIP_RV670))
+               if (rdev->family < CHIP_RV770)
                        pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
        } else {
                pll->flags |= RADEON_PLL_LEGACY;
@@ -565,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                if (ss_enabled) {
                                        if (ss->refdiv) {
-                                               pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                                                pll->flags |= RADEON_PLL_USE_REF_DIV;
                                                pll->reference_div = ss->refdiv;
                                                if (ASIC_IS_AVIVO(rdev))
index 3453910..e9bc135 100644 (file)
@@ -353,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
                                        struct drm_display_mode *mode,
                                        struct drm_display_mode *other_mode)
 {
-       u32 tmp = 0;
+       u32 tmp;
        /*
         * Line Buffer Setup
         * There are 3 line buffers, each one shared by 2 display controllers.
@@ -363,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
         * first display controller
         *  0 - first half of lb (3840 * 2)
         *  1 - first 3/4 of lb (5760 * 2)
-        *  2 - whole lb (7680 * 2)
+        *  2 - whole lb (7680 * 2), other crtc must be disabled
         *  3 - first 1/4 of lb (1920 * 2)
         * second display controller
         *  4 - second half of lb (3840 * 2)
         *  5 - second 3/4 of lb (5760 * 2)
-        *  6 - whole lb (7680 * 2)
+        *  6 - whole lb (7680 * 2), other crtc must be disabled
         *  7 - last 1/4 of lb (1920 * 2)
         */
-       if (mode && other_mode) {
-               if (mode->hdisplay > other_mode->hdisplay) {
-                       if (mode->hdisplay > 2560)
-                               tmp = 1; /* 3/4 */
-                       else
-                               tmp = 0; /* 1/2 */
-               } else if (other_mode->hdisplay > mode->hdisplay) {
-                       if (other_mode->hdisplay > 2560)
-                               tmp = 3; /* 1/4 */
-                       else
-                               tmp = 0; /* 1/2 */
-               } else
+       /* this can get tricky if we have two large displays on a paired group
+        * of crtcs.  Ideally for multiple large displays we'd assign them to
+        * non-linked crtcs for maximum line buffer allocation.
+        */
+       if (radeon_crtc->base.enabled && mode) {
+               if (other_mode)
                        tmp = 0; /* 1/2 */
-       } else if (mode)
-               tmp = 2; /* whole */
-       else if (other_mode)
-               tmp = 3; /* 1/4 */
+               else
+                       tmp = 2; /* whole */
+       } else
+               tmp = 0;
 
        /* second controller of the pair uses second half of the lb */
        if (radeon_crtc->crtc_id % 2)
                tmp += 4;
        WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
-       switch (tmp) {
-       case 0:
-       case 4:
-       default:
-               if (ASIC_IS_DCE5(rdev))
-                       return 4096 * 2;
-               else
-                       return 3840 * 2;
-       case 1:
-       case 5:
-               if (ASIC_IS_DCE5(rdev))
-                       return 6144 * 2;
-               else
-                       return 5760 * 2;
-       case 2:
-       case 6:
-               if (ASIC_IS_DCE5(rdev))
-                       return 8192 * 2;
-               else
-                       return 7680 * 2;
-       case 3:
-       case 7:
-               if (ASIC_IS_DCE5(rdev))
-                       return 2048 * 2;
-               else
-                       return 1920 * 2;
+       if (radeon_crtc->base.enabled && mode) {
+               switch (tmp) {
+               case 0:
+               case 4:
+               default:
+                       if (ASIC_IS_DCE5(rdev))
+                               return 4096 * 2;
+                       else
+                               return 3840 * 2;
+               case 1:
+               case 5:
+                       if (ASIC_IS_DCE5(rdev))
+                               return 6144 * 2;
+                       else
+                               return 5760 * 2;
+               case 2:
+               case 6:
+                       if (ASIC_IS_DCE5(rdev))
+                               return 8192 * 2;
+                       else
+                               return 7680 * 2;
+               case 3:
+               case 7:
+                       if (ASIC_IS_DCE5(rdev))
+                               return 2048 * 2;
+                       else
+                               return 1920 * 2;
+               }
        }
+
+       /* controller not enabled, so no lb used */
+       return 0;
 }
 
 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
@@ -2581,7 +2580,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
        u32 wptr, tmp;
 
        if (rdev->wb.enabled)
-               wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+               wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
        else
                wptr = RREG32(IH_RB_WPTR);
 
index 15d5829..6f27593 100644 (file)
@@ -3231,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
        u32 wptr, tmp;
 
        if (rdev->wb.enabled)
-               wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+               wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
        else
                wptr = RREG32(IH_RB_WPTR);
 
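
Both wptr fixes (here and in evergreen.c above) matter only on big-endian
hosts: the GPU fills the writeback buffer in little-endian, so the raw
load must pass through le32_to_cpu() before use as a ring write pointer.
A userspace sketch of the equivalent swap:

#include <stdint.h>

/* le32_to_cpu() equivalent: byte-swap only when the host is big-endian. */
static uint32_t read_wb_wptr(const volatile uint32_t *wb, unsigned int idx)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(wb[idx]);
#else
	return wb[idx];		/* no-op on little-endian hosts */
#endif
}
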
index 2ef6d51..5f45fa1 100644 (file)
@@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
        if (router->ddc_valid || router->cd_valid) {
                radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
                if (!radeon_connector->router_bus)
-                       goto failed;
+                       DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
        }
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_VGA:
@@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
@@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
@@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                subpixel_order = SubPixelHorizontalRGB;
                drm_connector_attach_property(&radeon_connector->base,
@@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.coherent_mode_property,
@@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev,
                        else
                                radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
                        if (!radeon_dig_connector->dp_i2c_bus)
-                               goto failed;
+                               DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                subpixel_order = SubPixelHorizontalRGB;
                drm_connector_attach_property(&radeon_connector->base,
@@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                drm_connector_attach_property(&radeon_connector->base,
                                              dev->mode_config.scaling_mode_property,
@@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
@@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
@@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                        radeon_connector->dac_load_detect = true;
@@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
                        if (!radeon_connector->ddc_bus)
-                               goto failed;
+                               DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                drm_connector_attach_property(&radeon_connector->base,
                                              dev->mode_config.scaling_mode_property,
@@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                radeon_legacy_backlight_init(radeon_encoder, connector);
                }
        }
-       return;
-
-failed:
-       drm_connector_cleanup(connector);
-       kfree(connector);
 }
index ccbabf7..983cbac 100644 (file)
@@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
        if (!radeon_connector->router.ddc_valid)
                return;
 
+       if (!radeon_connector->router_bus)
+               return;
+
        radeon_i2c_get_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x3, &val);
@@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
        if (!radeon_connector->router.cd_valid)
                return;
 
+       if (!radeon_connector->router_bus)
+               return;
+
        radeon_i2c_get_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x3, &val);
index edfb92e..196ffaf 100644 (file)
@@ -139,7 +139,6 @@ struct pmbus_data {
         * A single status register covers multiple attributes,
         * so we keep them all together.
         */
-       u8 status_bits;
        u8 status[PB_NUM_STATUS_REG];
 
        u8 currpage;
index fd1e117..a5ec5a7 100644 (file)
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
        ide_cd_read_toc(drive, &sense);
        g->fops = &idecd_ops;
        g->flags |= GENHD_FL_REMOVABLE;
-       g->events = DISK_EVENT_MEDIA_CHANGE;
        add_disk(g);
        return 0;
 
index 2a6bc50..02caa7d 100644 (file)
@@ -79,6 +79,12 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
        return CDS_DRIVE_NOT_READY;
 }
 
+/*
+ * ide-cd always generates a media changed event if the media is missing,
+ * which makes it impossible to use for proper event reporting, so
+ * disk->events is cleared to 0 and the following function is used only
+ * to trigger revalidation; its result is never propagated to userland.
+ */
 unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
                                         unsigned int clearing, int slot_nr)
 {
index c4ffd48..70ea876 100644 (file)
@@ -298,6 +298,12 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
                return 0;
        }
 
+       /*
+        * The following is used to force revalidation on the first open on
+        * removable devices, and never gets reported to userland as
+        * genhd->events is 0.  This is intentional, since removable IDE
+        * disks can't really detect MEDIA_CHANGE events.
+        */
        ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
        drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
 
@@ -413,7 +419,6 @@ static int ide_gd_probe(ide_drive_t *drive)
        if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
                g->flags = GENHD_FL_REMOVABLE;
        g->fops = &ide_gd_ops;
-       g->events = DISK_EVENT_MEDIA_CHANGE;
        add_disk(g);
        return 0;
 
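The two ide hunks above clear genhd->events because IDE hardware cannot
reliably detect media changes.  For contrast, a driver that can detect
them would advertise the event and report it from its check_events
callback, roughly like this (a minimal sketch; the mydisk names and the
media_changed flag are illustrative, not from this patch):

	struct mydisk {
		unsigned long media_changed;	/* bit 0 set by the IRQ handler */
	};

	static unsigned int mydisk_check_events(struct gendisk *disk,
						unsigned int clearing)
	{
		struct mydisk *md = disk->private_data;

		/* report (and clear) a pending media change */
		return test_and_clear_bit(0, &md->media_changed) ?
			DISK_EVENT_MEDIA_CHANGE : 0;
	}

	static const struct block_device_operations mydisk_ops = {
		.owner		= THIS_MODULE,
		.check_events	= mydisk_check_events,
	};

	/* probe path: only advertise the event when detection is reliable */
	g->events = DISK_EVENT_MEDIA_CHANGE;
	g->fops = &mydisk_ops;
	add_disk(g);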
index 7f42d3a..88d8e4c 100644 (file)
@@ -39,13 +39,13 @@ struct evdev {
 };
 
 struct evdev_client {
-       int head;
-       int tail;
+       unsigned int head;
+       unsigned int tail;
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
-       int bufsize;
+       unsigned int bufsize;
        struct input_event buffer[];
 };
 
@@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex);
 static void evdev_pass_event(struct evdev_client *client,
                             struct input_event *event)
 {
-       /*
-        * Interrupts are disabled, just acquire the lock.
-        * Make sure we don't leave with the client buffer
-        * "empty" by having client->head == client->tail.
-        */
+       /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);
-       do {
-               client->buffer[client->head++] = *event;
-               client->head &= client->bufsize - 1;
-       } while (client->head == client->tail);
+
+       client->buffer[client->head++] = *event;
+       client->head &= client->bufsize - 1;
+
+       if (unlikely(client->head == client->tail)) {
+               /*
+                * This effectively "drops" all unconsumed events, leaving
+                * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+                */
+               client->tail = (client->head - 2) & (client->bufsize - 1);
+
+               client->buffer[client->tail].time = event->time;
+               client->buffer[client->tail].type = EV_SYN;
+               client->buffer[client->tail].code = SYN_DROPPED;
+               client->buffer[client->tail].value = 0;
+       }
+
        spin_unlock(&client->buffer_lock);
 
        if (event->type == EV_SYN)
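The overflow handling above replaces the old behaviour, which silently
overwrote the oldest events when the ring filled, with an explicit
EV_SYN/SYN_DROPPED marker.  A reader that sees the marker knows events
were lost and must resynchronize its view of the device.  A minimal
userspace sketch, where resync_state() and handle() are placeholders
supplied by the application:

	#include <linux/input.h>
	#include <unistd.h>

	static void drain(int fd)
	{
		struct input_event ev;

		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
				/* queue overflowed: discard any partial packet
				 * and re-fetch absolute state via EVIOCG* */
				resync_state(fd);	/* placeholder */
				continue;
			}
			handle(&ev);			/* placeholder */
		}
	}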
index d6e8bd8..ebbceed 100644 (file)
@@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 }
 EXPORT_SYMBOL(input_set_capability);
 
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+       int mt_slots;
+       int i;
+       unsigned int events;
+
+       if (dev->mtsize) {
+               mt_slots = dev->mtsize;
+       } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+               mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+                          dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
+               mt_slots = clamp(mt_slots, 2, 32);
+       } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+               mt_slots = 2;
+       } else {
+               mt_slots = 0;
+       }
+
+       events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+       for (i = 0; i < ABS_CNT; i++) {
+               if (test_bit(i, dev->absbit)) {
+                       if (input_is_mt_axis(i))
+                               events += mt_slots;
+                       else
+                               events++;
+               }
+       }
+
+       for (i = 0; i < REL_CNT; i++)
+               if (test_bit(i, dev->relbit))
+                       events++;
+
+       return events;
+}
+
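The estimate above only supplies a default for dev->hint_events_per_packet;
consumers can then size their queues from the hint.  A sketch of such a
consumer, with illustrative constants (not necessarily the values evdev
uses):

	#include <linux/kernel.h>
	#include <linux/log2.h>

	#define BUF_PACKETS	8	/* assumed headroom, in packets */
	#define MIN_BUF_EVENTS	64U

	static unsigned int compute_buffer_size(struct input_dev *dev)
	{
		unsigned int n_events =
			max(dev->hint_events_per_packet * BUF_PACKETS,
			    MIN_BUF_EVENTS);

		/* power of two, so head/tail wrap with the "& (bufsize - 1)"
		 * masking seen in evdev_pass_event() above */
		return roundup_pow_of_two(n_events);
	}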
 #define INPUT_CLEANSE_BITMASK(dev, type, bits)                         \
        do {                                                            \
                if (!test_bit(EV_##type, dev->evbit))                   \
@@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev)
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
+       if (!dev->hint_events_per_packet)
+               dev->hint_events_per_packet =
+                               input_estimate_events_per_packet(dev);
+
        /*
         * If delay and period are pre-set by the driver, then autorepeating
         * is handled by the driver itself and we don't do it in input.c.
index 09bef79..a26922c 100644 (file)
@@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
 static int __devinit twl4030_kp_probe(struct platform_device *pdev)
 {
        struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
-       const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+       const struct matrix_keymap_data *keymap_data;
        struct twl4030_keypad *kp;
        struct input_dev *input;
        u8 reg;
        int error;
 
-       if (!pdata || !pdata->rows || !pdata->cols ||
+       if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
            pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
                dev_err(&pdev->dev, "Invalid platform_data\n");
                return -EINVAL;
        }
 
+       keymap_data = pdata->keymap_data;
+
        kp = kzalloc(sizeof(*kp), GFP_KERNEL);
        input = input_allocate_device();
        if (!kp || !input) {
index 7077f9b..62bae99 100644 (file)
@@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
                                   enum xenbus_state backend_state)
 {
        struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
-       int val;
+       int ret, val;
 
        switch (backend_state) {
        case XenbusStateInitialising:
@@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 
        case XenbusStateInitWait:
 InitWait:
+               ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                  "feature-abs-pointer", "%d", &val);
+               if (ret < 0)
+                       val = 0;
+               if (val) {
+                       ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+                                           "request-abs-pointer", "1");
+                       if (ret)
+                               pr_warning("xenkbd: can't request abs-pointer\n");
+               }
+
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
 
index efa0688..45f93d0 100644 (file)
@@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
                        IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
                printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
                err = -EBUSY;
-               goto fail2;
+               goto fail1;
        }
 
        if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
                        IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
                printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
                err = -EBUSY;
-               goto fail3;
+               goto fail2;
        }
 
        serio_set_drvdata(serio, ts);
 
        err = serio_open(serio, drv);
        if (err)
-               return err;
+               goto fail3;
 
        //h3600_flite_control(1, 25);     /* default brightness */
-       input_register_device(ts->dev);
+       err = input_register_device(ts->dev);
+       if (err)
+               goto fail4;
 
        return 0;
 
-fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4: serio_close(serio);
+fail3: serio_set_drvdata(serio, NULL);
+       free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
 fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: serio_set_drvdata(serio, NULL);
-       input_free_device(input_dev);
+fail1: input_free_device(input_dev);
        kfree(ts);
        return err;
 }
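The relabelled error path restores the usual kernel unwind idiom: acquire
resources in order and, on failure, jump to a label that releases
everything obtained so far in reverse order.  Schematically (all names
are placeholders):

	int acquire_a(void); int acquire_b(void); int acquire_c(void);
	void release_a(void); void release_b(void);

	static int example_connect(void)
	{
		int err;

		err = acquire_a();
		if (err)
			goto fail_a;
		err = acquire_b();
		if (err)
			goto fail_b;
		err = acquire_c();
		if (err)
			goto fail_c;
		return 0;

	fail_c:	release_b();
	fail_b:	release_a();
	fail_a:	return err;
	}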
index 5ef136c..e5d8904 100644 (file)
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
        return md_raid5_congested(&rs->md, bits);
 }
 
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
-       struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-       md_raid5_kick_device(rs->md.private);
-}
-
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
 
        rs->callbacks.congested_fn = raid_is_congested;
-       rs->callbacks.unplug_fn = raid_unplug;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
        return 0;
index b12b377..7d6f7f1 100644 (file)
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  Each one holds a reference to the mddev, whose plug_cnt
+ * keeps a count of the number of outstanding plugs so other code can
+ * see if a plug is active.
  */
-static void plugger_work(struct work_struct *work)
-{
-       struct plug_handle *plug =
-               container_of(work, struct plug_handle, unplug_work);
-       plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
-       struct plug_handle *plug = (void *)data;
-       kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
-                 void (*unplug_fn)(struct plug_handle *))
-{
-       plug->unplug_flag = 0;
-       plug->unplug_fn = unplug_fn;
-       init_timer(&plug->unplug_timer);
-       plug->unplug_timer.function = plugger_timeout;
-       plug->unplug_timer.data = (unsigned long)plug;
-       INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+       struct blk_plug_cb cb;
+       mddev_t *mddev;
+};
 
-void plugger_set_plug(struct plug_handle *plug)
+static void plugger_unplug(struct blk_plug_cb *cb)
 {
-       if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
-               mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+       struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+       if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+               md_wakeup_thread(mdcb->mddev->thread);
+       kfree(mdcb);
 }
-EXPORT_SYMBOL_GPL(plugger_set_plug);
 
-int plugger_remove_plug(struct plug_handle *plug)
+/* Check that an unplug wakeup will come shortly.
+ * If not (we return 0), the caller should wake the md thread immediately.
+ */
+int mddev_check_plugged(mddev_t *mddev)
 {
-       if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
-               del_timer(&plug->unplug_timer);
-               return 1;
-       } else
+       struct blk_plug *plug = current->plug;
+       struct md_plug_cb *mdcb;
+
+       if (!plug)
                return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
 
+       list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+               if (mdcb->cb.callback == plugger_unplug &&
+                   mdcb->mddev == mddev) {
+                       /* Already on the list, move to top */
+                       if (mdcb != list_first_entry(&plug->cb_list,
+                                                   struct md_plug_cb,
+                                                   cb.list))
+                               list_move(&mdcb->cb.list, &plug->cb_list);
+                       return 1;
+               }
+       }
+       /* Not currently on the callback list */
+       mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+       if (!mdcb)
+               return 0;
+
+       mdcb->mddev = mddev;
+       mdcb->cb.callback = plugger_unplug;
+       atomic_inc(&mddev->plug_cnt);
+       list_add(&mdcb->cb.list, &plug->cb_list);
+       return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
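mddev_check_plugged() piggybacks on the per-task plugging introduced in
this kernel: callbacks hung off current->plug run when the task unplugs,
so per-bio wakeups can be deferred until a whole batch is queued.  A
sketch of the submitter side under that assumption (queue_bio() is a
placeholder):

	#include <linux/blkdev.h>

	static void submit_batch(mddev_t *mddev, struct bio **bios, int n)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < n; i++) {
			int plugged = mddev_check_plugged(mddev);

			queue_bio(mddev, bios[i]);	/* placeholder */
			if (!plugged)	/* no plug active, wake the thread now */
				md_wakeup_thread(mddev->thread);
		}
		/* runs plugger_unplug(), which wakes the md thread */
		blk_finish_plug(&plug);
	}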
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
+       atomic_set(&mddev->plug_cnt, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
@@ -3158,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
        mddev->layout = mddev->new_layout;
        mddev->chunk_sectors = mddev->new_chunk_sectors;
        mddev->delta_disks = 0;
+       mddev->degraded = 0;
        if (mddev->pers->sync_request == NULL) {
                /* this is now an array without redundancy, so
                 * it must always be in_sync
@@ -4723,7 +4736,6 @@ static void md_clean(mddev_t *mddev)
        mddev->bitmap_info.chunksize = 0;
        mddev->bitmap_info.daemon_sleep = 0;
        mddev->bitmap_info.max_write_behind = 0;
-       mddev->plug = NULL;
 }
 
 static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6700,6 @@ int md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-void md_unplug(mddev_t *mddev)
-{
-       if (mddev->plug)
-               mddev->plug->unplug_fn(mddev->plug);
-}
-
 #define SYNC_MARKS     10
 #define        SYNC_MARK_STEP  (3*HZ)
 void md_do_sync(mddev_t *mddev)
index 52b4073..0b1fd3f 100644 (file)
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
-       void                    (*unplug_fn)(struct plug_handle *);
-       struct timer_list       unplug_timer;
-       struct work_struct      unplug_work;
-       unsigned long           unplug_flag;
-};
-#define        PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
-                 void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
-       del_timer_sync(&plug->unplug_timer);
-       cancel_work_sync(&plug->unplug_work);
-}
-
 /*
  * MD's 'extended' device
  */
@@ -199,6 +179,9 @@ struct mddev_s
        int                             delta_disks, new_level, new_layout;
        int                             new_chunk_sectors;
 
+       atomic_t                        plug_cnt;       /* Nonzero while the device
+                                                        * is expecting more bios soon.
+                                                        */
        struct mdk_thread_s             *thread;        /* management thread */
        struct mdk_thread_s             *sync_thread;   /* doing resync or reconstruct */
        sector_t                        curr_resync;    /* last block scheduled */
@@ -336,7 +319,6 @@ struct mddev_s
        struct list_head                all_mddevs;
 
        struct attribute_group          *to_remove;
-       struct plug_handle              *plug; /* if used by personality */
 
        struct bio_set                  *bio_set;
 
@@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev);
 extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
 
 extern void mddev_init(mddev_t *mddev);
 extern int md_run(mddev_t *mddev);
@@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                                   mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
index c2a21ae..2b7a7ff 100644 (file)
@@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf)
                spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-       blk_flush_plug(current);
-       md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf)
 
        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
@@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf)
        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
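The fourth argument that becomes empty here is the "cmd" slot of
wait_event_lock_irq(): code run each time the waiter drops the lock
before sleeping.  With the old unplug kick gone there is nothing left to
run.  A simplified rendering of the macro's behaviour, assuming the md
definition of this era:

	#include <linux/wait.h>

	#define wait_event_lock_irq_sketch(wq, condition, lock, cmd)	\
	do {								\
		DEFINE_WAIT(__wait);					\
		for (;;) {						\
			prepare_to_wait(&(wq), &__wait,			\
					TASK_UNINTERRUPTIBLE);		\
			if (condition)					\
				break;					\
			spin_unlock_irq(&(lock));			\
			cmd;	/* e.g. the old md_kick_device() */	\
			schedule();					\
			spin_lock_irq(&(lock));				\
		}							\
		finish_wait(&(wq), &__wait);				\
	} while (0)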
@@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   md_kick_device(conf->mddev));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              md_kick_device(conf->mddev); }));
+                           flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        mdk_rdev_t *blocked_rdev;
+       int plugged;
 
        /*
         * Register the new request and wait if the reconstruction
@@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
+       plugged = mddev_check_plugged(mddev);
+
        disks = conf->raid_disks;
  retry_write:
        blocked_rdev = NULL;
@@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync || !bitmap)
+       if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
        return 0;
@@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev)
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
-       
+
+       blk_start_plug(&plug);
        for (;;) {
                char b[BDEVNAME_SIZE];
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev)
                }
                cond_resched();
        }
+       blk_finish_plug(&plug);
 }
 
 
@@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev)
 
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
-       blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        if (conf->r1bio_pool)
                mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
index 2da83d5..8e94626 100644 (file)
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
                spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-       blk_flush_plug(current);
-       md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
 
        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
 
-       /* No wait for all pending IO to complete */
+       /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   md_kick_device(conf->mddev));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              md_kick_device(conf->mddev); }));
+                           flush_pending_writes(conf));
+
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
+       int plugged;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
+       plugged = mddev_check_plugged(mddev);
+
        raid10_find_phys(conf, r10_bio);
  retry_write:
        blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync || !mddev->bitmap)
+       if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-
        return 0;
 }
 
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        for (;;) {
                char b[BDEVNAME_SIZE];
 
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
                }
                cond_resched();
        }
+       blk_finish_plug(&plug);
 }
 
 
index e867ee4..49bf5f8 100644 (file)
  *
  * We group bitmap updates into batches.  Each batch has a number.
  * We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
  *    new additions.
  * When we discover that we will need to write to any block in a stripe
  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
  * When we are ready to do a write, if that batch hasn't been written yet,
  *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
@@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state)) {
+                       if (test_bit(STRIPE_DELAYED, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
-                               plugger_set_plug(&conf->plug);
-                       } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-                                  sh->bm_seq - conf->seq_write > 0) {
+                       else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+                                  sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
-                               plugger_set_plug(&conf->plug);
-                       } else {
+                       else {
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
                                                     < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
-                                                   md_raid5_kick_device(conf));
+                                                   );
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, previous);
@@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
                wait_event_lock_irq(conf->wait_for_stripe,
                                    !list_empty(&conf->inactive_list),
                                    conf->device_lock,
-                                   blk_flush_plug(current));
+                                   );
                osh = get_free_stripe(conf);
                spin_unlock_irq(&conf->device_lock);
                atomic_set(&nsh->count, 1);
@@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
                                atomic_inc(&conf->preread_active_stripes);
                        list_add_tail(&sh->lru, &conf->hold_list);
                }
-       } else
-               plugger_set_plug(&conf->plug);
+       }
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
@@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf)
        }
 }
 
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
-       blk_flush_plug(current);
-       raid5_activate_delayed(conf);
-       md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
-       raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
-       md_raid5_kick_device(conf);
-}
-
 int md_raid5_congested(mddev_t *mddev, int bits)
 {
        raid5_conf_t *conf = mddev->private;
@@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
+       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
+       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int disks, data_disks;
@@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                                 * add failed due to overlap.  Flush everything
                                 * and wait a while
                                 */
-                               md_raid5_kick_device(conf);
+                               md_wakeup_thread(mddev->thread);
                                release_stripe(sh);
                                schedule();
                                goto retry;
@@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                }
                        
        }
+       if (!plugged)
+               md_wakeup_thread(mddev->thread);
+
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
        spin_unlock_irq(&conf->device_lock);
@@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev)
        struct stripe_head *sh;
        raid5_conf_t *conf = mddev->private;
        int handled;
+       struct blk_plug plug;
 
        pr_debug("+++ raid5d active\n");
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        handled = 0;
        spin_lock_irq(&conf->device_lock);
        while (1) {
                struct bio *bio;
 
-               if (conf->seq_flush != conf->seq_write) {
-                       int seq = conf->seq_flush;
+               if (atomic_read(&mddev->plug_cnt) == 0 &&
+                   !list_empty(&conf->bitmap_list)) {
+                       /* Now is a good time to flush some bitmap updates */
+                       conf->seq_flush++;
                        spin_unlock_irq(&conf->device_lock);
                        bitmap_unplug(mddev->bitmap);
                        spin_lock_irq(&conf->device_lock);
-                       conf->seq_write = seq;
+                       conf->seq_write = conf->seq_flush;
                        activate_bit_delay(conf);
                }
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       raid5_activate_delayed(conf);
 
                while ((bio = remove_bio_from_retry(conf))) {
                        int ok;
@@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev)
        spin_unlock_irq(&conf->device_lock);
 
        async_tx_issue_pending_all();
+       blk_finish_plug(&plug);
 
        pr_debug("--- raid5d inactive\n");
 }
@@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev)
                       mdname(mddev));
        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-       plugger_init(&conf->plug, raid5_unplug);
-       mddev->plug = &conf->plug;
        if (mddev->queue) {
                int chunk_size;
                /* read-ahead size must cover two whole stripes, which
@@ -5159,7 +5151,6 @@ static int run(mddev_t *mddev)
 
                mddev->queue->backing_dev_info.congested_data = mddev;
                mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-               mddev->queue->queue_lock = &conf->device_lock;
 
                chunk_size = mddev->chunk_sectors << 9;
                blk_queue_io_min(mddev->queue, chunk_size);
@@ -5192,7 +5183,6 @@ static int stop(mddev_t *mddev)
        mddev->thread = NULL;
        if (mddev->queue)
                mddev->queue->backing_dev_info.congested_fn = NULL;
-       plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
        free_conf(conf);
        mddev->private = NULL;
        mddev->to_remove = &raid5_attrs_group;
@@ -5688,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 {
        struct raid0_private_data *raid0_priv = mddev->private;
+       sector_t sectors;
 
        /* for raid0 takeover only one zone is supported */
        if (raid0_priv->nr_strip_zones > 1) {
@@ -5696,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
                return ERR_PTR(-EINVAL);
        }
 
+       sectors = raid0_priv->strip_zone[0].zone_end;
+       sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+       mddev->dev_sectors = sectors;
        mddev->new_level = level;
        mddev->new_layout = ALGORITHM_PARITY_N;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
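The added computation relies on sector_div(), which divides a sector_t
in place and returns the remainder, so the 64-bit division also works on
32-bit builds.  Illustratively:

	sector_t sectors = raid0_priv->strip_zone[0].zone_end;
	u32 rem = sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
	/* sectors now holds the per-device size; rem should be 0 for a
	 * cleanly divisible zone */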
index 8d563a4..3ca77a2 100644 (file)
@@ -400,8 +400,6 @@ struct raid5_private_data {
                                            * Cleared when a sync completes.
                                            */
 
-       struct plug_handle      plug;
-
        /* per cpu variables */
        struct raid5_percpu {
                struct page     *spare_page; /* Used when checking P/Q in raid6 */
index c4742fc..c969111 100644 (file)
@@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        retval = remap_pfn_range(vma, vma->vm_start,
-                                PFN_DOWN(virt_to_phys(mem->vaddr)),
+                                mem->dma_handle >> PAGE_SHIFT,
                                 size, vma->vm_page_prot);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
index e3de0b8..7581518 100644 (file)
@@ -38,6 +38,8 @@
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)                     \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)               \
+                       ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)                      \
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
-                       if (bfa_ioc_sync_complete(ioc)) {
+                       if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
        struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
        if (fwhdr.signature != drv_fwhdr->signature)
                return false;
 
-       if (fwhdr.exec != drv_fwhdr->exec)
+       if (swab32(fwhdr.param) != boot_env)
                return false;
 
        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 {
        enum bfi_ioc_state ioc_fwstate;
        bool fwvalid;
+       u32 boot_env;
 
        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
+       boot_env = BFI_BOOT_LOADER_OS;
+
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;
 
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-               false : bfa_ioc_fwver_valid(ioc);
+               false : bfa_ioc_fwver_valid(ioc, boot_env);
 
        if (!fwvalid) {
-               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
                return;
        }
 
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
        /**
         * Initialize the h/w for any other states.
         */
-       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }
 
 void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-                   u32 boot_param)
+                   u32 boot_env)
 {
        u32 *fwimg;
        u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
        /*
         * Set boot type and boot param at the end.
        */
-       writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+       writel(boot_type, ((ioc->ioc_regs.smem_page_start)
                        + (BFI_BOOT_TYPE_OFF)));
-       writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-                       + (BFI_BOOT_PARAM_OFF)));
+       writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+                       + (BFI_BOOT_LOADER_OFF)));
 }
 
 static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
        void __iomem *rb;
 
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
         * Initialize IOC state of all functions on a chip reset.
         */
        rb = ioc->pcidev.pci_bar_kva;
-       if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
        } else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
        }
 
        bfa_ioc_msgflush(ioc);
-       bfa_ioc_download_fw(ioc, boot_type, boot_param);
+       bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
        /**
         * Enable interrupts just before starting LPU
index e4974bc..bd48abe 100644 (file)
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
                                        bool msix);
        void            (*ioc_notify_fail)      (struct bfa_ioc *ioc);
        void            (*ioc_ownership_reset)  (struct bfa_ioc *ioc);
+       bool            (*ioc_sync_start)       (struct bfa_ioc *ioc);
        void            (*ioc_sync_join)        (struct bfa_ioc *ioc);
        void            (*ioc_sync_leave)       (struct bfa_ioc *ioc);
        void            (*ioc_sync_ack)         (struct bfa_ioc *ioc);
index 469997c..87aecdf 100644 (file)
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
        nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
        nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+       nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
        nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
        nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
        nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -342,6 +344,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
        bfa_nw_ioc_hw_sem_release(ioc);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+       u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+       u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+       /*
+        * Driver load time.  If the sync required bit for this PCI fn
+        * is set, it is due to an unclean exit by the driver for this
+        * PCI fn in the previous incarnation. Whoever comes here first
+        * should clean it up, no matter which PCI fn.
+        */
+
+       if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+               writel(0, ioc->ioc_regs.ioc_fail_sync);
+               writel(1, ioc->ioc_regs.ioc_usage_reg);
+               writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+               return true;
+       }
+
+       return bfa_ioc_ct_sync_complete(ioc);
+}
+
 /**
  * Synchronized IOC failure processing routines
  */
index a973968..6050379 100644 (file)
@@ -184,12 +184,14 @@ enum bfi_mclass {
 #define BFI_IOC_MSGLEN_MAX     32      /* 32 bytes */
 
 #define BFI_BOOT_TYPE_OFF              8
-#define BFI_BOOT_PARAM_OFF             12
+#define BFI_BOOT_LOADER_OFF            12
 
-#define BFI_BOOT_TYPE_NORMAL           0       /* param is device id */
+#define BFI_BOOT_TYPE_NORMAL           0
 #define        BFI_BOOT_TYPE_FLASH             1
 #define        BFI_BOOT_TYPE_MEMTEST           2
 
+#define BFI_BOOT_LOADER_OS             0
+
 #define BFI_BOOT_MEMTEST_RES_ADDR   0x900
 #define BFI_BOOT_MEMTEST_RES_SIG    0xA0A1A2A3
 
index 9f356d5..8e6ceab 100644 (file)
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
        /* Initialize the Rx event handlers */
        rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
        rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
-       rx_cbfn.rcb_destroy_cbfn = NULL;
        rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
        rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
        rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
index f505015..89cb977 100644 (file)
@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0)
                        bnx2x_set_led(&bp->link_params, &bp->link_vars,
-                                     LED_MODE_OPER, SPEED_1000);
+                                     LED_MODE_ON, SPEED_1000);
                else
                        bnx2x_set_led(&bp->link_params, &bp->link_vars,
-                                     LED_MODE_OFF, 0);
+                                     LED_MODE_FRONT_PANEL_OFF, 0);
 
                msleep_interruptible(500);
                if (signal_pending(current))
                        break;
        }
 
-       if (bp->link_vars.link_up)
-               bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
-                             bp->link_vars.line_speed);
+       bnx2x_set_led(&bp->link_params, &bp->link_vars,
+                     LED_MODE_OPER, bp->link_vars.line_speed);
 
        return 0;
 }
index 9bc5de3..ba71582 100644 (file)
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
        bond_info->tx_hashtbl = new_hashtbl;
 
        for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
-               tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+               tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
        }
 
        _unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                 */
                rlb_choose_channel(skb, bond);
 
-               /* The ARP relpy packets must be delayed so that
+               /* The ARP reply packets must be delayed so that
                 * they can cancel out the influence of the ARP request.
                 */
                bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  *
  * If the permanent hw address of @slave is @bond's hw address, we need to
  * find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
  * addresses of the other slaves.
  *
  * We go over the slave list, and for each slave there we compare its
index 86861f0..8ca7158 100644 (file)
@@ -75,7 +75,7 @@ struct tlb_client_info {
                                 * gave this entry index.
                                 */
        u32 tx_bytes;           /* Each Client accumulates the BytesTx that
-                                * were tranmitted to it, and after each
+                                * were transmitted to it, and after each
                                 * CallBack the LoadHistory is divided
                                 * by the balance interval
                                 */
@@ -122,7 +122,6 @@ struct tlb_slave_info {
 };
 
 struct alb_bond_info {
-       struct timer_list       alb_timer;
        struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
        spinlock_t              tx_hashtbl_lock;
        u32                     unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
        struct slave            *next_rx_slave;/* next slave to be assigned
                                                * to a new rx client for
                                                */
-       u32                     rlb_interval_counter;
        u8                      primary_is_promisc;        /* boolean */
        u32                     rlb_promisc_timeout_counter;/* counts primary
                                                             * promiscuity time
index c0a1bc5..bd1d811 100644 (file)
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
 
        if (!ofdev->dev.of_match)
                return -EINVAL;
-       data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+       data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
 
        base = of_iomap(np, 0);
        if (!base) {
index ea0dc45..d70fb76 100644 (file)
@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
                | NETIF_F_RXCSUM
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
-               | NETIF_F_NETNS_LOCAL;
+               | NETIF_F_NETNS_LOCAL
+               | NETIF_F_VLAN_CHALLENGED;
        dev->ethtool_ops        = &loopback_ethtool_ops;
        dev->header_ops         = &eth_header_ops;
        dev->netdev_ops         = &loopback_ops;
index aa2813e..1074231 100644 (file)
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
                prev_eedata = eedata;
        }
 
+       /* Store MAC Address in perm_addr */
+       memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
        dev->base_addr = (unsigned long __force) ioaddr;
        dev->irq = irq;
 
index d7299f1..679dc85 100644 (file)
 
 #define        MAX_NUM_CARDS           4
 
-#define MAX_BUFFERS_PER_CMD    32
+#define NETXEN_MAX_FRAGS_PER_TX        14
 #define MAX_TSO_HEADER_DESC    2
 #define MGMT_CMD_DESC_RESV     4
 #define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
  */
 struct netxen_cmd_buffer {
        struct sk_buff *skb;
-       struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+       struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
        u32 frag_count;
 };
 
index 83348dc..e8a4b66 100644 (file)
@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        struct cmd_desc_type0 *hwdesc, *first_desc;
        struct pci_dev *pdev;
        int i, k;
+       int delta = 0;
+       struct skb_frag_struct *frag;
 
        u32 producer;
        int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        frag_count = skb_shinfo(skb)->nr_frags + 1;
 
+       /* Up to 14 frags are supported for a normal packet and
+        * up to 32 frags for a TSO packet.
+        */
+       if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+               for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+                       frag = &skb_shinfo(skb)->frags[i];
+                       delta += frag->size;
+               }
+
+               if (!__pskb_pull_tail(skb, delta))
+                       goto drop_packet;
+
+               frag_count = 1 + skb_shinfo(skb)->nr_frags;
+       }
        /* 4 fragments per cmd des */
        no_of_desc = (frag_count + 3) >> 2;
 
index dc44564..b0dead0 100644 (file)
@@ -99,6 +99,7 @@
 #define TX_UDPV6_PKT   0x0c
 
 /* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX        14
 #define MAX_TSO_HEADER_DESC    2
 #define MGMT_CMD_DESC_RESV     4
 #define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
index cd88c7e..cb1a1ef 100644 (file)
@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        struct cmd_desc_type0 *hwdesc, *first_desc;
        struct pci_dev *pdev;
        struct ethhdr *phdr;
+       int delta = 0;
        int i, k;
 
        u32 producer;
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        frag_count = skb_shinfo(skb)->nr_frags + 1;
+       /* Up to 14 frags are supported for a normal packet and
+        * up to 32 frags for a TSO packet.
+        */
+       if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+               for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+                       delta += skb_shinfo(skb)->frags[i].size;
+
+               if (!__pskb_pull_tail(skb, delta))
+                       goto drop_packet;
+
+               frag_count = 1 + skb_shinfo(skb)->nr_frags;
+       }
 
        /* 4 fragments per cmd descriptor */
        no_of_desc = (frag_count + 3) >> 2;
index d890679..a3c2aab 100644 (file)
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 
        BUG_ON(channel->channel >= efx->n_channels);
        BUG_ON(!channel->enabled);
+       BUG_ON(!efx->loopback_selftest);
 
        /* Disable interrupts and wait for ISRs to complete */
        efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
         * restart the transmit interface early so the watchdog timer stops */
        efx_start_port(efx);
 
-       if (efx_dev_registered(efx))
+       if (efx_dev_registered(efx) && !efx->port_inhibited)
                netif_tx_wake_all_queues(efx->net_dev);
 
        efx_for_each_channel(channel, efx)
index d9d8c2e..cc97880 100644 (file)
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
        spin_lock_irqsave(&efx->biu_lock, flags);
        value->u32[0] = _efx_readd(efx, reg + 0);
+       rmb();
        value->u32[1] = _efx_readd(efx, reg + 4);
        value->u32[2] = _efx_readd(efx, reg + 8);
        value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
        value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
        value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+       rmb();
        value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
        spin_unlock_irqrestore(&efx->biu_lock, flags);
index 9ffa9a6..191a311 100644 (file)
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
        unsigned int eventq_mask;
        unsigned int eventq_read_ptr;
        unsigned int last_eventq_read_ptr;
-       unsigned int magic_count;
 
        unsigned int irq_count;
        unsigned int irq_mod_score;
index e839661..10f1cb7 100644 (file)
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
                                     unsigned int index)
 {
-       return ((efx_qword_t *) (channel->eventq.addr)) + index;
+       return ((efx_qword_t *) (channel->eventq.addr)) +
+               (index & channel->eventq_mask);
 }
 
 /* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
        efx_dword_t reg;
        struct efx_nic *efx = channel->efx;
 
-       EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+       EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+                            channel->eventq_read_ptr & channel->eventq_mask);
        efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
                         channel->channel);
 }
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 
        code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
        if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-               ++channel->magic_count;
+               ; /* ignore */
        else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
                /* The queue must be empty, so we won't receive any rx
                 * events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
                /* Clear this event by marking it all ones */
                EFX_SET_QWORD(*p_event);
 
-               /* Increment read pointer */
-               read_ptr = (read_ptr + 1) & channel->eventq_mask;
+               ++read_ptr;
 
                ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
 
@@ -1060,6 +1061,13 @@ out:
        return spent;
 }
 
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+       return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        unsigned int read_ptr = channel->eventq_read_ptr;
-       unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+       unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 
        do {
                efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
                 * it's ok to throw away every non-flush event */
                EFX_SET_QWORD(*event);
 
-               read_ptr = (read_ptr + 1) & channel->eventq_mask;
+               ++read_ptr;
        } while (read_ptr != end_ptr);
 
        channel->eventq_read_ptr = read_ptr;
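
The nic.c hunks above all serve one change of convention: eventq_read_ptr now free-runs and is masked only where it indexes the ring, which is what lets the self-test detect progress by comparing raw pointer values. A toy, compilable model of that convention (names are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8u                 /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1u)

    static uint64_t ring[RING_SIZE];

    /* Mask only when indexing; the pointer itself never wraps early. */
    static uint64_t *ring_entry(unsigned int ptr)
    {
            return &ring[ptr & RING_MASK];
    }

    int main(void)
    {
            unsigned int read_ptr = 0;

            for (int i = 0; i < 20; i++) {
                    (void)ring_entry(read_ptr);
                    ++read_ptr;          /* free-running, wraps mod 2^32 */
            }
            printf("ptr=%u slot=%u\n", read_ptr, read_ptr & RING_MASK);
            return 0;
    }
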
index d9de1b6..a42db6e 100644 (file)
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
 
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
index a0f49b3..50ad3bc 100644 (file)
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 static int efx_test_interrupts(struct efx_nic *efx,
                               struct efx_self_tests *tests)
 {
-       struct efx_channel *channel;
-
        netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
        tests->interrupt = -1;
 
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
        efx->last_irq_cpu = -1;
        smp_wmb();
 
-       /* ACK each interrupting event queue. Receiving an interrupt due to
-        * traffic before a test event is raised is considered a pass */
-       efx_for_each_channel(channel, efx) {
-               if (channel->work_pending)
-                       efx_process_channel_now(channel);
-               if (efx->last_irq_cpu >= 0)
-                       goto success;
-       }
-
        efx_nic_generate_interrupt(efx);
 
        /* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
                               struct efx_self_tests *tests)
 {
        struct efx_nic *efx = channel->efx;
-       unsigned int magic_count, count;
+       unsigned int read_ptr, count;
 
        tests->eventq_dma[channel->channel] = -1;
        tests->eventq_int[channel->channel] = -1;
        tests->eventq_poll[channel->channel] = -1;
 
-       magic_count = channel->magic_count;
+       read_ptr = channel->eventq_read_ptr;
        channel->efx->last_irq_cpu = -1;
        smp_wmb();
 
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
        do {
                schedule_timeout_uninterruptible(HZ / 100);
 
-               if (channel->work_pending)
-                       efx_process_channel_now(channel);
-
-               if (channel->magic_count != magic_count)
+               if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
                        goto eventq_ok;
        } while (++count < 2);
 
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
        }
 
        /* Check to see if event was received even if interrupt wasn't */
-       efx_process_channel_now(channel);
-       if (channel->magic_count != magic_count) {
+       if (efx_nic_event_present(channel)) {
                netif_err(efx, drv, efx->net_dev,
                          "channel %d event was generated, but "
                          "failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
 
+       netif_tx_wake_all_queues(efx->net_dev);
+
        return rc_test;
 }
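
With magic_count gone, the self-test polls eventq_read_ptr instead, and ACCESS_ONCE() forces a fresh load on every pass so the compiler cannot hoist the read out of the loop. A compilable userspace analogue of the idiom, with a relaxed atomic load playing the ACCESS_ONCE() role (all names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    static _Atomic unsigned int progress;   /* advanced by another context */

    /* Poll until the counter moves or we give up; the explicit atomic
     * load is the userspace stand-in for the kernel's ACCESS_ONCE(). */
    static bool wait_for_progress(unsigned int old, int tries)
    {
            while (tries-- > 0) {
                    if (atomic_load_explicit(&progress,
                                             memory_order_relaxed) != old)
                            return true;
                    usleep(10000);          /* ~HZ/100, as in the patch */
            }
            return false;
    }
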
 
index 1398019..d2c85df 100644 (file)
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
         * queue state. */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-           likely(efx->port_enabled)) {
+           likely(efx->port_enabled) &&
+           likely(!efx->port_inhibited)) {
                fill_level = tx_queue->insert_count - tx_queue->read_count;
                if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
                        EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
index cb317cd..484f795 100644 (file)
@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
  *     @net_dev: the net device to get address for
  *
  *     Older SiS900 and friends use EEPROM to store MAC address.
- *     MAC address is read from read_eeprom() into @net_dev->dev_addr.
+ *     MAC address is read from read_eeprom() into @net_dev->dev_addr and
+ *     @net_dev->perm_addr.
  */
 
 static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
        for (i = 0; i < 3; i++)
                ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
 
+       /* Store MAC Address in perm_addr */
+       memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
        return 1;
 }
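
This hunk and the three sis900 hunks after it apply one pattern: snapshot the factory MAC into perm_addr immediately after it is read from EEPROM or CMOS, before anything can overwrite dev_addr, so the permanent address stays reportable (e.g. via ethtool). Compressed into a toy struct with illustrative names:

    #include <string.h>

    #define ETH_ALEN 6

    struct toy_netdev {
            unsigned char dev_addr[ETH_ALEN];   /* current, mutable   */
            unsigned char perm_addr[ETH_ALEN];  /* factory, read once */
    };

    static void record_perm_addr(struct toy_netdev *nd)
    {
            /* the one-liner from the hunks: taken once at probe time */
            memcpy(nd->perm_addr, nd->dev_addr, ETH_ALEN);
    }
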
 
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
  *
  *     SiS630E model, use APC CMOS RAM to store MAC address.
  *     APC CMOS RAM is accessed through ISA bridge.
- *     MAC address is read into @net_dev->dev_addr.
+ *     MAC address is read into @net_dev->dev_addr and
+ *     @net_dev->perm_addr.
  */
 
 static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
                outb(0x09 + i, 0x70);
                ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
        }
+
+       /* Store MAC Address in perm_addr */
+       memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
        pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
        pci_dev_put(isa_bridge);
 
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
  *
  *     SiS635 model, set MAC Reload Bit to load Mac address from APC
  *     to rfdr. rfdr is accessed through rfcr. MAC address is read into
- *     @net_dev->dev_addr.
+ *     @net_dev->dev_addr and @net_dev->perm_addr.
  */
 
 static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
                *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
        }
 
+       /* Store MAC Address in perm_addr */
+       memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
        /* enable packet filtering */
        outl(rfcrSave | RFEN, rfcr + ioaddr);
 
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
  *     EEDONE signal to refuse EEPROM access by LAN.
  *     The EEPROM map of SiS962 or SiS963 is different to SiS900.
  *     The signature field in SiS962 or SiS963 spec is meaningless.
- *     MAC address is read into @net_dev->dev_addr.
+ *     MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
  */
 
 static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
                        for (i = 0; i < 3; i++)
                                ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
 
+                       /* Store MAC Address in perm_addr */
+                       memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
                        outl(EEDONE, ee_addr);
                        return 1;
                } else {
index d65fab1..e250935 100644 (file)
@@ -26,9 +26,9 @@
 
 #undef DWMAC_DMA_DEBUG
 #ifdef DWMAC_DMA_DEBUG
-#define DBG(fmt, args...)  printk(fmt, ## args)
+#define DWMAC_LIB_DBG(fmt, args...)  printk(fmt, ## args)
 #else
-#define DBG(fmt, args...)  do { } while (0)
+#define DWMAC_LIB_DBG(fmt, args...)  do { } while (0)
 #endif
 
 /* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
        /* read the status register (CSR5) */
        u32 intr_status = readl(ioaddr + DMA_STATUS);
 
-       DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+       DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
 #ifdef DWMAC_DMA_DEBUG
        /* It displays the DMA process states (CSR5 register) */
        show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
 #endif
        /* ABNORMAL interrupts */
        if (unlikely(intr_status & DMA_STATUS_AIS)) {
-               DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+               DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
                if (unlikely(intr_status & DMA_STATUS_UNF)) {
-                       DBG(INFO, "transmit underflow\n");
+                       DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
                        ret = tx_hard_error_bump_tc;
                        x->tx_undeflow_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_TJT)) {
-                       DBG(INFO, "transmit jabber\n");
+                       DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
                        x->tx_jabber_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_OVF)) {
-                       DBG(INFO, "recv overflow\n");
+                       DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
                        x->rx_overflow_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_RU)) {
-                       DBG(INFO, "receive buffer unavailable\n");
+                       DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
                        x->rx_buf_unav_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_RPS)) {
-                       DBG(INFO, "receive process stopped\n");
+                       DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
                        x->rx_process_stopped_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_RWT)) {
-                       DBG(INFO, "receive watchdog\n");
+                       DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
                        x->rx_watchdog_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_ETI)) {
-                       DBG(INFO, "transmit early interrupt\n");
+                       DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
                        x->tx_early_irq++;
                }
                if (unlikely(intr_status & DMA_STATUS_TPS)) {
-                       DBG(INFO, "transmit process stopped\n");
+                       DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
                        x->tx_process_stopped_irq++;
                        ret = tx_hard_error;
                }
                if (unlikely(intr_status & DMA_STATUS_FBI)) {
-                       DBG(INFO, "fatal bus error\n");
+                       DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
                        x->fatal_bus_error_irq++;
                        ret = tx_hard_error;
                }
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
        /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
        writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
 
-       DBG(INFO, "\n\n");
+       DWMAC_LIB_DBG(KERN_INFO "\n\n");
        return ret;
 }
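
The rename above is a namespace fix: a generic DBG() macro is too easy to collide with another definition, so it gains a driver-specific prefix, and the log level moves into the format string as an adjacent literal. A self-contained model of the compile-out debug macro, with printf standing in for printk:

    #include <stdio.h>

    #define DWMAC_DMA_DEBUG              /* comment out to compile away */
    #ifdef DWMAC_DMA_DEBUG
    #define DWMAC_LIB_DBG(fmt, ...)  printf(fmt, ##__VA_ARGS__)
    #else
    #define DWMAC_LIB_DBG(fmt, ...)  do { } while (0)
    #endif

    int main(void)
    {
            DWMAC_LIB_DBG("%s: [CSR5: 0x%08x]\n", __func__, 0x1234u);
            return 0;
    }
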
 
index 0e5f031..cc973fc 100644 (file)
@@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
                        priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
                        priv->xstats.threshold = tc;
                }
-               stmmac_tx_err(priv);
        } else if (unlikely(status == tx_hard_error))
                stmmac_tx_err(priv);
 }
@@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev)
 
        stmmac_verify_args();
 
-       ret = stmmac_init_phy(dev);
-       if (unlikely(ret)) {
-               pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
-               return ret;
-       }
-
-       /* Request the IRQ lines */
-       ret = request_irq(dev->irq, stmmac_interrupt,
-                         IRQF_SHARED, dev->name, dev);
-       if (unlikely(ret < 0)) {
-               pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
-                      __func__, dev->irq, ret);
-               return ret;
-       }
-
 #ifdef CONFIG_STMMAC_TIMER
        priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
        if (unlikely(priv->tm == NULL)) {
@@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev)
        } else
                priv->tm->enable = 1;
 #endif
+       ret = stmmac_init_phy(dev);
+       if (unlikely(ret)) {
+               pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+               goto open_error;
+       }
 
        /* Create and initialize the TX/RX descriptors chains. */
        priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev)
        init_dma_desc_rings(dev);
 
        /* DMA initialization and SW reset */
-       if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
-                                        priv->dma_tx_phy,
-                                        priv->dma_rx_phy) < 0)) {
-
+       ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+                                 priv->dma_tx_phy, priv->dma_rx_phy);
+       if (ret < 0) {
                pr_err("%s: DMA initialization failed\n", __func__);
-               return -1;
+               goto open_error;
        }
 
        /* Copy the MAC addr into the HW  */
@@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev)
        writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
        writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
 
+       /* Request the IRQ lines */
+       ret = request_irq(dev->irq, stmmac_interrupt,
+                        IRQF_SHARED, dev->name, dev);
+       if (unlikely(ret < 0)) {
+               pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+                      __func__, dev->irq, ret);
+               goto open_error;
+       }
+
        /* Enable the MAC Rx/Tx */
        stmmac_enable_mac(priv->ioaddr);
 
@@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev)
        napi_enable(&priv->napi);
        skb_queue_head_init(&priv->rx_recycle);
        netif_start_queue(dev);
+
        return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+       kfree(priv->tm);
+#endif
+       if (priv->phydev)
+               phy_disconnect(priv->phydev);
+
+       return ret;
 }
 
 /**
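
The reworked stmmac_open() replaces scattered early returns, which leaked the timer allocation and the attached PHY, with a single open_error unwind label. The pattern in miniature, compilable, with stand-in resources:

    #include <stddef.h>
    #include <stdlib.h>

    /* Illustrative only: acquire resources in order, and on any failure
     * jump to one label that releases whatever was actually taken.
     * free(NULL) is a no-op, like the patch's "if (priv->phydev)" check. */
    static int toy_open(void)
    {
            void *timer = NULL, *phy = NULL;
            int ret = 0;

            timer = malloc(32);
            if (!timer)
                    return -1;              /* nothing to unwind yet */

            phy = malloc(32);
            if (!phy) {
                    ret = -1;
                    goto open_error;
            }

            /* ... DMA init, IRQ request: failures also goto open_error ... */
            return 0;

    open_error:
            free(phy);
            free(timer);
            return ret;
    }
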
index 8a3b191..ff32bef 100644 (file)
@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
 /* 
  * The NIC has told us that a packet has been downloaded onto the card, we must
  * find out which packet it has done, clear the skb and information for the packet
- * then advance around the ring for all tranmitted packets
+ * then advance around the ring for all transmitted packets
  */
 
 static void xl_dn_comp(struct net_device *dev) 
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
                        if (lan_status_diff & LSC_SOFT_ERR)
                                        printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
                        if (lan_status_diff & LSC_TRAN_BCN) 
-                                       printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+                                       printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
                        if (lan_status_diff & LSC_SS) 
                                        printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
                        if (lan_status_diff & LSC_RING_REC)
index 5bd1407..9354ca9 100644 (file)
@@ -1675,7 +1675,7 @@ drop_frame:
                        if (lan_status_diff & LSC_SOFT_ERR)
                                printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
                        if (lan_status_diff & LSC_TRAN_BCN)
-                               printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
+                               printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
                        if (lan_status_diff & LSC_SS)
                                printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
                        if (lan_status_diff & LSC_RING_REC)
index 3d2fbe6..2684003 100644 (file)
@@ -1500,7 +1500,7 @@ drop_frame:
                        if (lan_status_diff & LSC_SOFT_ERR)
                                        printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
                        if (lan_status_diff & LSC_TRAN_BCN) 
-                                       printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+                                       printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
                        if (lan_status_diff & LSC_SS) 
                                        printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
                        if (lan_status_diff & LSC_RING_REC)
index f1b8af6..2d10239 100644 (file)
@@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
        }
 
        ret = ath9k_htc_hw_init(hif_dev->htc_handle,
-                               &hif_dev->udev->dev, hif_dev->device_id,
+                               &interface->dev, hif_dev->device_id,
                                hif_dev->udev->product, id->driver_info);
        if (ret) {
                ret = -EINVAL;
@@ -1158,7 +1158,7 @@ fail_resume:
 #endif
 
 static struct usb_driver ath9k_hif_usb_driver = {
-       .name = "ath9k_hif_usb",
+       .name = KBUILD_MODNAME,
        .probe = ath9k_hif_usb_probe,
        .disconnect = ath9k_hif_usb_disconnect,
 #ifdef CONFIG_PM
index 1ec9bcd..c95bc5c 100644 (file)
@@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ah->txchainmask = common->tx_chainmask;
        ah->rxchainmask = common->rx_chainmask;
 
-       if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
-               ath9k_hw_abortpcurecv(ah);
-               if (!ath9k_hw_stopdmarecv(ah)) {
-                       ath_dbg(common, ATH_DBG_XMIT,
-                               "Failed to stop receive dma\n");
-                       bChannelChange = false;
-               }
-       }
-
        if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
                return -EIO;
 
index 562257a..edc1cbb 100644 (file)
@@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
 
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
 {
 #define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
 #define AH_RX_TIME_QUANTUM     100     /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
+       u32 mac_status, last_mac_status = 0;
        int i;
 
+       /* Enable access to the DMA observation bus */
+       REG_WRITE(ah, AR_MACMISC,
+                 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+                  (AR_MACMISC_MISC_OBS_BUS_1 <<
+                   AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
        REG_WRITE(ah, AR_CR, AR_CR_RXD);
 
        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;
+
+               if (!AR_SREV_9300_20_OR_LATER(ah)) {
+                       mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+                       if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+                               *reset = true;
+                               break;
+                       }
+
+                       last_mac_status = mac_status;
+               }
+
                udelay(AH_TIME_QUANTUM);
        }
 
        if (i == 0) {
                ath_err(common,
-                       "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+                       "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
                        AH_RX_STOP_DMA_TIMEOUT / 1000,
                        REG_READ(ah, AR_CR),
-                       REG_READ(ah, AR_DIAG_SW));
+                       REG_READ(ah, AR_DIAG_SW),
+                       REG_READ(ah, AR_DMADBG_7));
                return false;
        } else {
                return true;
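
The new loop still waits for the RX-enable bit to drop, but on pre-AR9300 parts it also samples a DMA debug register; two consecutive reads of the known-stuck value 0x1c0 mark the engine as wedged so the caller escalates to a full chip reset. A compilable sketch of just the detection, with the register read abstracted behind a callback:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: read_dbg models REG_READ(ah, AR_DMADBG_7). */
    static bool rx_dma_stuck(uint32_t (*read_dbg)(void), int tries,
                             bool *reset)
    {
            uint32_t status, last = 0;

            while (tries-- > 0) {
                    status = read_dbg() & 0x7f0;
                    /* same suspicious value twice in a row => wedged */
                    if (status == 0x1c0 && status == last) {
                            *reset = true;
                            return true;
                    }
                    last = status;
            }
            return false;
    }
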
index b2b2ff8..c2a5938 100644 (file)
@@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
 void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
 void ath9k_hw_abortpcurecv(struct ath_hw *ah);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
 int ath9k_hw_beaconq_setup(struct ath_hw *ah);
 
 /* Interrupt Handling */
index dddb85d..17d04ff 100644 (file)
@@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
 
        ath9k_calculate_iter_data(hw, vif, &iter_data);
 
-       ath9k_ps_wakeup(sc);
        /* Set BSSID mask. */
        memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
        ath_hw_setbssidmask(common);
@@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
        }
 
        ath9k_hw_set_interrupts(ah, ah->imask);
-       ath9k_ps_restore(sc);
 
        /* Set up ANI */
        if ((iter_data.naps + iter_data.nadhocs) > 0) {
@@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_vif *avp = (void *)vif->drv_priv;
        int ret = 0;
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        switch (vif->type) {
@@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        ath9k_do_vif_add_setup(hw, vif);
 out:
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
        return ret;
 }
 
@@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
 
        ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
        mutex_lock(&sc->mutex);
+       ath9k_ps_wakeup(sc);
 
        /* See if new interface type is valid. */
        if ((new_type == NL80211_IFTYPE_ADHOC) &&
@@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
 
        ath9k_do_vif_add_setup(hw, vif);
 out:
+       ath9k_ps_restore(sc);
        mutex_unlock(&sc->mutex);
        return ret;
 }
@@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        sc->nvifs--;
@@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        ath9k_calculate_summary_state(hw, NULL);
 
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
 }
 
 static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
        txq = sc->tx.txq_map[queue];
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                        ath_beaconq_config(sc);
 
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
 
        return ret;
 }
@@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        int slottime;
        int error;
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        if (changed & BSS_CHANGED_BSSID) {
@@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
 }
 
 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
index a9c3f46..dcd19bc 100644 (file)
@@ -486,12 +486,12 @@ start_recv:
 bool ath_stoprecv(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
-       bool stopped;
+       bool stopped, reset = false;
 
        spin_lock_bh(&sc->rx.rxbuflock);
        ath9k_hw_abortpcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
-       stopped = ath9k_hw_stopdmarecv(ah);
+       stopped = ath9k_hw_stopdmarecv(ah, &reset);
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
                        "confusing the DMA engine when we start RX up\n");
                ATH_DBG_WARN_ON_ONCE(!stopped);
        }
-       return stopped;
+       return stopped || reset;
 }
 
 void ath_flushrecv(struct ath_softc *sc)
index 248c670..5c2cfe6 100644 (file)
@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
        {APL9_WORLD, CTL_ETSI, CTL_ETSI},
 
        {APL3_FCCA, CTL_FCC, CTL_FCC},
+       {APL7_FCCA, CTL_FCC, CTL_FCC},
        {APL1_ETSIC, CTL_FCC, CTL_ETSI},
        {APL2_ETSIC, CTL_FCC, CTL_ETSI},
        {APL2_APLD, CTL_FCC, NO_CTL},
index 2a45dd4..aef65cd 100644 (file)
@@ -1,6 +1,5 @@
 config IWLWIFI_LEGACY
-       tristate "Intel Wireless Wifi legacy devices"
-       depends on PCI && MAC80211
+       tristate
        select FW_LOADER
        select NEW_LEDS
        select LEDS_CLASS
@@ -65,7 +64,8 @@ endmenu
 
 config IWL4965
        tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
-       depends on IWLWIFI_LEGACY
+       depends on PCI && MAC80211
+       select IWLWIFI_LEGACY
        ---help---
          This option enables support for
 
@@ -92,7 +92,8 @@ config IWL4965
 
 config IWL3945
        tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
-       depends on IWLWIFI_LEGACY
+       depends on PCI && MAC80211
+       select IWLWIFI_LEGACY
        ---help---
          Select to build the driver supporting the:
 
index 779d3cb..5c3a68d 100644 (file)
@@ -74,8 +74,6 @@
 /* RSSI to dBm */
 #define IWL39_RSSI_OFFSET      95
 
-#define IWL_DEFAULT_TX_POWER   0x0F
-
 /*
  * EEPROM related constants, enums, and structures.
  */
index 08b189c..fc6fa28 100644 (file)
@@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl {
 
 #define IWL4965_DEFAULT_TX_RETRY  15
 
-/* Limit range of txpower output target to be between these values */
-#define IWL4965_TX_POWER_TARGET_POWER_MIN      (0)     /* 0 dBm: 1 milliwatt */
-
 /* EEPROM */
 #define IWL4965_FIRST_AMPDU_QUEUE      10
 
index 7007d61..c1511b1 100644 (file)
@@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
        struct ieee80211_channel *geo_ch;
        struct ieee80211_rate *rates;
        int i = 0;
+       s8 max_tx_power = 0;
 
        if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
            priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
 
                        geo_ch->flags |= ch->ht40_extension_channel;
 
-                       if (ch->max_power_avg > priv->tx_power_device_lmt)
-                               priv->tx_power_device_lmt = ch->max_power_avg;
+                       if (ch->max_power_avg > max_tx_power)
+                               max_tx_power = ch->max_power_avg;
                } else {
                        geo_ch->flags |= IEEE80211_CHAN_DISABLED;
                }
@@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
                                 geo_ch->flags);
        }
 
+       priv->tx_power_device_lmt = max_tx_power;
+       priv->tx_power_user_lmt = max_tx_power;
+       priv->tx_power_next = max_tx_power;
+
        if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
             priv->cfg->sku & IWL_SKU_A) {
                IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
        if (!priv->cfg->ops->lib->send_tx_power)
                return -EOPNOTSUPP;
 
-       if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+       /* 0 dBm means 1 milliwatt */
+       if (tx_power < 0) {
                IWL_WARN(priv,
-                        "Requested user TXPOWER %d below lower limit %d.\n",
-                        tx_power,
-                        IWL4965_TX_POWER_TARGET_POWER_MIN);
+                        "Requested user TXPOWER %d below 1 mW.\n",
+                        tx_power);
                return -EINVAL;
        }
 
index 04c5648..cb346d1 100644 (file)
@@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv)
                                             flags & EEPROM_CHANNEL_RADAR))
                                       ? "" : "not ");
 
-                       /* Set the tx_power_user_lmt to the highest power
-                        * supported by any channel */
-                       if (eeprom_ch_info[ch].max_power_avg >
-                                               priv->tx_power_user_lmt)
-                               priv->tx_power_user_lmt =
-                                   eeprom_ch_info[ch].max_power_avg;
-
                        ch_info++;
                }
        }
index 28eb3d8..cc7ebce 100644 (file)
@@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
        priv->force_reset[IWL_FW_RESET].reset_duration =
                IWL_DELAY_NEXT_FORCE_FW_RELOAD;
 
-
-       priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
-       priv->tx_power_next = IWL_DEFAULT_TX_POWER;
-
        if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
                IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
                         eeprom->version);
index 91b3d8b..d484c36 100644 (file)
@@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
 
        iwl_legacy_init_scan_params(priv);
 
-       /* Set the tx_power_user_lmt to the lowest power level
-        * this value will get overwritten by channel max power avg
-        * from eeprom */
-       priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
-       priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
-
        ret = iwl_legacy_init_channel_map(priv);
        if (ret) {
                IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
index 3ea31b6..22e045b 100644 (file)
@@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = {
 struct iwl_cfg iwl5300_agn_cfg = {
        .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
        IWL_DEVICE_5000,
+       /* at least EEPROM 0x11A has wrong info */
+       .valid_tx_ant = ANT_ABC,        /* .cfg overwrite */
+       .valid_rx_ant = ANT_ABC,        /* .cfg overwrite */
        .ht_params = &iwl5000_ht_params,
 };
 
index 3695227..c1ceb4b 100644 (file)
@@ -137,6 +137,7 @@ struct mwl8k_tx_queue {
 struct mwl8k_priv {
        struct ieee80211_hw *hw;
        struct pci_dev *pdev;
+       int irq;
 
        struct mwl8k_device_info *device_info;
 
@@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
        rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
                         IRQF_SHARED, MWL8K_NAME, hw);
        if (rc) {
+               priv->irq = -1;
                wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
                return -EIO;
        }
+       priv->irq = priv->pdev->irq;
 
        /* Enable TX reclaim and RX tasklets.  */
        tasklet_enable(&priv->poll_tx_task);
@@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
        if (rc) {
                iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
                free_irq(priv->pdev->irq, hw);
+               priv->irq = -1;
                tasklet_disable(&priv->poll_tx_task);
                tasklet_disable(&priv->poll_rx_task);
        }
@@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
 
        /* Disable interrupts */
        iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
-       free_irq(priv->pdev->irq, hw);
+       if (priv->irq != -1) {
+               free_irq(priv->pdev->irq, hw);
+               priv->irq = -1;
+       }
 
        /* Stop finalize join worker */
        cancel_work_sync(&priv->finalize_join_worker);
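
mwl8k_stop() can now be reached after request_irq() failed or after the IRQ was already torn down, so the driver records the live IRQ number and treats -1 as "not requested", making the free path idempotent. The guard, compressed (the real free_irq() call is shown as a comment):

    struct toy_priv {
            int irq;    /* -1 whenever no handler is registered */
    };

    static void toy_stop(struct toy_priv *priv)
    {
            if (priv->irq != -1) {
                    /* free_irq(priv->irq, hw); */
                    priv->irq = -1;     /* never free the same IRQ twice */
            }
    }
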
index 7834c26..042842e 100644 (file)
@@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
        struct p54_tx_info *p54info;
        struct p54_hdr *hdr;
        struct p54_tx_data *txhdr;
-       unsigned int padding, len, extra_len;
+       unsigned int padding, len, extra_len = 0;
        int i, j, ridx;
        u16 hdr_flags = 0, aid = 0;
        u8 rate, queue = 0, crypt_offset = 0;
index a3755ff..bc8ce48 100644 (file)
@@ -2550,7 +2550,6 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
                                         const struct parport_pc_via_data *via)
 {
        short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
-       struct resource *base_res;
        u32 ite8872set;
        u32 ite8872_lpt, ite8872_lpthi;
        u8 ite8872_irq, type;
@@ -2561,8 +2560,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
 
        /* make sure which one chip */
        for (i = 0; i < 5; i++) {
-               base_res = request_region(inta_addr[i], 32, "it887x");
-               if (base_res) {
+               if (request_region(inta_addr[i], 32, "it887x")) {
                        int test;
                        pci_write_config_dword(pdev, 0x60,
                                                0xe5000000 | inta_addr[i]);
@@ -2571,7 +2569,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
                        test = inb(inta_addr[i]);
                        if (test != 0xff)
                                break;
-                       release_region(inta_addr[i], 0x8);
+                       release_region(inta_addr[i], 32);
                }
        }
        if (i >= 5) {
@@ -2635,7 +2633,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
        /*
         * Release the resource so that parport_pc_probe_port can get it.
         */
-       release_resource(base_res);
+       release_region(inta_addr[i], 32);
        if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
                                   irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
                printk(KERN_INFO
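
Two resource bugs are fixed in this probe: the retry path released only 8 of the 32 bytes it had requested, and the success path used release_resource() instead of releasing the region. The invariant is worth stating as code; request_region() and release_region() are the real kernel APIs, everything else here is hypothetical:

    #include <linux/errno.h>
    #include <linux/ioport.h>

    #define PROBE_LEN 32    /* whatever span you request, release the same */

    static int sketch_probe_one(unsigned long base)
    {
            if (!request_region(base, PROBE_LEN, "it887x"))
                    return -EBUSY;
            /* ... poke the chip ... */
            release_region(base, PROBE_LEN);  /* not 8, not release_resource() */
            return 0;
    }
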
index c8ff646..0fa466a 100644 (file)
@@ -88,4 +88,6 @@ config PCI_IOAPIC
        depends on HOTPLUG
        default y
 
-select NLS if (DMI || ACPI)
+config PCI_LABEL
+       def_bool y if (DMI || ACPI)
+       select NLS
index 98d61c8..c85f744 100644 (file)
@@ -56,10 +56,10 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
 # ACPI Related PCI FW Functions
 # ACPI _DSM provided firmware instance and string name
 #
-obj-$(CONFIG_ACPI)    += pci-acpi.o pci-label.o
+obj-$(CONFIG_ACPI)    += pci-acpi.o
 
 # SMBIOS provided firmware instance and labels
-obj-$(CONFIG_DMI)    += pci-label.o
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
 
 # Cardbus & CompactPCI use setup-bus
 obj-$(CONFIG_HOTPLUG) += setup-bus.o
index 505c1c7..d552d2c 100644 (file)
@@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 static struct iova_domain reserved_iova_list;
 static struct lock_class_key reserved_rbtree_key;
 
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
 {
        struct pci_dev *pdev = NULL;
        struct iova *iova;
@@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void)
        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                IOVA_PFN(IOAPIC_RANGE_END));
-       if (!iova)
+       if (!iova) {
                printk(KERN_ERR "Reserve IOAPIC range failed\n");
+               return -ENODEV;
+       }
 
        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
@@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void)
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
-                       if (!iova)
+                       if (!iova) {
                                printk(KERN_ERR "Reserve iova failed\n");
+                               return -ENODEV;
+                       }
                }
        }
-
+       return 0;
 }
 
 static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -1835,7 +1839,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 
        ret = iommu_attach_domain(domain, iommu);
        if (ret) {
-               domain_exit(domain);
+               free_domain_mem(domain);
                goto error;
        }
 
@@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
        return 0;
 }
 
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
 {
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2397,15 @@ int __init init_dmars(void)
         *   enable translation
         */
        for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
+               if (drhd->ignored) {
+                       /*
+                        * we always have to disable PMRs or DMA may fail on
+                        * this device
+                        */
+                       if (force_on)
+                               iommu_disable_protect_mem_regions(drhd->iommu);
                        continue;
+               }
                iommu = drhd->iommu;
 
                iommu_flush_write_buffer(iommu);
@@ -3240,9 +3251,15 @@ static int device_notifier(struct notifier_block *nb,
        if (!domain)
                return 0;
 
-       if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+       if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
                domain_remove_one_dev_info(domain, pdev);
 
+               if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+                   !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+                   list_empty(&domain->devices))
+                       domain_exit(domain);
+       }
+
        return 0;
 }
 
@@ -3277,12 +3294,21 @@ int __init intel_iommu_init(void)
        if (no_iommu || dmar_disabled)
                return -ENODEV;
 
-       iommu_init_mempool();
-       dmar_init_reserved_ranges();
+       if (iommu_init_mempool()) {
+               if (force_on)
+                       panic("tboot: Failed to initialize iommu memory\n");
+               return  -ENODEV;
+       }
+
+       if (dmar_init_reserved_ranges()) {
+               if (force_on)
+                       panic("tboot: Failed to reserve iommu ranges\n");
+               return  -ENODEV;
+       }
 
        init_no_remapping_devices();
 
-       ret = init_dmars();
+       ret = init_dmars(force_on);
        if (ret) {
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
@@ -3391,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                domain->iommu_count--;
                domain_update_iommu_cap(domain);
                spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+               spin_lock_irqsave(&iommu->lock, tmp_flags);
+               clear_bit(domain->id, iommu->domain_ids);
+               iommu->domains[domain->id] = NULL;
+               spin_unlock_irqrestore(&iommu->lock, tmp_flags);
        }
 
        spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3607,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
-                       free_pgtable_page(dmar_domain->pgd);
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
+                       free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }
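
The last hunk above is a use-after-free fix: the old code freed the top-level page table and then read the child pointer out of the freed page. Read first, repoint, then free. In miniature, with toy types:

    #include <stdlib.h>

    struct toy_pt { struct toy_pt *child; };

    /* Drop one level of a toy page-table tree safely: capture the old
     * root, repoint, and only then free it.  Freeing first would make
     * the child lookup read freed memory, as in the original code. */
    static void shrink_one_level(struct toy_pt **root)
    {
            struct toy_pt *old = *root;

            *root = old->child;
            free(old);
    }
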
index de0dd7b..bcae8dd 100644 (file)
@@ -394,7 +394,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail2:
-       free_irq(omap_rtc_timer, NULL);
+       free_irq(omap_rtc_timer, rtc);
 fail1:
        rtc_device_unregister(rtc);
 fail0:
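
The free_irq() one-liner matters because the second argument is the dev_id cookie that request_irq() registered; passing NULL (as before) makes the core look for the wrong action, which is fatal on shared lines. The pairing rule, sketched with hypothetical names:

    #include <linux/interrupt.h>

    static int toy_attach(unsigned int irq, irq_handler_t handler, void *rtc)
    {
            return request_irq(irq, handler, IRQF_SHARED, "toy-rtc", rtc);
    }

    static void toy_detach(unsigned int irq, void *rtc)
    {
            free_irq(irq, rtc);     /* same dev_id as at request time */
    }
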
index 6d5c7ff..e9901b8 100644 (file)
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
        list_splice_init(&shost->starved_list, &starved_list);
 
        while (!list_empty(&starved_list)) {
-               int flagset;
-
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
                        continue;
                }
 
-               spin_unlock(shost->host_lock);
-
-               spin_lock(sdev->request_queue->queue_lock);
-               flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-                               !test_bit(QUEUE_FLAG_REENTER,
-                                       &sdev->request_queue->queue_flags);
-               if (flagset)
-                       queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue, false);
-               if (flagset)
-                       queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-               spin_unlock(sdev->request_queue->queue_lock);
-
-               spin_lock(shost->host_lock);
+               blk_run_queue_async(sdev->request_queue);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
index fdf3fa6..815069d 100644 (file)
@@ -3816,28 +3816,17 @@ fail_host_msg:
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-       int flagset;
-       unsigned long flags;
-
        if (!rport->rqst_q)
                return;
 
+       /*
+        * This get/put dance makes no sense
+        */
        get_device(&rport->dev);
-
-       spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
-       flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
-                 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
-       if (flagset)
-               queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q, false);
-       if (flagset)
-               queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-       spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+       blk_run_queue_async(rport->rqst_q);
        put_device(&rport->dev);
 }
 
-
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
  * @q:         rport request queue
index 47f8cdb..74273e6 100644 (file)
@@ -1658,8 +1658,12 @@ static void gsm_queue(struct gsm_mux *gsm)
 
        if ((gsm->control & ~PF) == UI)
                gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
-       /* generate final CRC with received FCS */
-       gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+       if (gsm->encoding == 0) {
+               /* WARNING: gsm->received_fcs is used for gsm->encoding = 0
+                           only.  In this case it contains the last piece
+                           of data required to generate the final CRC. */
+               gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+       }
        if (gsm->fcs != GOOD_FCS) {
                gsm->bad_fcs++;
                if (debug & 4)
index cb36b0d..62df72d 100644 (file)
@@ -382,12 +382,13 @@ static void imx_start_tx(struct uart_port *port)
 static irqreturn_t imx_rtsint(int irq, void *dev_id)
 {
        struct imx_port *sport = dev_id;
-       unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
+       unsigned int val;
        unsigned long flags;
 
        spin_lock_irqsave(&sport->port.lock, flags);
 
        writel(USR1_RTSD, sport->port.membase + USR1);
+       val = readl(sport->port.membase + USR1) & USR1_RTSS;
        uart_handle_cts_change(&sport->port, !!val);
        wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
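
The imx hunk reorders two MMIO accesses: USR1 was previously sampled before the RTS-delta bit was acknowledged, so the handler could report a stale line state. Acknowledge first, then read. Isolated into a sketch (the offsets and bit values below are stand-ins; only the ordering is the point):

    #include <linux/io.h>

    #define USR1       0x94        /* toy offset/bit values: only the */
    #define USR1_RTSD  (1 << 12)   /* ack-then-sample ordering is the */
    #define USR1_RTSS  (1 << 14)   /* part the patch establishes      */

    static unsigned int ack_then_sample_rts(void __iomem *base)
    {
            writel(USR1_RTSD, base + USR1);          /* 1. ack the delta   */
            return readl(base + USR1) & USR1_RTSS;   /* 2. read live state */
    }
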
 
index 4fb5b2b..4bcc8b8 100644 (file)
@@ -590,15 +590,10 @@ static struct virtio_config_ops virtio_pci_config_ops = {
 
 static void virtio_pci_release_dev(struct device *_d)
 {
-       struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+       struct virtio_device *dev = container_of(_d, struct virtio_device,
+                                                dev);
        struct virtio_pci_device *vp_dev = to_vp_device(dev);
-       struct pci_dev *pci_dev = vp_dev->pci_dev;
 
-       vp_del_vqs(dev);
-       pci_set_drvdata(pci_dev, NULL);
-       pci_iounmap(pci_dev, vp_dev->ioaddr);
-       pci_release_regions(pci_dev);
-       pci_disable_device(pci_dev);
        kfree(vp_dev);
 }
 
@@ -681,6 +676,12 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
 
        unregister_virtio_device(&vp_dev->vdev);
+
+       vp_del_vqs(&vp_dev->vdev);
+       pci_set_drvdata(pci_dev, NULL);
+       pci_iounmap(pci_dev, vp_dev->ioaddr);
+       pci_release_regions(pci_dev);
+       pci_disable_device(pci_dev);
 }
 
 #ifdef CONFIG_PM
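
The virtio-pci hunks split teardown between two lifetimes: the PCI remove path now unmaps and disables the device, while the refcounted release callback, which can fire much later, only frees memory. A hedged sketch of that split with toy names:

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct toy_vp_dev {
            struct device dev;          /* embedded, refcounted by the core */
            void __iomem *ioaddr;
    };

    static void toy_release_dev(struct device *d)
    {
            /* may run long after remove: freeing memory is all that's safe */
            kfree(container_of(d, struct toy_vp_dev, dev));
    }

    static void toy_pci_remove(struct pci_dev *pci_dev)
    {
            struct toy_vp_dev *vp = pci_get_drvdata(pci_dev);
            void __iomem *ioaddr = vp->ioaddr;   /* capture before unreg */

            device_unregister(&vp->dev);         /* may drop the last ref */

            pci_set_drvdata(pci_dev, NULL);
            pci_iounmap(pci_dev, ioaddr);
            pci_release_regions(pci_dev);
            pci_disable_device(pci_dev);
    }
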
index cc2f73e..b0043fb 100644 (file)
@@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
+               vq->vring.avail->idx--;
                END_USE(vq);
                return buf;
        }
index de34bfa..5d505aa 100644 (file)
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
 
        if (value) {
                acl = posix_acl_from_xattr(value, size);
-               if (acl == NULL) {
-                       value = NULL;
-                       size = 0;
+               if (acl) {
+                       ret = posix_acl_valid(acl);
+                       if (ret)
+                               goto out;
                } else if (IS_ERR(acl)) {
                        return PTR_ERR(acl);
                }
        }
 
        ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
-
+out:
        posix_acl_release(acl);
 
        return ret;
index 3458b57..2e61fe1 100644 (file)
@@ -740,8 +740,10 @@ struct btrfs_space_info {
         */
        unsigned long reservation_progress;
 
-       int full;               /* indicates that we cannot allocate any more
+       int full:1;             /* indicates that we cannot allocate any more
                                   chunks for this space */
+       int chunk_alloc:1;      /* set if we are allocating a chunk */
+
        int force_alloc;        /* set if we need to force a chunk alloc for
                                   this space */
 
@@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end);
 int btrfs_release_file(struct inode *inode, struct file *file);
+void btrfs_drop_pages(struct page **pages, size_t num_pages);
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+                     struct page **pages, size_t num_pages,
+                     loff_t pos, size_t write_bytes,
+                     struct extent_state **cached);
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
index 8f1d44b..68c84c8 100644 (file)
@@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
                btrfs_destroy_pinned_extent(root,
                                            root->fs_info->pinned_extents);
 
-               t->use_count = 0;
+               atomic_set(&t->use_count, 0);
                list_del_init(&t->list);
                memset(t, 0, sizeof(*t));
                kmem_cache_free(btrfs_transaction_cachep, t);
index f619c3c..31f33ba 100644 (file)
 #include "locking.h"
 #include "free-space-cache.h"
 
+/* control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_LIMITED means to only try to allocate one
+ * if we have very few chunks already allocated.  This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ */
+enum {
+       CHUNK_ALLOC_NO_FORCE = 0,
+       CHUNK_ALLOC_FORCE = 1,
+       CHUNK_ALLOC_LIMITED = 2,
+};
+
 static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
@@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        found->bytes_readonly = 0;
        found->bytes_may_use = 0;
        found->full = 0;
-       found->force_alloc = 0;
+       found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       found->chunk_alloc = 0;
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
        atomic_set(&found->caching_threads, 0);
@@ -3150,7 +3170,7 @@ again:
                if (!data_sinfo->full && alloc_chunk) {
                        u64 alloc_target;
 
-                       data_sinfo->force_alloc = 1;
+                       data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
                        spin_unlock(&data_sinfo->lock);
 alloc:
                        alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3160,7 +3180,8 @@ alloc:
 
                        ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                             bytes + 2 * 1024 * 1024,
-                                            alloc_target, 0);
+                                            alloc_target,
+                                            CHUNK_ALLOC_NO_FORCE);
                        btrfs_end_transaction(trans, root);
                        if (ret < 0) {
                                if (ret != -ENOSPC)
@@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
-                       found->force_alloc = 1;
+                       found->force_alloc = CHUNK_ALLOC_FORCE;
        }
        rcu_read_unlock();
 }
 
 static int should_alloc_chunk(struct btrfs_root *root,
-                             struct btrfs_space_info *sinfo, u64 alloc_bytes)
+                             struct btrfs_space_info *sinfo, u64 alloc_bytes,
+                             int force)
 {
        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+       u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
        u64 thresh;
 
-       if (sinfo->bytes_used + sinfo->bytes_reserved +
-           alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+       if (force == CHUNK_ALLOC_FORCE)
+               return 1;
+
+       /*
+        * in limited mode, we want to have some free space up to
+        * about 1% of the FS size.
+        */
+       if (force == CHUNK_ALLOC_LIMITED) {
+               thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+               thresh = max_t(u64, 64 * 1024 * 1024,
+                              div_factor_fine(thresh, 1));
+
+               if (num_bytes - num_allocated < thresh)
+                       return 1;
+       }
+
+       /*
+        * we have two similar checks here, one based on percentage
+        * and one based on a hard number of 256MB.  The idea
+        * is that if we have a good amount of free
+        * room, don't allocate a chunk.  A good amount means
+        * less than 80% of the chunks we have allocated are in use,
+        * or more than 256MB is free
+        */
+       if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
                return 0;
 
-       if (sinfo->bytes_used + sinfo->bytes_reserved +
-           alloc_bytes < div_factor(num_bytes, 8))
+       if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                return 0;
 
        thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+
+       /* 256MB or 5% of the FS */
        thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
 
        if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
                return 0;
-
        return 1;
 }
 
@@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
        struct btrfs_space_info *space_info;
        struct btrfs_fs_info *fs_info = extent_root->fs_info;
+       int wait_for_alloc = 0;
        int ret = 0;
 
-       mutex_lock(&fs_info->chunk_mutex);
-
        flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
        space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        }
        BUG_ON(!space_info);
 
+again:
        spin_lock(&space_info->lock);
        if (space_info->force_alloc)
-               force = 1;
+               force = space_info->force_alloc;
        if (space_info->full) {
                spin_unlock(&space_info->lock);
-               goto out;
+               return 0;
        }
 
-       if (!force && !should_alloc_chunk(extent_root, space_info,
-                                         alloc_bytes)) {
+       if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
                spin_unlock(&space_info->lock);
-               goto out;
+               return 0;
+       } else if (space_info->chunk_alloc) {
+               wait_for_alloc = 1;
+       } else {
+               space_info->chunk_alloc = 1;
        }
+
        spin_unlock(&space_info->lock);
 
+       mutex_lock(&fs_info->chunk_mutex);
+
+       /*
+        * The chunk_mutex is held throughout the entirety of a chunk
+        * allocation, so once we've acquired the chunk_mutex we know that the
+        * other guy is done and we need to recheck and see if we should
+        * allocate.
+        */
+       if (wait_for_alloc) {
+               mutex_unlock(&fs_info->chunk_mutex);
+               wait_for_alloc = 0;
+               goto again;
+       }
+
        /*
         * If we have mixed data/metadata chunks we want to make sure we keep
         * allocating mixed chunks instead of individual chunks.
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                space_info->full = 1;
        else
                ret = 1;
-       space_info->force_alloc = 0;
+
+       space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
-out:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
        return ret;
 }
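
The locking rework above is the subtle part of this hunk: a spinlock-protected chunk_alloc flag elects a single allocator, and latecomers serialize on chunk_mutex and then retry from `again:`. Below is a hedged pthread model of just that exclusion; do_chunk_alloc_model and both mutex names are illustrative, and the real retry path also re-runs should_alloc_chunk() and may return 0.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t space_lock = PTHREAD_MUTEX_INITIALIZER;	/* spin_lock */
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool chunk_alloc_in_progress;

static int do_chunk_alloc_model(void)
{
	bool wait_for_alloc;

again:
	pthread_mutex_lock(&space_lock);
	/* the real code re-runs should_alloc_chunk() here and may bail out */
	wait_for_alloc = chunk_alloc_in_progress;
	if (!wait_for_alloc)
		chunk_alloc_in_progress = true;
	pthread_mutex_unlock(&space_lock);

	pthread_mutex_lock(&chunk_mutex);
	if (wait_for_alloc) {
		/*
		 * The winner holds chunk_mutex for the whole allocation, so
		 * acquiring it means the winner is done: drop it and recheck.
		 */
		pthread_mutex_unlock(&chunk_mutex);
		goto again;
	}

	/* ... the actual chunk allocation happens here ... */

	pthread_mutex_lock(&space_lock);
	chunk_alloc_in_progress = false;
	pthread_mutex_unlock(&space_lock);
	pthread_mutex_unlock(&chunk_mutex);
	return 1;
}

int main(void)
{
	printf("allocated: %d\n", do_chunk_alloc_model());
	return 0;
}

Build with -lpthread; the point of the shape is that a latecomer can only acquire chunk_mutex after the winner has finished, so its retry sees the freshly allocated chunk.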
@@ -5303,11 +5368,13 @@ loop:
 
                if (allowed_chunk_alloc) {
                        ret = do_chunk_alloc(trans, root, num_bytes +
-                                            2 * 1024 * 1024, data, 1);
+                                            2 * 1024 * 1024, data,
+                                            CHUNK_ALLOC_LIMITED);
                        allowed_chunk_alloc = 0;
                        done_chunk_alloc = 1;
-               } else if (!done_chunk_alloc) {
-                       space_info->force_alloc = 1;
+               } else if (!done_chunk_alloc &&
+                          space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+                       space_info->force_alloc = CHUNK_ALLOC_LIMITED;
                }
 
                if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5393,7 +5460,8 @@ again:
         */
        if (empty_size || root->ref_cows)
                ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-                                    num_bytes + 2 * 1024 * 1024, data, 0);
+                                    num_bytes + 2 * 1024 * 1024, data,
+                                    CHUNK_ALLOC_NO_FORCE);
 
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5405,7 +5473,7 @@ again:
                num_bytes = num_bytes & ~(root->sectorsize - 1);
                num_bytes = max(num_bytes, min_alloc_size);
                do_chunk_alloc(trans, root->fs_info->extent_root,
-                              num_bytes, data, 1);
+                              num_bytes, data, CHUNK_ALLOC_FORCE);
                goto again;
        }
        if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 
        alloc_flags = update_block_group_flags(root, cache->flags);
        if (alloc_flags != cache->flags)
-               do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+               do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                              CHUNK_ALLOC_FORCE);
 
        ret = set_block_group_ro(cache);
        if (!ret)
                goto out;
        alloc_flags = get_alloc_profile(root, cache->space_info->flags);
-       ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+       ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                            CHUNK_ALLOC_FORCE);
        if (ret < 0)
                goto out;
        ret = set_block_group_ro(cache);
@@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, u64 type)
 {
        u64 alloc_flags = get_alloc_profile(root, type);
-       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                             CHUNK_ALLOC_FORCE);
 }
 
 /*
index 20ddb28..3151386 100644 (file)
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
        }
 }
 
+static void uncache_state(struct extent_state **cached_ptr)
+{
+       if (cached_ptr && (*cached_ptr)) {
+               struct extent_state *state = *cached_ptr;
+               *cached_ptr = NULL;
+               free_extent_state(state);
+       }
+}
+
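
uncache_state() is the release half of the existing cache_state() shown in the hunk header above: cache_state() stashes a referenced extent_state in *cached_ptr, and the new helper drops that reference. A toy user-space model of the pair, assuming a plain (non-atomic) refcount where the kernel uses free_extent_state():

#include <stdio.h>
#include <stdlib.h>

/* toy extent_state with a plain refcount; the kernel's is atomic */
struct extent_state { int refs; };

/* model of cache_state(): stash the state and take a reference */
static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !*cached_ptr) {
		*cached_ptr = state;
		state->refs++;
	}
}

/* model of the new uncache_state(): drop the stashed ref, if any */
static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && *cached_ptr) {
		struct extent_state *state = *cached_ptr;

		*cached_ptr = NULL;
		if (--state->refs == 0)
			free(state);
	}
}

int main(void)
{
	struct extent_state *s = calloc(1, sizeof(*s));
	struct extent_state *cached = NULL;

	s->refs = 1;
	cache_state(s, &cached);	/* refs == 2 */
	uncache_state(&cached);		/* refs == 1, cached == NULL */
	printf("refs=%d cached=%p\n", s->refs, (void *)cached);
	free(s);
	return 0;
}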
 /*
  * set some bits on a range in the tree.  This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-                       gfp_t mask)
+                       struct extent_state **cached_state, gfp_t mask)
 {
-       return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-                             NULL, mask);
+       return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+                             NULL, cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                                mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                 gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
                                mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 
        do {
                struct page *page = bvec->bv_page;
+               struct extent_state *cached = NULL;
+               struct extent_state *state;
+
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);
 
+               spin_lock(&tree->lock);
+               state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+               if (state && state->start == start) {
+                       /*
+                        * take a reference on the state, unlock will drop
+                        * the ref
+                        */
+                       cache_state(state, &cached);
+               }
+               spin_unlock(&tree->lock);
+
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
-                                                             NULL);
+                                                             state);
                        if (ret)
                                uptodate = 0;
                }
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
+                               uncache_state(&cached);
                                continue;
                        }
                }
 
                if (uptodate) {
-                       set_extent_uptodate(tree, start, end,
+                       set_extent_uptodate(tree, start, end, &cached,
                                            GFP_ATOMIC);
                }
-               unlock_extent(tree, start, end, GFP_ATOMIC);
+               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
                if (whole_page) {
                        if (uptodate) {
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 
        do {
                struct page *page = bvec->bv_page;
+               struct extent_state *cached = NULL;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
                        prefetchw(&bvec->bv_page->flags);
 
                if (uptodate) {
-                       set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+                       set_extent_uptodate(tree, start, end, &cached,
+                                           GFP_ATOMIC);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
 
-               unlock_extent(tree, start, end, GFP_ATOMIC);
+               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
        } while (bvec >= bio->bi_io_vec);
 
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        while (cur <= end) {
                if (cur >= last_byte) {
                        char *userpage;
+                       struct extent_state *cached = NULL;
+
                        iosize = PAGE_CACHE_SIZE - page_offset;
                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + page_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
-                                           GFP_NOFS);
-                       unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                                           &cached, GFP_NOFS);
+                       unlock_extent_cached(tree, cur, cur + iosize - 1,
+                                            &cached, GFP_NOFS);
                        break;
                }
                em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
                        char *userpage;
+                       struct extent_state *cached = NULL;
+
                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + page_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
 
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
-                                           GFP_NOFS);
-                       unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                                           &cached, GFP_NOFS);
+                       unlock_extent_cached(tree, cur, cur + iosize - 1,
+                                            &cached, GFP_NOFS);
                        cur = cur + iosize;
                        page_offset += iosize;
                        continue;
@@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
                        iocount++;
                        block_start = block_start + iosize;
                } else {
-                       set_extent_uptodate(tree, block_start, cur_end,
+                       struct extent_state *cached = NULL;
+
+                       set_extent_uptodate(tree, block_start, cur_end, &cached,
                                            GFP_NOFS);
-                       unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+                       unlock_extent_cached(tree, block_start, cur_end,
+                                            &cached, GFP_NOFS);
                        block_start = cur_end + 1;
                }
                page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
        num_pages = num_extent_pages(eb->start, eb->len);
 
        set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-                           GFP_NOFS);
+                           NULL, GFP_NOFS);
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
        kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+       unsigned long distance = (src > dst) ? src - dst : dst - src;
+       return distance < len;
+}
+
 static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
@@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        char *src_kaddr;
 
-       if (dst_page != src_page)
+       if (dst_page != src_page) {
                src_kaddr = kmap_atomic(src_page, KM_USER1);
-       else
+       } else {
                src_kaddr = dst_kaddr;
+               BUG_ON(areas_overlap(src_off, dst_off, len));
+       }
 
        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                       "len %lu len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
-       if (dst_offset < src_offset) {
+       if (!areas_overlap(src_offset, dst_offset, len)) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
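
areas_overlap() also replaces the old one-sided `dst_offset < src_offset` test in memmove_extent_buffer() above, so overlap is now detected in both directions and non-overlapping forward moves can take the memcpy path too. A standalone sanity check of the helper's edge cases (plain user-space C, nothing kernel-specific):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool areas_overlap(unsigned long src, unsigned long dst,
			  unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

int main(void)
{
	assert(areas_overlap(0, 10, 20));	/* |0-10| = 10 < 20: overlap  */
	assert(!areas_overlap(0, 10, 10));	/* exactly adjacent: disjoint */
	assert(areas_overlap(100, 96, 8));	/* symmetric in src and dst   */
	assert(!areas_overlap(7, 7, 0));	/* zero-length never overlaps */
	printf("areas_overlap: all cases pass\n");
	return 0;
}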
index f62c544..af2d717 100644 (file)
@@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   int bits, int exclusive_bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-                       gfp_t mask);
+                       struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
index e621ea5..75899a0 100644 (file)
@@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
 /*
  * unlocks pages after btrfs_file_write is done with them
  */
-static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
+void btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
        size_t i;
        for (i = 0; i < num_pages; i++) {
@@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
  * this also makes the decision about creating an inline extent vs
  * doing real data extents, marking pages dirty and delalloc as required.
  */
-static noinline int dirty_and_release_pages(struct btrfs_root *root,
-                                           struct file *file,
-                                           struct page **pages,
-                                           size_t num_pages,
-                                           loff_t pos,
-                                           size_t write_bytes)
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+                     struct page **pages, size_t num_pages,
+                     loff_t pos, size_t write_bytes,
+                     struct extent_state **cached)
 {
        int err = 0;
        int i;
-       struct inode *inode = fdentry(file)->d_inode;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
@@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root,
 
        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-                                       NULL);
+                                       cached);
        if (err)
                return err;
 
@@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                }
 
                if (copied > 0) {
-                       ret = dirty_and_release_pages(root, file, pages,
-                                                     dirty_pages, pos,
-                                                     copied);
+                       ret = btrfs_dirty_pages(root, inode, pages,
+                                               dirty_pages, pos, copied,
+                                               NULL);
                        if (ret) {
                                btrfs_delalloc_release_space(inode,
                                        dirty_pages << PAGE_CACHE_SHIFT);
index f561c95..11d2e9c 100644 (file)
@@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        struct inode *inode;
        struct rb_node *node;
        struct list_head *pos, *n;
+       struct page **pages;
        struct page *page;
        struct extent_state *cached_state = NULL;
        struct btrfs_free_cluster *cluster = NULL;
@@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        u64 start, end, len;
        u64 bytes = 0;
        u32 *crc, *checksums;
-       pgoff_t index = 0, last_index = 0;
        unsigned long first_page_offset;
-       int num_checksums;
+       int index = 0, num_pages = 0;
        int entries = 0;
        int bitmaps = 0;
        int ret = 0;
        bool next_page = false;
+       bool out_of_space = false;
 
        root = root->fs_info->tree_root;
 
@@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                return 0;
        }
 
-       last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+       num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+               PAGE_CACHE_SHIFT;
        filemap_write_and_wait(inode->i_mapping);
        btrfs_wait_ordered_range(inode, inode->i_size &
                                 ~(root->sectorsize - 1), (u64)-1);
 
        /* We need a checksum per page. */
-       num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
-       crc = checksums  = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
+       crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
        if (!crc) {
                iput(inode);
                return 0;
        }
 
+       pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+       if (!pages) {
+               kfree(crc);
+               iput(inode);
+               return 0;
+       }
+
        /* Since the first page has all of our checksums and our generation we
         * need to calculate the offset into the page that we can start writing
         * our entries.
         */
-       first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
+       first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
 
        /* Get the cluster for this block_group if it exists */
        if (!list_empty(&block_group->cluster_list))
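
The switch from last_index/num_checksums to num_pages above also fixes the rounding: the old `i_size / PAGE_CACHE_SIZE` truncates, undercounting checksum slots when the cache file's size is not page-aligned, while the new expression rounds up. A small demonstration of the difference (a PAGE_CACHE_SHIFT of 12 is assumed here purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed for illustration */
#define PAGE_CACHE_SIZE  (1ULL << PAGE_CACHE_SHIFT)

int main(void)
{
	uint64_t sizes[] = { 100, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE + 1 };

	for (int i = 0; i < 3; i++) {
		uint64_t truncating = sizes[i] / PAGE_CACHE_SIZE;
		uint64_t round_up = (sizes[i] + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT;

		printf("i_size=%llu truncating=%llu round_up=%llu\n",
		       (unsigned long long)sizes[i],
		       (unsigned long long)truncating,
		       (unsigned long long)round_up);
	}
	return 0;
}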
@@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
         * after find_get_page at this point.  Just putting this here so people
         * know and don't freak out.
         */
-       while (index <= last_index) {
+       while (index < num_pages) {
                page = grab_cache_page(inode->i_mapping, index);
                if (!page) {
-                       pgoff_t i = 0;
+                       int i;
 
-                       while (i < index) {
-                               page = find_get_page(inode->i_mapping, i);
-                               unlock_page(page);
-                               page_cache_release(page);
-                               page_cache_release(page);
-                               i++;
+                       for (i = 0; i < num_pages; i++) {
+                               unlock_page(pages[i]);
+                               page_cache_release(pages[i]);
                        }
                        goto out_free;
                }
+               pages[index] = page;
                index++;
        }
 
@@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                        offset = start_offset;
                }
 
-               page = find_get_page(inode->i_mapping, index);
+               if (index >= num_pages) {
+                       out_of_space = true;
+                       break;
+               }
+
+               page = pages[index];
 
                addr = kmap(page);
                entry = addr + start_offset;
@@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 
                bytes += PAGE_CACHE_SIZE;
 
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-
-               /*
-                * We need to release our reference we got for grab_cache_page,
-                * except for the first page which will hold our checksums, we
-                * do that below.
-                */
-               if (index != 0) {
-                       unlock_page(page);
-                       page_cache_release(page);
-               }
-
-               page_cache_release(page);
-
                index++;
        } while (node || next_page);
 
@@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
 
-               page = find_get_page(inode->i_mapping, index);
+               if (index >= num_pages) {
+                       out_of_space = true;
+                       break;
+               }
+               page = pages[index];
 
                addr = kmap(page);
                memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
@@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                crc++;
                bytes += PAGE_CACHE_SIZE;
 
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-               unlock_page(page);
-               page_cache_release(page);
-               page_cache_release(page);
                list_del_init(&entry->list);
                index++;
        }
 
+       if (out_of_space) {
+               btrfs_drop_pages(pages, num_pages);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+                                    i_size_read(inode) - 1, &cached_state,
+                                    GFP_NOFS);
+               ret = 0;
+               goto out_free;
+       }
+
        /* Zero out the rest of the pages just to make sure */
-       while (index <= last_index) {
+       while (index < num_pages) {
                void *addr;
 
-               page = find_get_page(inode->i_mapping, index);
-
+               page = pages[index];
                addr = kmap(page);
                memset(addr, 0, PAGE_CACHE_SIZE);
                kunmap(page);
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-               unlock_page(page);
-               page_cache_release(page);
-               page_cache_release(page);
                bytes += PAGE_CACHE_SIZE;
                index++;
        }
 
-       btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
-
        /* Write the checksums and trans id to the first page */
        {
                void *addr;
                u64 *gen;
 
-               page = find_get_page(inode->i_mapping, 0);
+               page = pages[0];
 
                addr = kmap(page);
-               memcpy(addr, checksums, sizeof(u32) * num_checksums);
-               gen = addr + (sizeof(u32) * num_checksums);
+               memcpy(addr, checksums, sizeof(u32) * num_pages);
+               gen = addr + (sizeof(u32) * num_pages);
                *gen = trans->transid;
                kunmap(page);
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-               unlock_page(page);
-               page_cache_release(page);
-               page_cache_release(page);
        }
-       BTRFS_I(inode)->generation = trans->transid;
 
+       ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
+                                           bytes, &cached_state);
+       btrfs_drop_pages(pages, num_pages);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 
+       if (ret) {
+               ret = 0;
+               goto out_free;
+       }
+
+       BTRFS_I(inode)->generation = trans->transid;
+
        filemap_write_and_wait(inode->i_mapping);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -853,6 +845,7 @@ out_free:
                BTRFS_I(inode)->generation = 0;
        }
        kfree(checksums);
+       kfree(pages);
        btrfs_update_inode(trans, root, inode);
        iput(inode);
        return ret;
index 5cc64ab..fcd66b6 100644 (file)
@@ -1770,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);
 
-       btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-       ret = btrfs_update_inode(trans, root, inode);
-       BUG_ON(ret);
+       ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+       if (!ret) {
+               ret = btrfs_update_inode(trans, root, inode);
+               BUG_ON(ret);
+       }
+       ret = 0;
 out:
        if (nolock) {
                if (trans)
@@ -2590,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct btrfs_inode_item *item,
                            struct inode *inode)
 {
+       if (!leaf->map_token)
+               map_private_extent_buffer(leaf, (unsigned long)item,
+                                         sizeof(struct btrfs_inode_item),
+                                         &leaf->map_token, &leaf->kaddr,
+                                         &leaf->map_start, &leaf->map_len,
+                                         KM_USER1);
+
        btrfs_set_inode_uid(leaf, item, inode->i_uid);
        btrfs_set_inode_gid(leaf, item, inode->i_gid);
        btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2618,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
        btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
        btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+
+       if (leaf->map_token) {
+               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+               leaf->map_token = NULL;
+       }
 }
 
 /*
@@ -4207,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        struct btrfs_key found_key;
        struct btrfs_path *path;
        int ret;
-       u32 nritems;
        struct extent_buffer *leaf;
        int slot;
-       int advance;
        unsigned char d_type;
        int over = 0;
        u32 di_cur;
@@ -4253,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
-       advance = 0;
 
        while (1) {
                leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
                slot = path->slots[0];
-               if (advance || slot >= nritems) {
-                       if (slot >= nritems - 1) {
-                               ret = btrfs_next_leaf(root, path);
-                               if (ret)
-                                       break;
-                               leaf = path->nodes[0];
-                               nritems = btrfs_header_nritems(leaf);
-                               slot = path->slots[0];
-                       } else {
-                               slot++;
-                               path->slots[0]++;
-                       }
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto err;
+                       else if (ret > 0)
+                               break;
+                       continue;
                }
 
-               advance = 1;
                item = btrfs_item_nr(leaf, slot);
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
@@ -4282,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                if (btrfs_key_type(&found_key) != key_type)
                        break;
                if (found_key.offset < filp->f_pos)
-                       continue;
+                       goto next;
 
                filp->f_pos = found_key.offset;
 
@@ -4335,6 +4340,8 @@ skip:
                        di_cur += di_len;
                        di = (struct btrfs_dir_item *)((char *)di + di_len);
                }
+next:
+               path->slots[0]++;
        }
 
        /* Reached end of directory/root. Bump pos past the last item. */
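
The same loop shape (advance in exactly one place, let btrfs_next_leaf() reset the slot) is applied to btrfs_listxattr() later in this series. The control flow, modeled as a toy walk where an array of arrays stands in for leaves and bumping an index stands in for btrfs_next_leaf():

#include <stdio.h>

/* toy "tree": three leaves of varying occupancy */
static const int leaves[3][4] = { {1, 2, 3, 4}, {5, 6, 0, 0}, {7, 8, 9, 0} };
static const int nritems[3]  = { 4, 2, 3 };

int main(void)
{
	int leaf = 0, slot = 0;

	while (1) {
		if (slot >= nritems[leaf]) {
			if (++leaf >= 3)
				break;		/* btrfs_next_leaf() > 0   */
			slot = 0;
			continue;		/* re-check the fresh leaf */
		}
		printf("%d\n", leaves[leaf][slot]);
		slot++;				/* the single "next:" step */
	}
	return 0;
}

It prints 1 through 9; the only slot increment is the final one, mirroring the new `next:` label.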
@@ -4527,14 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        BUG_ON(!path);
 
        inode = new_inode(root->fs_info->sb);
-       if (!inode)
+       if (!inode) {
+               btrfs_free_path(path);
                return ERR_PTR(-ENOMEM);
+       }
 
        if (dir) {
                trace_btrfs_inode_request(dir);
 
                ret = btrfs_set_inode_index(dir, index);
                if (ret) {
+                       btrfs_free_path(path);
                        iput(inode);
                        return ERR_PTR(ret);
                }
@@ -4834,9 +4844,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        if (inode->i_nlink == ~0U)
                return -EMLINK;
 
-       btrfs_inc_nlink(inode);
-       inode->i_ctime = CURRENT_TIME;
-
        err = btrfs_set_inode_index(dir, &index);
        if (err)
                goto fail;
@@ -4852,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                goto fail;
        }
 
+       btrfs_inc_nlink(inode);
+       inode->i_ctime = CURRENT_TIME;
+
        btrfs_set_trans_block_group(trans, dir);
        ihold(inode);
 
@@ -5221,7 +5231,7 @@ again:
                        btrfs_mark_buffer_dirty(leaf);
                }
                set_extent_uptodate(io_tree, em->start,
-                                   extent_map_end(em) - 1, GFP_NOFS);
+                                   extent_map_end(em) - 1, NULL, GFP_NOFS);
                goto insert;
        } else {
                printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -5428,17 +5438,30 @@ out:
 }
 
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+                                                 struct extent_map *em,
                                                  u64 start, u64 len)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
-       struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct btrfs_key ins;
        u64 alloc_hint;
        int ret;
+       bool insert = false;
 
-       btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+       /*
+        * Ok, if the extent map we looked up is a hole and is for the exact
+        * range we want, there is no reason to allocate a new one.  However,
+        * if it is not right, we need to free this one and drop the cache
+        * for our range.
+        */
+       if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
+           em->len != len) {
+               free_extent_map(em);
+               em = NULL;
+               insert = true;
+               btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+       }
 
        trans = btrfs_join_transaction(root, 0);
        if (IS_ERR(trans))
@@ -5454,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
                goto out;
        }
 
-       em = alloc_extent_map(GFP_NOFS);
        if (!em) {
-               em = ERR_PTR(-ENOMEM);
-               goto out;
+               em = alloc_extent_map(GFP_NOFS);
+               if (!em) {
+                       em = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
        }
 
        em->start = start;
@@ -5467,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        em->block_start = ins.objectid;
        em->block_len = ins.offset;
        em->bdev = root->fs_info->fs_devices->latest_bdev;
+
+       /*
+        * We need to do this because if we're using the original em we searched
+        * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
+        */
+       em->flags = 0;
        set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
-       while (1) {
+       while (insert) {
                write_lock(&em_tree->lock);
                ret = add_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);
@@ -5687,8 +5718,7 @@ must_cow:
         * it above
         */
        len = bh_result->b_size;
-       free_extent_map(em);
-       em = btrfs_new_extent_direct(inode, start, len);
+       em = btrfs_new_extent_direct(inode, em, start, len);
        if (IS_ERR(em))
                return PTR_ERR(em);
        len = min(len, em->len - (start - em->start));
@@ -5851,8 +5881,10 @@ again:
        }
 
        add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
-       btrfs_ordered_update_i_size(inode, 0, ordered);
-       btrfs_update_inode(trans, root, inode);
+       ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+       if (!ret)
+               btrfs_update_inode(trans, root, inode);
+       ret = 0;
 out_unlock:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
                             ordered->file_offset + ordered->len - 1,
@@ -5938,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 
 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                                         int rw, u64 file_offset, int skip_sum,
-                                        u32 *csums)
+                                        u32 *csums, int async_submit)
 {
        int write = rw & REQ_WRITE;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5949,13 +5981,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
        if (ret)
                goto err;
 
-       if (write && !skip_sum) {
+       if (skip_sum)
+               goto map;
+
+       if (write && async_submit) {
                ret = btrfs_wq_submit_bio(root->fs_info,
                                   inode, rw, bio, 0, 0,
                                   file_offset,
                                   __btrfs_submit_bio_start_direct_io,
                                   __btrfs_submit_bio_done);
                goto err;
+       } else if (write) {
+               /*
+                * If we aren't doing async submit, calculate the csum of the
+                * bio now.
+                */
+               ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+               if (ret)
+                       goto err;
        } else if (!skip_sum) {
                ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
                                          file_offset, csums);
@@ -5963,7 +6006,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                        goto err;
        }
 
-       ret = btrfs_map_bio(root, rw, bio, 0, 1);
+map:
+       ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
 err:
        bio_put(bio);
        return ret;
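
After this change __btrfs_submit_dio_bio() has four exits. The table below models only the dispatch decision (submit_path and the enum are illustrative names; error handling and the actual submission calls are omitted):

#include <stdio.h>

/* the four exits of the rewritten __btrfs_submit_dio_bio(), as a table */
enum submit_path { MAP_DIRECTLY, ASYNC_CSUM, SYNC_CSUM, LOOKUP_CSUMS };

static enum submit_path submit_path(int write, int skip_sum, int async_submit)
{
	if (skip_sum)
		return MAP_DIRECTLY;	/* goto map */
	if (write && async_submit)
		return ASYNC_CSUM;	/* btrfs_wq_submit_bio() helper  */
	if (write)
		return SYNC_CSUM;	/* btrfs_csum_one_bio() inline   */
	return LOOKUP_CSUMS;		/* btrfs_lookup_bio_sums_dio()   */
}

int main(void)
{
	printf("write, no skip, async -> %d\n", submit_path(1, 0, 1));
	printf("write, no skip, sync  -> %d\n", submit_path(1, 0, 0));
	printf("read,  no skip        -> %d\n", submit_path(0, 0, 0));
	printf("skip_sum              -> %d\n", submit_path(0, 1, 0));
	return 0;
}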
@@ -5985,15 +6029,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int nr_pages = 0;
        u32 *csums = dip->csums;
        int ret = 0;
+       int async_submit = 0;
        int write = rw & REQ_WRITE;
 
-       bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
-       if (!bio)
-               return -ENOMEM;
-       bio->bi_private = dip;
-       bio->bi_end_io = btrfs_end_dio_bio;
-       atomic_inc(&dip->pending_bios);
-
        map_length = orig_bio->bi_size;
        ret = btrfs_map_block(map_tree, READ, start_sector << 9,
                              &map_length, NULL, 0);
@@ -6002,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                return -EIO;
        }
 
+       if (map_length >= orig_bio->bi_size) {
+               bio = orig_bio;
+               goto submit;
+       }
+
+       async_submit = 1;
+       bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+       if (!bio)
+               return -ENOMEM;
+       bio->bi_private = dip;
+       bio->bi_end_io = btrfs_end_dio_bio;
+       atomic_inc(&dip->pending_bios);
+
        while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
                if (unlikely(map_length < submit_len + bvec->bv_len ||
                    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
@@ -6015,7 +6066,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        atomic_inc(&dip->pending_bios);
                        ret = __btrfs_submit_dio_bio(bio, inode, rw,
                                                     file_offset, skip_sum,
-                                                    csums);
+                                                    csums, async_submit);
                        if (ret) {
                                bio_put(bio);
                                atomic_dec(&dip->pending_bios);
@@ -6052,8 +6103,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                }
        }
 
+submit:
        ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
-                                    csums);
+                                    csums, async_submit);
        if (!ret)
                return 0;
 
@@ -6148,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                        unsigned long nr_segs)
 {
        int seg;
+       int i;
        size_t size;
        unsigned long addr;
        unsigned blocksize_mask = root->sectorsize - 1;
@@ -6162,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                addr = (unsigned long)iov[seg].iov_base;
                size = iov[seg].iov_len;
                end += size;
-               if ((addr & blocksize_mask) || (size & blocksize_mask)) 
+               if ((addr & blocksize_mask) || (size & blocksize_mask))
                        goto out;
+
+               /* If this is a write we don't need to check any further */
+               if (rw & WRITE)
+                       continue;
+
+               /*
+                * Check to make sure we don't have duplicate iov_base's in
+                * this iovec; if we do, return EINVAL, otherwise we'll get
+                * csum errors when reading back.
+                */
+               for (i = seg + 1; i < nr_segs; i++) {
+                       if (iov[seg].iov_base == iov[i].iov_base)
+                               goto out;
+               }
        }
        retval = 0;
 out:
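
The duplicate-base scan added above is quadratic in nr_segs, which is fine for the short iovecs direct I/O sees, and writes skip it entirely. A user-space model of the same rejection rule (iov_has_duplicate_base is an illustrative name):

#include <stdbool.h>
#include <stdio.h>
#include <sys/uio.h>

/* model of the new read-side check: two segments sharing a base pointer
 * would produce checksum errors on read-back, so reject them up front */
static bool iov_has_duplicate_base(const struct iovec *iov,
				   unsigned long nr_segs)
{
	for (unsigned long seg = 0; seg + 1 < nr_segs; seg++)
		for (unsigned long i = seg + 1; i < nr_segs; i++)
			if (iov[seg].iov_base == iov[i].iov_base)
				return true;
	return false;
}

int main(void)
{
	char buf[4096];
	struct iovec iov[2] = {
		{ .iov_base = buf, .iov_len = 2048 },
		{ .iov_base = buf, .iov_len = 2048 },	/* duplicate base */
	};

	printf("%s\n", iov_has_duplicate_base(iov, 2) ? "reject" : "ok");
	return 0;
}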
index cfc264f..ffb48d6 100644 (file)
@@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
        struct btrfs_ioctl_space_info space;
        struct btrfs_ioctl_space_info *dest;
        struct btrfs_ioctl_space_info *dest_orig;
-       struct btrfs_ioctl_space_info *user_dest;
+       struct btrfs_ioctl_space_info __user *user_dest;
        struct btrfs_space_info *info;
        u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
                       BTRFS_BLOCK_GROUP_SYSTEM,
index 58e7de9..0ac712e 100644 (file)
@@ -159,7 +159,7 @@ enum {
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
        Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_err,
+       Opt_enospc_debug, Opt_subvolrootid, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -189,6 +189,7 @@ static match_table_t tokens = {
        {Opt_clear_cache, "clear_cache"},
        {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
        {Opt_enospc_debug, "enospc_debug"},
+       {Opt_subvolrootid, "subvolrootid=%d"},
        {Opt_err, NULL},
 };
 
@@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                        break;
                case Opt_subvol:
                case Opt_subvolid:
+               case Opt_subvolrootid:
                case Opt_device:
                        /*
                         * These are parsed by btrfs_parse_early_options
@@ -388,7 +390,7 @@ out:
  */
 static int btrfs_parse_early_options(const char *options, fmode_t flags,
                void *holder, char **subvol_name, u64 *subvol_objectid,
-               struct btrfs_fs_devices **fs_devices)
+               u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
 {
        substring_t args[MAX_OPT_ARGS];
        char *opts, *orig, *p;
@@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
                                        *subvol_objectid = intarg;
                        }
                        break;
+               case Opt_subvolrootid:
+                       intarg = 0;
+                       error = match_int(&args[0], &intarg);
+                       if (!error) {
+                               /* we want the original fs_tree */
+                               if (!intarg)
+                                       *subvol_rootid =
+                                               BTRFS_FS_TREE_OBJECTID;
+                               else
+                                       *subvol_rootid = intarg;
+                       }
+                       break;
                case Opt_device:
                        error = btrfs_scan_one_device(match_strdup(&args[0]),
                                        flags, holder, fs_devices);
@@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        fmode_t mode = FMODE_READ;
        char *subvol_name = NULL;
        u64 subvol_objectid = 0;
+       u64 subvol_rootid = 0;
        int error = 0;
 
        if (!(flags & MS_RDONLY))
@@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 
        error = btrfs_parse_early_options(data, mode, fs_type,
                                          &subvol_name, &subvol_objectid,
-                                         &fs_devices);
+                                         &subvol_rootid, &fs_devices);
        if (error)
                return ERR_PTR(error);
 
@@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                s->s_flags |= MS_ACTIVE;
        }
 
-       root = get_default_root(s, subvol_objectid);
-       if (IS_ERR(root)) {
-               error = PTR_ERR(root);
-               deactivate_locked_super(s);
-               goto error_free_subvol_name;
-       }
        /* if they gave us a subvolume name bind mount into that */
        if (strcmp(subvol_name, ".")) {
                struct dentry *new_root;
+
+               root = get_default_root(s, subvol_rootid);
+               if (IS_ERR(root)) {
+                       error = PTR_ERR(root);
+                       deactivate_locked_super(s);
+                       goto error_free_subvol_name;
+               }
+
                mutex_lock(&root->d_inode->i_mutex);
                new_root = lookup_one_len(subvol_name, root,
                                      strlen(subvol_name));
@@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                }
                dput(root);
                root = new_root;
+       } else {
+               root = get_default_root(s, subvol_objectid);
+               if (IS_ERR(root)) {
+                       error = PTR_ERR(root);
+                       deactivate_locked_super(s);
+                       goto error_free_subvol_name;
+               }
        }
 
        kfree(subvol_name);
index 5b158da..c571734 100644 (file)
 
 static noinline void put_transaction(struct btrfs_transaction *transaction)
 {
-       WARN_ON(transaction->use_count == 0);
-       transaction->use_count--;
-       if (transaction->use_count == 0) {
-               list_del_init(&transaction->list);
+       WARN_ON(atomic_read(&transaction->use_count) == 0);
+       if (atomic_dec_and_test(&transaction->use_count)) {
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
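
With use_count now an atomic_t, the last put frees the transaction via atomic_dec_and_test() without needing trans_mutex, which is also why the list_del_init() moves out of put_transaction() and into the commit path later in this diff. A user-space model of the refcount half, using C11 atomics in place of the kernel's atomic_t (trans_get/trans_put are illustrative names):

#include <stdatomic.h>
#include <stdlib.h>

struct transaction {
	atomic_int use_count;
};

static struct transaction *trans_alloc(void)
{
	struct transaction *t = calloc(1, sizeof(*t));

	if (t)
		atomic_init(&t->use_count, 1);
	return t;
}

static void trans_get(struct transaction *t)
{
	atomic_fetch_add(&t->use_count, 1);	/* atomic_inc() */
}

static void trans_put(struct transaction *t)
{
	/* atomic_dec_and_test(): true when the count hits zero */
	if (atomic_fetch_sub(&t->use_count, 1) == 1)
		free(t);
}

int main(void)
{
	struct transaction *t = trans_alloc();

	trans_get(t);	/* a waiter takes a reference */
	trans_put(t);	/* waiter done                */
	trans_put(t);	/* last ref: freed here       */
	return 0;
}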
@@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root)
                if (!cur_trans)
                        return -ENOMEM;
                root->fs_info->generation++;
-               cur_trans->num_writers = 1;
+               atomic_set(&cur_trans->num_writers, 1);
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
-               cur_trans->use_count = 1;
+               atomic_set(&cur_trans->use_count, 1);
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
 
@@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root)
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
-               cur_trans->num_writers++;
+               atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
        }
 
@@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root)
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
-               cur_trans->use_count++;
+               atomic_inc(&cur_trans->use_count);
                while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
@@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 {
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
+       int retries = 0;
        int ret;
 
        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -204,7 +203,7 @@ again:
        }
 
        cur_trans = root->fs_info->running_transaction;
-       cur_trans->use_count++;
+       atomic_inc(&cur_trans->use_count);
        if (type != TRANS_JOIN_NOLOCK)
                mutex_unlock(&root->fs_info->trans_mutex);
 
@@ -224,10 +223,18 @@ again:
 
        if (num_items > 0) {
                ret = btrfs_trans_reserve_metadata(h, root, num_items);
-               if (ret == -EAGAIN) {
+               if (ret == -EAGAIN && !retries) {
+                       retries++;
                        btrfs_commit_transaction(h, root);
                        goto again;
+               } else if (ret == -EAGAIN) {
+                       /*
+                        * We have already retried and got EAGAIN, so we
+                        * really are out of space; set ret to -ENOSPC.
+                        */
+                       ret = -ENOSPC;
                }
+
                if (ret < 0) {
                        btrfs_end_transaction(h, root);
                        return ERR_PTR(ret);
@@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
                        goto out_unlock;  /* nothing committing|committed */
        }
 
-       cur_trans->use_count++;
+       atomic_inc(&cur_trans->use_count);
        mutex_unlock(&root->fs_info->trans_mutex);
 
        wait_for_commit(root, cur_trans);
@@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                        wake_up_process(info->transaction_kthread);
        }
 
-       if (lock)
-               mutex_lock(&info->trans_mutex);
        WARN_ON(cur_trans != info->running_transaction);
-       WARN_ON(cur_trans->num_writers < 1);
-       cur_trans->num_writers--;
+       WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
+       atomic_dec(&cur_trans->num_writers);
 
        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
-       if (lock)
-               mutex_unlock(&info->trans_mutex);
 
        if (current->journal_info == trans)
                current->journal_info = NULL;
@@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        /* take transaction reference */
        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = trans->transaction;
-       cur_trans->use_count++;
+       atomic_inc(&cur_trans->use_count);
        mutex_unlock(&root->fs_info->trans_mutex);
 
        btrfs_end_transaction(trans, root);
@@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        mutex_lock(&root->fs_info->trans_mutex);
        if (cur_trans->in_commit) {
-               cur_trans->use_count++;
+               atomic_inc(&cur_trans->use_count);
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);
 
@@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
-                       prev_trans->use_count++;
+                       atomic_inc(&prev_trans->use_count);
                        mutex_unlock(&root->fs_info->trans_mutex);
 
                        wait_for_commit(root, prev_trans);
@@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                                TASK_UNINTERRUPTIBLE);
 
                smp_mb();
-               if (cur_trans->num_writers > 1)
+               if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);
 
                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
-       } while (cur_trans->num_writers > 1 ||
+       } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));
 
        ret = create_pending_snapshots(trans, root->fs_info);
@@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        wake_up(&cur_trans->commit_wait);
 
+       list_del_init(&cur_trans->list);
        put_transaction(cur_trans);
        put_transaction(cur_trans);
 
index 229a594..e441acc 100644 (file)
@@ -27,11 +27,11 @@ struct btrfs_transaction {
         * total writers in this transaction, it must be zero before the
         * transaction can end
         */
-       unsigned long num_writers;
+       atomic_t num_writers;
 
        unsigned long num_joined;
        int in_commit;
-       int use_count;
+       atomic_t use_count;
        int commit_done;
        int blocked;
        struct list_head list;
index a5303b8..cfd6605 100644 (file)
@@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dir_item *di;
-       int ret = 0, slot, advance;
+       int ret = 0, slot;
        size_t total_size = 0, size_left = size;
        unsigned long name_ptr;
        size_t name_len;
-       u32 nritems;
 
        /*
         * ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
-       advance = 0;
+
        while (1) {
                leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
                slot = path->slots[0];
 
                /* this is where we start walking through the path */
-               if (advance || slot >= nritems) {
+               if (slot >= btrfs_header_nritems(leaf)) {
                        /*
                         * if we've reached the last slot in this leaf we need
                         * to go to the next leaf and reset everything
                         */
-                       if (slot >= nritems-1) {
-                               ret = btrfs_next_leaf(root, path);
-                               if (ret)
-                                       break;
-                               leaf = path->nodes[0];
-                               nritems = btrfs_header_nritems(leaf);
-                               slot = path->slots[0];
-                       } else {
-                               /*
-                                * just walking through the slots on this leaf
-                                */
-                               slot++;
-                               path->slots[0]++;
-                       }
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto err;
+                       else if (ret > 0)
+                               break;
+                       continue;
                }
-               advance = 1;
 
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
@@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
                /* we are just looking for how big our buffer needs to be */
                if (!size)
-                       continue;
+                       goto next;
 
                if (!buffer || (name_len + 1) > size_left) {
                        ret = -ERANGE;
@@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
                size_left -= name_len + 1;
                buffer += name_len + 1;
+next:
+               path->slots[0]++;
        }
        ret = total_size;
 
index 129a357..7108c15 100644 (file)
@@ -99,12 +99,9 @@ static struct kmem_cache *dentry_cache __read_mostly;
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 
-struct dcache_hash_bucket {
-       struct hlist_bl_head head;
-};
-static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __read_mostly;
 
-static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+static inline struct hlist_bl_head *d_hash(struct dentry *parent,
                                        unsigned long hash)
 {
        hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
@@ -112,14 +109,14 @@ static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
        return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
+static inline void spin_lock_bucket(struct hlist_bl_head *b)
 {
-       bit_spin_lock(0, (unsigned long *)&b->head.first);
+       bit_spin_lock(0, (unsigned long *)&b->first);
 }
 
-static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
+static inline void spin_unlock_bucket(struct hlist_bl_head *b)
 {
-       __bit_spin_unlock(0, (unsigned long *)&b->head.first);
+       __bit_spin_unlock(0, (unsigned long *)&b->first);
 }
 
 /* Statistics gathering. */
@@ -331,15 +328,15 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
 void __d_drop(struct dentry *dentry)
 {
        if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+               struct hlist_bl_head *b;
                if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
-                       bit_spin_lock(0,
-                               (unsigned long *)&dentry->d_sb->s_anon.first);
+                       b = &dentry->d_sb->s_anon;
+                       spin_lock_bucket(b);
                        dentry->d_flags |= DCACHE_UNHASHED;
                        hlist_bl_del_init(&dentry->d_hash);
-                       __bit_spin_unlock(0,
-                               (unsigned long *)&dentry->d_sb->s_anon.first);
+                       spin_unlock_bucket(b);
                } else {
-                       struct dcache_hash_bucket *b;
+                       struct hlist_bl_head *b;
                        b = d_hash(dentry->d_parent, dentry->d_name.hash);
                        spin_lock_bucket(b);
                        /*
@@ -1789,7 +1786,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
-       struct dcache_hash_bucket *b = d_hash(parent, hash);
+       struct hlist_bl_head *b = d_hash(parent, hash);
        struct hlist_bl_node *node;
        struct dentry *dentry;
 
@@ -1813,7 +1810,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
         *
         * See Documentation/filesystems/path-lookup.txt for more details.
         */
-       hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+       hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                struct inode *i;
                const char *tname;
                int tlen;
@@ -1908,7 +1905,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
-       struct dcache_hash_bucket *b = d_hash(parent, hash);
+       struct hlist_bl_head *b = d_hash(parent, hash);
        struct hlist_bl_node *node;
        struct dentry *found = NULL;
        struct dentry *dentry;
@@ -1935,7 +1932,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
         */
        rcu_read_lock();
        
-       hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+       hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                const char *tname;
                int tlen;
 
@@ -2086,12 +2083,12 @@ again:
 }
 EXPORT_SYMBOL(d_delete);
 
-static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
+static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
        BUG_ON(!d_unhashed(entry));
        spin_lock_bucket(b);
        entry->d_flags &= ~DCACHE_UNHASHED;
-       hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
+       hlist_bl_add_head_rcu(&entry->d_hash, b);
        spin_unlock_bucket(b);
 }
 
@@ -3025,7 +3022,7 @@ static void __init dcache_init_early(void)
 
        dentry_hashtable =
                alloc_large_system_hash("Dentry cache",
-                                       sizeof(struct dcache_hash_bucket),
+                                       sizeof(struct hlist_bl_head),
                                        dhash_entries,
                                        13,
                                        HASH_EARLY,
@@ -3034,7 +3031,7 @@ static void __init dcache_init_early(void)
                                        0);
 
        for (loop = 0; loop < (1 << d_hash_shift); loop++)
-               INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+               INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 static void __init dcache_init(void)
@@ -3057,7 +3054,7 @@ static void __init dcache_init(void)
 
        dentry_hashtable =
                alloc_large_system_hash("Dentry cache",
-                                       sizeof(struct dcache_hash_bucket),
+                                       sizeof(struct hlist_bl_head),
                                        dhash_entries,
                                        13,
                                        0,
@@ -3066,7 +3063,7 @@ static void __init dcache_init(void)
                                        0);
 
        for (loop = 0; loop < (1 << d_hash_shift); loop++)
-               INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+               INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 /* SLAB cache for __getname() consumers */
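
With the dcache_hash_bucket wrapper gone, each hash bucket is a bare hlist_bl_head and the bucket lock is simply bit 0 of its first pointer, taken via bit_spin_lock(). A rough userspace model of such a bit lock, using GCC/Clang __atomic builtins rather than the kernel's bitops (illustrative only):

    #include <stdio.h>

    /* Spin until bit 0 is clear, then set it atomically; mirrors the idea
     * of bit_spin_lock(0, &head->first), not the kernel implementation. */
    static void bit_lock0(unsigned long *addr)
    {
        while (__atomic_fetch_or(addr, 1UL, __ATOMIC_ACQUIRE) & 1UL)
            ;   /* spin */
    }

    static void bit_unlock0(unsigned long *addr)
    {
        __atomic_fetch_and(addr, ~1UL, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        unsigned long head = 0;   /* stands in for hlist_bl_head.first */

        bit_lock0(&head);
        /* hash-chain mutation would happen here */
        bit_unlock0(&head);
        printf("head=%lu\n", head);
        return 0;
    }

Packing the lock into the low bit of the head pointer keeps each bucket a single word, which is why the table can now be an array of bare hlist_bl_head entries.
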
index c71995b..0f5c4f9 100644 (file)
@@ -884,8 +884,8 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
        }
 
        brelse(dibh);
-       gfs2_trans_end(sdp);
 failed:
+       gfs2_trans_end(sdp);
        if (al) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
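
Moving gfs2_trans_end() below the failed: label means the transaction is closed on the error path as well, not just on success. The general shape of that fix, as a small sketch with hypothetical helpers:

    #include <stdio.h>

    static int  setup(void)    { return 0; }    /* hypothetical stand-ins */
    static int  commit(void)   { return -1; }   /* pretend the commit failed */
    static void teardown(void) { puts("teardown"); }

    static int do_write(void)
    {
        int ret = setup();
        if (ret)
            goto failed;
        ret = commit();
    failed:
        teardown();   /* below the label: runs on success and failure alike */
        return ret;
    }

    int main(void)
    {
        printf("ret=%d\n", do_write());
        return 0;
    }
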
index 5c356d0..f789c57 100644 (file)
@@ -1506,7 +1506,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
                inode = gfs2_inode_lookup(dir->i_sb, 
                                be16_to_cpu(dent->de_type),
                                be64_to_cpu(dent->de_inum.no_addr),
-                               be64_to_cpu(dent->de_inum.no_formal_ino));
+                               be64_to_cpu(dent->de_inum.no_formal_ino), 0);
                brelse(bh);
                return inode;
        }
index b2682e0..e483108 100644 (file)
@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        return generic_file_aio_write(iocb, iov, nr_segs, pos);
 }
 
-static void empty_write_end(struct page *page, unsigned from,
-                          unsigned to)
+static int empty_write_end(struct page *page, unsigned from,
+                          unsigned to, int mode)
 {
-       struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+       struct inode *inode = page->mapping->host;
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct buffer_head *bh;
+       unsigned offset, blksize = 1 << inode->i_blkbits;
+       pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
 
        zero_user(page, from, to-from);
        mark_page_accessed(page);
 
-       if (!gfs2_is_writeback(ip))
-               gfs2_page_add_databufs(ip, page, from, to);
+       if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
+               if (!gfs2_is_writeback(ip))
+                       gfs2_page_add_databufs(ip, page, from, to);
+
+               block_commit_write(page, from, to);
+               return 0;
+       }
+
+       offset = 0;
+       bh = page_buffers(page);
+       while (offset < to) {
+               if (offset >= from) {
+                       set_buffer_uptodate(bh);
+                       mark_buffer_dirty(bh);
+                       clear_buffer_new(bh);
+                       write_dirty_buffer(bh, WRITE);
+               }
+               offset += blksize;
+               bh = bh->b_this_page;
+       }
 
-       block_commit_write(page, from, to);
+       offset = 0;
+       bh = page_buffers(page);
+       while (offset < to) {
+               if (offset >= from) {
+                       wait_on_buffer(bh);
+                       if (!buffer_uptodate(bh))
+                               return -EIO;
+               }
+               offset += blksize;
+               bh = bh->b_this_page;
+       }
+       return 0;
 }
 
 static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
        return !buffer_mapped(&bh_map);
 }
 
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
+                             int mode)
 {
        struct inode *inode = page->mapping->host;
        unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
                                                          gfs2_block_map);
                                if (unlikely(ret))
                                        return ret;
-                               empty_write_end(page, start, end);
+                               ret = empty_write_end(page, start, end, mode);
+                               if (unlikely(ret))
+                                       return ret;
                                end = 0;
                        }
                        start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
                ret = __block_write_begin(page, start, end - start, gfs2_block_map);
                if (unlikely(ret))
                        return ret;
-               empty_write_end(page, start, end);
+               ret = empty_write_end(page, start, end, mode);
+               if (unlikely(ret))
+                       return ret;
        }
 
        return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 
                if (curr == end)
                        to = end_offset;
-               error = write_empty_blocks(page, from, to);
+               error = write_empty_blocks(page, from, to, mode);
                if (!error && offset + to > inode->i_size &&
                    !(mode & FALLOC_FL_KEEP_SIZE)) {
                        i_size_write(inode, offset + to);
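
For blocks that lie past the current end of file under FALLOC_FL_KEEP_SIZE, the new empty_write_end() submits every dirty buffer first and only then waits on each one, returning -EIO if any came back not uptodate. A userspace model of that submit-all-then-wait-all shape (stand-in helpers, not buffer-head code):

    #include <stdio.h>

    #define NBUF 4

    static int submitted[NBUF];

    static void submit_write(int i) { submitted[i] = 1; }   /* stands in for write_dirty_buffer() */
    static int  write_done(int i)   { return submitted[i]; } /* stands in for wait_on_buffer() + uptodate check */

    static int flush_range(int from, int to)
    {
        int i;

        for (i = from; i < to; i++)   /* pass 1: queue every write */
            submit_write(i);
        for (i = from; i < to; i++)   /* pass 2: wait, then check the result */
            if (!write_done(i))
                return -5;            /* models -EIO */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", flush_range(0, NBUF));
        return 0;
    }

Kicking all the writes before waiting lets the I/O overlap instead of serialising one buffer at a time.
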
index 3754e3c..25eeb2b 100644 (file)
@@ -385,6 +385,10 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
 static void iopen_go_callback(struct gfs2_glock *gl)
 {
        struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+       struct gfs2_sbd *sdp = gl->gl_sbd;
+
+       if (sdp->sd_vfs->s_flags & MS_RDONLY)
+               return;
 
        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
index 97d54a2..9134dcb 100644 (file)
@@ -40,37 +40,61 @@ struct gfs2_inum_range_host {
        u64 ir_length;
 };
 
+struct gfs2_skip_data {
+       u64 no_addr;
+       int skipped;
+       int non_block;
+};
+
 static int iget_test(struct inode *inode, void *opaque)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
-       u64 *no_addr = opaque;
+       struct gfs2_skip_data *data = opaque;
 
-       if (ip->i_no_addr == *no_addr)
+       if (ip->i_no_addr == data->no_addr) {
+               if (data->non_block &&
+                   inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+                       data->skipped = 1;
+                       return 0;
+               }
                return 1;
-
+       }
        return 0;
 }
 
 static int iget_set(struct inode *inode, void *opaque)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
-       u64 *no_addr = opaque;
+       struct gfs2_skip_data *data = opaque;
 
-       inode->i_ino = (unsigned long)*no_addr;
-       ip->i_no_addr = *no_addr;
+       if (data->skipped)
+               return -ENOENT;
+       inode->i_ino = (unsigned long)(data->no_addr);
+       ip->i_no_addr = data->no_addr;
        return 0;
 }
 
 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
 {
        unsigned long hash = (unsigned long)no_addr;
-       return ilookup5(sb, hash, iget_test, &no_addr);
+       struct gfs2_skip_data data;
+
+       data.no_addr = no_addr;
+       data.skipped = 0;
+       data.non_block = 0;
+       return ilookup5(sb, hash, iget_test, &data);
 }
 
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
+                              int non_block)
 {
+       struct gfs2_skip_data data;
        unsigned long hash = (unsigned long)no_addr;
-       return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
+
+       data.no_addr = no_addr;
+       data.skipped = 0;
+       data.non_block = non_block;
+       return iget5_locked(sb, hash, iget_test, iget_set, &data);
 }
 
 /**
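
The gfs2_skip_data plumbing lets iget_test() decline an inode that matches by number but is already being freed, and iget_set() then refuses to initialise a fresh alias for it, so the caller retries instead of racing the eviction. A compact userspace model of the two callbacks (hypothetical types; the error value models -ENOENT):

    #include <stdio.h>

    struct skip_data  { unsigned long no_addr; int skipped; int non_block; };
    struct fake_inode { unsigned long no_addr; int being_freed; };

    static int iget_test(struct fake_inode *inode, struct skip_data *d)
    {
        if (inode->no_addr != d->no_addr)
            return 0;
        if (d->non_block && inode->being_freed) {
            d->skipped = 1;   /* a match exists but is dying: don't take it */
            return 0;
        }
        return 1;
    }

    static int iget_set(struct skip_data *d)
    {
        if (d->skipped)
            return -2;        /* models -ENOENT: no fresh alias */
        return 0;
    }

    int main(void)
    {
        struct fake_inode dying = { 42, 1 };
        struct skip_data d = { 42, 0, 1 };

        printf("test=%d set=%d\n", iget_test(&dying, &d), iget_set(&d));
        return 0;
    }
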
@@ -111,19 +135,20 @@ static void gfs2_set_iop(struct inode *inode)
  * @sb: The super block
  * @no_addr: The inode number
  * @type: The type of the inode
+ * @non_block: Can we block on inodes that are being freed?
  *
  * Returns: A VFS inode, or an error
  */
 
 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
-                               u64 no_addr, u64 no_formal_ino)
+                               u64 no_addr, u64 no_formal_ino, int non_block)
 {
        struct inode *inode;
        struct gfs2_inode *ip;
        struct gfs2_glock *io_gl = NULL;
        int error;
 
-       inode = gfs2_iget(sb, no_addr);
+       inode = gfs2_iget(sb, no_addr, non_block);
        ip = GFS2_I(inode);
 
        if (!inode)
@@ -185,11 +210,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
 {
        struct super_block *sb = sdp->sd_vfs;
        struct gfs2_holder i_gh;
-       struct inode *inode;
+       struct inode *inode = NULL;
        int error;
 
+       /* Must not read in the block until the block type is verified */
        error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
-                                 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+                                 LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
        if (error)
                return ERR_PTR(error);
 
@@ -197,7 +223,7 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
        if (error)
                goto fail;
 
-       inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+       inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
        if (IS_ERR(inode))
                goto fail;
 
@@ -843,7 +869,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
                goto fail_gunlock2;
 
        inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
-                                 inum.no_formal_ino);
+                                 inum.no_formal_ino, 0);
        if (IS_ERR(inode))
                goto fail_gunlock2;
 
index 3e00a66..099ca30 100644 (file)
@@ -97,7 +97,8 @@ err:
 }
 
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
-                                      u64 no_addr, u64 no_formal_ino);
+                                      u64 no_addr, u64 no_formal_ino,
+                                      int non_block);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                                         u64 *no_formal_ino,
                                         unsigned int blktype);
index 42ef243..d3c69eb 100644 (file)
@@ -430,7 +430,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
        struct dentry *dentry;
        struct inode *inode;
 
-       inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+       inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
        if (IS_ERR(inode)) {
                fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
                return PTR_ERR(inode);
index cf930cd..6fcae84 100644 (file)
@@ -945,7 +945,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
                /* rgblk_search can return a block < goal, so we need to
                   keep it marching forward. */
                no_addr = block + rgd->rd_data0;
-               goal++;
+               goal = max(block + 1, goal + 1);
                if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
                        continue;
                if (no_addr == skip)
@@ -971,7 +971,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
                        found++;
 
                /* Limit reclaim to sensible number of tasks */
-               if (found > 2*NR_CPUS)
+               if (found > NR_CPUS)
                        return;
        }
 
index a4e23d6..b9f28e6 100644 (file)
@@ -1318,15 +1318,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
 
 static void gfs2_evict_inode(struct inode *inode)
 {
-       struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+       struct super_block *sb = inode->i_sb;
+       struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
 
-       if (inode->i_nlink)
+       if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
                goto out;
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+       /* Must not read inode block until block type has been verified */
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
        if (unlikely(error)) {
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
@@ -1336,6 +1338,12 @@ static void gfs2_evict_inode(struct inode *inode)
        if (error)
                goto out_truncate;
 
+       if (test_bit(GIF_INVALID, &ip->i_flags)) {
+               error = gfs2_inode_refresh(ip);
+               if (error)
+                       goto out_truncate;
+       }
+
        ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_wait(&ip->i_iopen_gh);
        gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
index aa309aa..4cf04e1 100644 (file)
@@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
        if (atomic_dec_and_test(&fp->fi_delegees)) {
                vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
                fp->fi_lease = NULL;
+               fput(fp->fi_deleg_file);
                fp->fi_deleg_file = NULL;
        }
 }
@@ -402,8 +403,8 @@ static void free_generic_stateid(struct nfs4_stateid *stp)
        if (stp->st_access_bmap) {
                oflag = nfs4_access_bmap_to_omode(stp);
                nfs4_file_put_access(stp->st_file, oflag);
-               put_nfs4_file(stp->st_file);
        }
+       put_nfs4_file(stp->st_file);
        kmem_cache_free(stateid_slab, stp);
 }
 
index 2e1cebd..129f3c9 100644 (file)
@@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
                goto out;
        if (!(iap->ia_valid & ATTR_MODE))
                iap->ia_mode = 0;
-       err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+       err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
        if (err)
                goto out;
 
@@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (IS_ERR(dchild))
                goto out_nfserr;
 
+       /* If file doesn't exist, check for permissions to create one */
+       if (!dchild->d_inode) {
+               err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+               if (err)
+                       goto out;
+       }
+
        err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
        if (err)
                goto out;
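
The reordered check asks only for directory execute permission to perform the lookup, and demands NFSD_MAY_CREATE only when the child really is absent, matching open(2) semantics where opening an existing file needs no create permission. A small model of that ordering (stub helpers, illustrative only):

    #include <stdio.h>

    enum { MAY_EXEC = 0x1, MAY_CREATE = 0x2 };

    static int fh_verify(int mask)            { (void)mask; return 0; }
    static int child_exists(const char *name) { return name[0] == 'e'; }

    static int create_like_v3(const char *name)
    {
        int err = fh_verify(MAY_EXEC);   /* enough to look the name up */
        if (err)
            return err;
        if (!child_exists(name))         /* only a real creation needs MAY_CREATE */
            err = fh_verify(MAY_CREATE);
        return err;
    }

    int main(void)
    {
        printf("%d %d\n", create_like_v3("existing"), create_like_v3("new"));
        return 0;
    }
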
index dd6628d..dfa5327 100644 (file)
@@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
 /* for the /proc/ directory itself, after non-process stuff has been done */
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
-       unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
-       struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+       unsigned int nr;
+       struct task_struct *reaper;
        struct tgid_iter iter;
        struct pid_namespace *ns;
 
+       if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
+               goto out_no_task;
+       nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+       reaper = get_proc_task(filp->f_path.dentry->d_inode);
        if (!reaper)
                goto out_no_task;
 
index a19acdb..f1ef949 100644 (file)
@@ -666,7 +666,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
        handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
        if (!handler)
                return -EOPNOTSUPP;
-       return handler->set(dentry, name, value, size, 0, handler->flags);
+       return handler->set(dentry, name, value, size, flags, handler->flags);
 }
 
 /*
index 3ca7956..9f76cce 100644 (file)
@@ -34,8 +34,10 @@ __xfs_printk(
        const struct xfs_mount  *mp,
        struct va_format        *vaf)
 {
-       if (mp && mp->m_fsname)
+       if (mp && mp->m_fsname) {
                printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+               return;
+       }
        printk("%sXFS: %pV\n", level, vaf);
 }
 
index ec0357d..2ad95fa 100644 (file)
@@ -196,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplugged_fn) (struct request_queue *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -284,7 +283,6 @@ struct request_queue
        rq_timed_out_fn         *rq_timed_out_fn;
        dma_drain_needed_fn     *dma_drain_needed;
        lld_busy_fn             *lld_busy_fn;
-       unplugged_fn            *unplugged_fn;
 
        /*
         * Dispatch queue sorting
@@ -390,20 +388,19 @@ struct request_queue
 #define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
-#define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH   7       /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI                8       /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES     9      /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   10      /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     11      /* fake timeout */
-#define QUEUE_FLAG_STACKABLE   12      /* supports request stacking */
-#define QUEUE_FLAG_NONROT      13      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH   6       /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI                7       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES     8      /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   9       /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     10      /* fake timeout */
+#define QUEUE_FLAG_STACKABLE   11      /* supports request stacking */
+#define QUEUE_FLAG_NONROT      12      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
-#define QUEUE_FLAG_DISCARD     16      /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES   17      /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM  18      /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  19      /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT     13      /* do IO stats */
+#define QUEUE_FLAG_DISCARD     14      /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   15      /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  16      /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD  17      /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -699,8 +696,9 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
@@ -843,7 +841,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
-extern void blk_queue_unplugged(struct request_queue *, unplugged_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -860,8 +857,13 @@ extern void blk_put_queue(struct request_queue *);
 struct blk_plug {
        unsigned long magic;
        struct list_head list;
+       struct list_head cb_list;
        unsigned int should_sort;
 };
+struct blk_plug_cb {
+       struct list_head list;
+       void (*callback)(struct blk_plug_cb *);
+};
 
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
@@ -887,7 +889,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
        struct blk_plug *plug = tsk->plug;
 
-       return plug && !list_empty(&plug->list);
+       return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
 }
 
 /*
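
The new blk_plug_cb lets interested code hang a callback on the per-task plug; blk_needs_flush_plug() now also fires when only callbacks are queued. A rough userspace model of the callback shape, with a simplified singly linked list standing in for list_head (names are stand-ins, not the block-layer API):

    #include <stddef.h>
    #include <stdio.h>

    struct plug_cb_model {
        struct plug_cb_model *next;              /* stands in for the list_head */
        void (*callback)(struct plug_cb_model *);
    };

    struct my_cb {                               /* hypothetical driver wrapper */
        struct plug_cb_model cb;
        const char *name;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void my_unplug(struct plug_cb_model *cb)
    {
        struct my_cb *m = container_of(cb, struct my_cb, cb);
        printf("flushing %s\n", m->name);
    }

    int main(void)
    {
        struct my_cb a = { { NULL, my_unplug }, "pending writes" };

        for (struct plug_cb_model *p = &a.cb; p; p = p->next)
            p->callback(p);                      /* what a plug flush would do */
        return 0;
    }
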
index e276883..32a4423 100644 (file)
@@ -197,7 +197,6 @@ struct dm_target {
 struct dm_target_callbacks {
        struct list_head list;
        int (*congested_fn) (struct dm_target_callbacks *, int);
-       void (*unplug_fn)(struct dm_target_callbacks *);
 };
 
 int dm_register_target(struct target_type *t);
index f3a7794..771d6d8 100644 (file)
@@ -167,6 +167,7 @@ struct input_keymap_entry {
 #define SYN_REPORT             0
 #define SYN_CONFIG             1
 #define SYN_MT_REPORT          2
+#define SYN_DROPPED            3
 
 /*
  * Keys and buttons
@@ -553,8 +554,8 @@ struct input_keymap_entry {
 #define KEY_DVD                        0x185   /* Media Select DVD */
 #define KEY_AUX                        0x186
 #define KEY_MP3                        0x187
-#define KEY_AUDIO              0x188
-#define KEY_VIDEO              0x189
+#define KEY_AUDIO              0x188   /* AL Audio Browser */
+#define KEY_VIDEO              0x189   /* AL Movie Browser */
 #define KEY_DIRECTORY          0x18a
 #define KEY_LIST               0x18b
 #define KEY_MEMO               0x18c   /* Media Select Messages */
@@ -603,8 +604,9 @@ struct input_keymap_entry {
 #define KEY_FRAMEFORWARD       0x1b5
 #define KEY_CONTEXT_MENU       0x1b6   /* GenDesc - system context menu */
 #define KEY_MEDIA_REPEAT       0x1b7   /* Consumer - transport control */
-#define KEY_10CHANNELSUP        0x1b8   /* 10 channels up (10+) */
-#define KEY_10CHANNELSDOWN      0x1b9   /* 10 channels down (10-) */
+#define KEY_10CHANNELSUP       0x1b8   /* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN     0x1b9   /* 10 channels down (10-) */
+#define KEY_IMAGES             0x1ba   /* AL Image Browser */
 
 #define KEY_DEL_EOL            0x1c0
 #define KEY_DEL_EOS            0x1c1
index b3ac06a..318bb82 100644 (file)
@@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot)
        input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
 }
 
+static inline bool input_is_mt_axis(int axis)
+{
+       return axis == ABS_MT_SLOT ||
+               (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST);
+}
+
 void input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
 
index 31afb7e..cdced84 100644 (file)
@@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr);
  */
 extern struct pid *find_get_pid(int nr);
 extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-int next_pidmap(struct pid_namespace *pid_ns, int last);
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
 
 extern struct pid *alloc_pid(struct pid_namespace *ns);
 extern void free_pid(struct pid *pid);
index 369e19d..7f1183d 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/posix-timers.h>
+#include <linux/rwsem.h>
 
 struct posix_clock;
 
@@ -104,7 +105,7 @@ struct posix_clock_operations {
  * @ops:     Functional interface to the clock
  * @cdev:    Character device instance for this clock
  * @kref:    Reference count.
- * @mutex:   Protects the 'zombie' field from concurrent access.
+ * @rwsem:   Protects the 'zombie' field from concurrent access.
  * @zombie:  If 'zombie' is true, then the hardware has disappeared.
  * @release: A function to free the structure when the reference count reaches
  *           zero. May be NULL if structure is statically allocated.
@@ -117,7 +118,7 @@ struct posix_clock {
        struct posix_clock_operations ops;
        struct cdev cdev;
        struct kref kref;
-       struct mutex mutex;
+       struct rw_semaphore rwsem;
        bool zombie;
        void (*release)(struct posix_clock *clk);
 };
index ca02f17..8ce59ef 100644 (file)
@@ -1456,7 +1456,7 @@ struct security_operations {
                             struct inode *new_dir, struct dentry *new_dentry);
        int (*inode_readlink) (struct dentry *dentry);
        int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
-       int (*inode_permission) (struct inode *inode, int mask);
+       int (*inode_permission) (struct inode *inode, int mask, unsigned flags);
        int (*inode_setattr)    (struct dentry *dentry, struct iattr *attr);
        int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
        int (*inode_setxattr) (struct dentry *dentry, const char *name,
index 3c7329b..0e18550 100644 (file)
@@ -103,8 +103,8 @@ struct driver_info {
  * Indicates to usbnet, that USB driver accumulates multiple IP packets.
  * Affects statistic (counters) and short packet handling.
  */
-#define FLAG_MULTI_PACKET      0x1000
-#define FLAG_RX_ASSEMBLE       0x2000  /* rx packets may span >1 frames */
+#define FLAG_MULTI_PACKET      0x2000
+#define FLAG_RX_ASSEMBLE       0x4000  /* rx packets may span >1 frames */
 
        /* init device ... can sleep, or cause probe() failure */
        int     (*bind)(struct usbnet *, struct usb_interface *);
index 02f2212..57a8346 100644 (file)
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
        return -1;
 }
 
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
 {
        int offset;
        struct pidmap *map, *end;
 
+       if (last >= PID_MAX_LIMIT)
+               return -1;
+
        offset = (last + 1) & BITS_PER_PAGE_MASK;
        map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
        end = &pid_ns->pidmap[PIDMAP_ENTRIES];
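
Making last unsigned and bounding it against PID_MAX_LIMIT, mirrored by the f_pos check in proc_pid_readdir() above, stops a huge lseek() offset from turning into an out-of-range pidmap[] index. A minimal model of the guard (the limit value here is illustrative; the real one is config-dependent):

    #include <stdio.h>

    #define PID_MAX_LIMIT (1 << 22)

    static int next_pidmap(unsigned int last)
    {
        if (last >= PID_MAX_LIMIT)
            return -1;          /* refuse out-of-range cursors up front */
        return (int)(last + 1); /* safe: cannot index past the map */
    }

    int main(void)
    {
        printf("%d\n", next_pidmap(100));          /* 101 */
        printf("%d\n", next_pidmap(0x7fffffffu));  /* -1; was an OOB access when signed */
        return 0;
    }
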
index 25028dd..c340ca6 100644 (file)
@@ -19,7 +19,6 @@
  */
 #include <linux/device.h>
 #include <linux/file.h>
-#include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
 {
        struct posix_clock *clk = fp->private_data;
 
-       mutex_lock(&clk->mutex);
+       down_read(&clk->rwsem);
 
        if (!clk->zombie)
                return clk;
 
-       mutex_unlock(&clk->mutex);
+       up_read(&clk->rwsem);
 
        return NULL;
 }
 
 static void put_posix_clock(struct posix_clock *clk)
 {
-       mutex_unlock(&clk->mutex);
+       up_read(&clk->rwsem);
 }
 
 static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
        struct posix_clock *clk =
                container_of(inode->i_cdev, struct posix_clock, cdev);
 
-       mutex_lock(&clk->mutex);
+       down_read(&clk->rwsem);
 
        if (clk->zombie) {
                err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
                fp->private_data = clk;
        }
 out:
-       mutex_unlock(&clk->mutex);
+       up_read(&clk->rwsem);
        return err;
 }
 
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
        int err;
 
        kref_init(&clk->kref);
-       mutex_init(&clk->mutex);
+       init_rwsem(&clk->rwsem);
 
        cdev_init(&clk->cdev, &posix_clock_file_operations);
        clk->cdev.owner = clk->ops.owner;
        err = cdev_add(&clk->cdev, devid, 1);
-       if (err)
-               goto no_cdev;
 
        return err;
-no_cdev:
-       mutex_destroy(&clk->mutex);
-       return err;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
 static void delete_clock(struct kref *kref)
 {
        struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-       mutex_destroy(&clk->mutex);
+
        if (clk->release)
                clk->release(clk);
 }
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
 {
        cdev_del(&clk->cdev);
 
-       mutex_lock(&clk->mutex);
+       down_write(&clk->rwsem);
        clk->zombie = true;
-       mutex_unlock(&clk->mutex);
+       up_write(&clk->rwsem);
 
        kref_put(&clk->kref, delete_clock);
 }
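
Switching from a mutex to an rwsem lets every open file descriptor hold the clock shared for the duration of an operation, while unregister takes the lock exclusive to flip zombie, guaranteeing no reader is still inside the clock when it goes away. A userspace model of that handshake with a pthread rwlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static int zombie;

    static int get_clock(void)          /* like get_posix_clock() */
    {
        pthread_rwlock_rdlock(&rwsem);
        if (!zombie)
            return 0;                   /* caller now holds the lock shared */
        pthread_rwlock_unlock(&rwsem);
        return -1;                      /* models -ENODEV */
    }

    static void put_clock(void)         { pthread_rwlock_unlock(&rwsem); }

    static void unregister_clock(void)  /* like posix_clock_unregister() */
    {
        pthread_rwlock_wrlock(&rwsem);
        zombie = 1;
        pthread_rwlock_unlock(&rwsem);
    }

    int main(void)
    {
        if (get_clock() == 0) { puts("using clock"); put_clock(); }
        unregister_clock();
        printf("after unregister: %d\n", get_clock());
        return 0;
    }
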
index 008ff6c..f3bc322 100644 (file)
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
                goto drop;
        }
 
-       /* Zero out the CB buffer if no options present */
-       if (iph->ihl == 5) {
-               memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+       memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+       if (iph->ihl == 5)
                return 0;
-       }
 
        opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
        if (ip_options_compile(dev_net(dev), opt, skb))
index 27dab26..054fdb5 100644 (file)
@@ -13,6 +13,7 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+
 #define container_obj(layr) ((struct cfsrvl *) layr)
 
 #define DGM_CMD_BIT  0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+       u8 packet_type;
        u32 zero = 0;
        struct caif_payload_info *info;
        struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
        if (cfpkt_getlen(pkt) > DGM_MTU)
                return -EMSGSIZE;
 
-       cfpkt_add_head(pkt, &zero, 4);
+       cfpkt_add_head(pkt, &zero, 3);
+       packet_type = 0x08; /* B9 set - UNCLASSIFIED */
+       cfpkt_add_head(pkt, &packet_type, 1);
 
        /* Add info for MUX-layer to route the packet out. */
        info = cfpkt_info(pkt);
index 46f34b2..24f1ffa 100644 (file)
@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                                int phyid)
 {
        struct cfmuxl *muxl = container_obj(layr);
-       struct list_head *node;
+       struct list_head *node, *next;
        struct cflayer *layer;
-       list_for_each(node, &muxl->srvl_list) {
+       list_for_each_safe(node, next, &muxl->srvl_list) {
                layer = list_entry(node, struct cflayer, node);
                if (cfsrvl_phyid_match(layer, phyid))
                        layer->ctrlcmd(layer, ctrl, phyid);
index 956d3b0..c2ac599 100644 (file)
@@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
        }
 
        /* TSO requires that SG is present as well. */
-       if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
-               netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
-               features &= ~NETIF_F_TSO;
+       if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+               netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+               features &= ~NETIF_F_ALL_TSO;
        }
 
+       /* TSO ECN requires that TSO is present as well. */
+       if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+               features &= ~NETIF_F_TSO_ECN;
+
        /* Software GSO depends on SG. */
        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
                netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
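
The fix-ups now treat feature dependencies as a group: every TSO variant is dropped when SG is missing, and TSO_ECN is cleared when it would be the only TSO bit left. A minimal model with illustrative flag values (not the kernel's NETIF_F_* constants):

    #include <stdio.h>

    #define F_SG       0x01
    #define F_TSO      0x02
    #define F_TSO6     0x04
    #define F_TSO_ECN  0x08
    #define F_ALL_TSO  (F_TSO | F_TSO6 | F_TSO_ECN)

    static unsigned int fix_features(unsigned int f)
    {
        if ((f & F_ALL_TSO) && !(f & F_SG))
            f &= ~F_ALL_TSO;          /* every TSO flavour rides on SG */
        if ((f & F_ALL_TSO) == F_TSO_ECN)
            f &= ~F_TSO_ECN;          /* ECN alone can't segment anything */
        return f;
    }

    int main(void)
    {
        printf("%#x\n", fix_features(F_TSO | F_TSO6));    /* 0: no SG */
        printf("%#x\n", fix_features(F_SG | F_TSO_ECN));  /* SG only */
        return 0;
    }
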
index ce2d335..5761185 100644 (file)
@@ -1,5 +1,3 @@
 obj-$(CONFIG_IEEE802154) +=    ieee802154.o af_802154.o
 ieee802154-y           := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
 af_802154-y            := af_ieee802154.o raw.o dgram.o
-
-ccflags-y += -Wall -DDEBUG
index 6c0b7f4..38f23e7 100644 (file)
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
-                           ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+                           sk2->sk_state == TCP_LISTEN) {
                                const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                                    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
-                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
-                                                   !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
                                                        spin_unlock(&head->lock);
                                                        snum = smallest_rover;
                                                        goto have_snum;
index dd1b20e..9df4e63 100644 (file)
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
 }
 
 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+                            struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
        int do_free;
 
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
         * We use refcnt=-1 to alert lockless readers this entry is deleted.
         */
        if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-               struct inet_peer __rcu **stack[PEER_MAXDEPTH];
                struct inet_peer __rcu ***stackptr, ***delp;
                if (lookup(&p->daddr, stack, base) != p)
                        BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
 }
 
 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
        struct inet_peer *p = NULL;
 
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
                 * happen because of entry limits in route cache. */
                return -1;
 
-       unlink_from_pool(p, peer_to_base(p));
+       unlink_from_pool(p, peer_to_base(p), stack);
        return 0;
 }
 
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 
        if (base->total >= inet_peer_threshold)
                /* Remove one less-recently-used entry. */
-               cleanup_once(0);
+               cleanup_once(0, stack);
 
        return p;
 }
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
 {
        unsigned long now = jiffies;
        int ttl, total;
+       struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 
        total = compute_total();
        if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        total / inet_peer_threshold * HZ;
-       while (!cleanup_once(ttl)) {
+       while (!cleanup_once(ttl, stack)) {
                if (jiffies != now)
                        break;
        }
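
Threading the PEER_MAXDEPTH lookup stack down from the outermost caller avoids a second large pointer array on the frame of unlink_from_pool(), which could otherwise stack two such arrays on one call chain. The shape of the change, as a trivial sketch:

    #include <stdio.h>

    #define MAXDEPTH 40

    /* Callee uses the caller's scratch array instead of declaring its own. */
    static void unlink_one(void **stack[MAXDEPTH])
    {
        stack[0] = NULL;   /* stands in for the AVL unlink bookkeeping */
    }

    static int cleanup_once(void **stack[MAXDEPTH])
    {
        unlink_one(stack); /* no second MAXDEPTH array on this frame */
        return 0;
    }

    int main(void)
    {
        void **stack[MAXDEPTH]; /* allocated once, in the outermost frame */

        cleanup_once(stack);
        puts("ok");
        return 0;
    }
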
index 28a736f..2391b24 100644 (file)
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
                                        pp_ptr = optptr + 2;
                                        goto error;
                                }
-                               if (skb) {
+                               if (rt) {
                                        memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
                                        opt->is_changed = 1;
                                }
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
                                                goto error;
                                        }
                                        opt->ts = optptr - iph;
-                                       if (skb) {
+                                       if (rt)  {
                                                memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
                                                timeptr = (__be32*)&optptr[optptr[2]+3];
                                        }
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
        unsigned long orefdst;
        int err;
 
-       if (!opt->srr)
+       if (!opt->srr || !rt)
                return 0;
 
        if (skb->pkt_type != PACKET_HOST)
index 1a45665..321e6e8 100644 (file)
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_do_large_bitmap,
        },
-#ifdef CONFIG_IP_MULTICAST
        {
                .procname       = "igmp_max_memberships",
                .data           = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-
-#endif
        {
                .procname       = "igmp_max_msf",
                .data           = &sysctl_igmp_max_msf,
index 1660546..f2c5b0f 100644 (file)
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
                    (!sk->sk_reuse || !sk2->sk_reuse ||
-                    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+                    sk2->sk_state == TCP_LISTEN) &&
                     ipv6_rcv_saddr_equal(sk, sk2))
                        break;
        }
index c9890e2..cc61697 100644 (file)
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
        /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
                               MSG_NOSIGNAL)) {
-               err = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        lock_sock(sk);
index 058f1e9..9032421 100644 (file)
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
                s32 data_size = ntohs(pdulen) - llc_len;
 
                if (data_size < 0 ||
-                   ((skb_tail_pointer(skb) -
-                     (u8 *)pdu) - llc_len) < data_size)
+                   !pskb_may_pull(skb, data_size))
                        return 0;
                if (unlikely(pskb_trim_rcsum(skb, data_size)))
                        return 0;
index 00a3324..a274300 100644 (file)
@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct ipmac data;
 
+       /* MAC can be src only */
+       if (!(flags & IPSET_DIM_TWO_SRC))
+               return 0;
+
        data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
        if (data.id < map->first_ip || data.id > map->last_ip)
                return -IPSET_ERR_BITMAP_RANGE;
index 9152e69..72d1ac6 100644 (file)
@@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        if (cb->args[1] >= ip_set_max)
                goto out;
 
-       pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
        max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+       pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
        for (; cb->args[1] < max; cb->args[1]++) {
                index = (ip_set_id_t) cb->args[1];
                set = ip_set_list[index];
@@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
                 * so that lists (unions of sets) are dumped last.
                 */
                if (cb->args[0] != DUMP_ONE &&
-                   !((cb->args[0] == DUMP_ALL) ^
-                     (set->type->features & IPSET_DUMP_LAST)))
+                   ((cb->args[0] == DUMP_ALL) ==
+                    !!(set->type->features & IPSET_DUMP_LAST)))
                        continue;
                pr_debug("List set: %s\n", set->name);
                if (!cb->args[2]) {
@@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
                        goto release_refcount;
                }
        }
+       /* If we dump all sets, continue with dumping last ones */
+       if (cb->args[0] == DUMP_ALL) {
+               cb->args[0] = DUMP_LAST;
+               cb->args[1] = 0;
+               goto dump_last;
+       }
        goto out;
 
 nla_put_failure:
@@ -1093,11 +1100,6 @@ release_refcount:
                pr_debug("release set %s\n", ip_set_list[index]->name);
                ip_set_put_byindex(index);
        }
-
-       /* If we dump all sets, continue with dumping last ones */
-       if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
-               cb->args[0] = DUMP_LAST;
-
 out:
        if (nlh) {
                nlmsg_end(skb, nlh);
index 061d48c..b3babae 100644 (file)
@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
        if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
                pr_warning("Protocol error: set match dimension "
                           "is over the limit!\n");
+               ip_set_nfnl_put(info->match_set.index);
                return -ERANGE;
        }
 
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find del_set index %u as target\n",
                                   info->del_set.index);
+                       if (info->add_set.index != IPSET_INVALID_ID)
+                               ip_set_nfnl_put(info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
            info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
                pr_warning("Protocol error: SET target dimension "
                           "is over the limit!\n");
+               if (info->add_set.index != IPSET_INVALID_ID)
+                       ip_set_nfnl_put(info->add_set.index);
+               if (info->del_set.index != IPSET_INVALID_ID)
+                       ip_set_nfnl_put(info->del_set.index);
                return -ERANGE;
        }
 
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
        if (info->match_set.dim > IPSET_DIM_MAX) {
                pr_warning("Protocol error: set match dimension "
                           "is over the limit!\n");
+               ip_set_nfnl_put(info->match_set.index);
                return -ERANGE;
        }
 
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
        if (info->del_set.index != IPSET_INVALID_ID)
                ip_set_del(info->del_set.index,
                           skb, par->family,
-                          info->add_set.dim,
+                          info->del_set.dim,
                           info->del_set.flags);
 
        return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find del_set index %u as target\n",
                                   info->del_set.index);
+                       if (info->add_set.index != IPSET_INVALID_ID)
+                               ip_set_nfnl_put(info->add_set.index);
                        return -ENOENT;
                }
        }
        if (info->add_set.dim > IPSET_DIM_MAX ||
-           info->del_set.flags > IPSET_DIM_MAX) {
+           info->del_set.dim > IPSET_DIM_MAX) {
                pr_warning("Protocol error: SET target dimension "
                           "is over the limit!\n");
+               if (info->add_set.index != IPSET_INVALID_ID)
+                       ip_set_nfnl_put(info->add_set.index);
+               if (info->del_set.index != IPSET_INVALID_ID)
+                       ip_set_nfnl_put(info->del_set.index);
                return -ERANGE;
        }
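
The checkentry error paths now drop every set reference taken before the failure instead of leaking them. The pattern, reduced to stand-in helpers (the error value models -ERANGE):

    #include <stdio.h>

    static int got_add, got_del;

    static void put_add(void) { got_add = 0; }
    static void put_del(void) { got_del = 0; }

    static int checkentry(int bad_dim)
    {
        got_add = got_del = 1;    /* both set references taken */
        if (bad_dim) {
            put_add();            /* undo everything grabbed so far */
            put_del();
            return -34;           /* models -ERANGE */
        }
        return 0;
    }

    int main(void)
    {
        printf("err=%d refs=%d%d\n", checkentry(1), got_add, got_del);
        return 0;
    }
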
 
index 0698cad..1a21c57 100644 (file)
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
                sctp_assoc_set_primary(asoc, transport);
        if (asoc->peer.active_path == peer)
                asoc->peer.active_path = transport;
+       if (asoc->peer.retran_path == peer)
+               asoc->peer.retran_path = transport;
        if (asoc->peer.last_data_from == peer)
                asoc->peer.last_data_from = transport;
 
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 
        if (t)
                asoc->peer.retran_path = t;
+       else
+               t = asoc->peer.retran_path;
 
        SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
                                 " %p addr: ",
index 2984ea4..bbb5115 100644 (file)
@@ -181,7 +181,7 @@ static int cap_inode_follow_link(struct dentry *dentry,
        return 0;
 }
 
-static int cap_inode_permission(struct inode *inode, int mask)
+static int cap_inode_permission(struct inode *inode, int mask, unsigned flags)
 {
        return 0;
 }
index 1011423..4ba6d4c 100644 (file)
@@ -518,16 +518,14 @@ int security_inode_permission(struct inode *inode, int mask)
 {
        if (unlikely(IS_PRIVATE(inode)))
                return 0;
-       return security_ops->inode_permission(inode, mask);
+       return security_ops->inode_permission(inode, mask, 0);
 }
 
 int security_inode_exec_permission(struct inode *inode, unsigned int flags)
 {
        if (unlikely(IS_PRIVATE(inode)))
                return 0;
-       if (flags)
-               return -ECHILD;
-       return security_ops->inode_permission(inode, MAY_EXEC);
+       return security_ops->inode_permission(inode, MAY_EXEC, flags);
 }
 
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
index f9c3764..a73f4e4 100644 (file)
@@ -2635,7 +2635,7 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *na
        return dentry_has_perm(cred, NULL, dentry, FILE__READ);
 }
 
-static int selinux_inode_permission(struct inode *inode, int mask)
+static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags)
 {
        const struct cred *cred = current_cred();
        struct common_audit_data ad;
@@ -2649,6 +2649,10 @@ static int selinux_inode_permission(struct inode *inode, int mask)
        if (!mask)
                return 0;
 
+       /* May be droppable after audit */
+       if (flags & IPERM_FLAG_RCU)
+               return -ECHILD;
+
        COMMON_AUDIT_DATA_INIT(&ad, FS);
        ad.u.fs.inode = inode;
 
index c6f8fca..400a5d5 100644 (file)
@@ -686,7 +686,7 @@ static int smack_inode_rename(struct inode *old_inode,
  *
  * Returns 0 if access is permitted, -EACCES otherwise
  */
-static int smack_inode_permission(struct inode *inode, int mask)
+static int smack_inode_permission(struct inode *inode, int mask, unsigned flags)
 {
        struct smk_audit_info ad;
 
@@ -696,6 +696,10 @@ static int smack_inode_permission(struct inode *inode, int mask)
         */
        if (mask == 0)
                return 0;
+
+       /* May be droppable after audit */
+       if (flags & IPERM_FLAG_RCU)
+               return -ECHILD;
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
        smk_ad_setfield_u_fs_inode(&ad, inode);
        return smk_curacc(smk_of_inode(inode), mask, &ad);
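
Both LSM hooks now receive the lookup flags and bail out with -ECHILD under IPERM_FLAG_RCU, since the audit path may sleep; the VFS then retries the permission check in ref-walk mode, where blocking is allowed. A minimal model of that bailout (constants are illustrative):

    #include <stdio.h>

    #define IPERM_FLAG_RCU 0x1

    static int inode_permission_hook(int mask, unsigned int flags)
    {
        if (!mask)
            return 0;
        if (flags & IPERM_FLAG_RCU)
            return -10;   /* models -ECHILD: drop out of rcu-walk, retry */
        /* audit setup and the real access decision would go here */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", inode_permission_hook(4, IPERM_FLAG_RCU));
        printf("%d\n", inode_permission_hook(4, 0));
        return 0;
    }
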
index 430f41d..759ade1 100644 (file)
@@ -937,6 +937,7 @@ void snd_hda_shutup_pins(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_HDA(snd_hda_shutup_pins);
 
+#ifdef SND_HDA_NEEDS_RESUME
 /* Restore the pin controls cleared previously via snd_hda_shutup_pins() */
 static void restore_shutup_pins(struct hda_codec *codec)
 {
@@ -953,6 +954,7 @@ static void restore_shutup_pins(struct hda_codec *codec)
        }
        codec->pins_shutup = 0;
 }
+#endif
 
 static void init_hda_cache(struct hda_cache_rec *cache,
                           unsigned int record_size);
@@ -1329,6 +1331,7 @@ static void purify_inactive_streams(struct hda_codec *codec)
        }
 }
 
+#ifdef SND_HDA_NEEDS_RESUME
 /* clean up all streams; called from suspend */
 static void hda_cleanup_all_streams(struct hda_codec *codec)
 {
@@ -1340,6 +1343,7 @@ static void hda_cleanup_all_streams(struct hda_codec *codec)
                        really_cleanup_stream(codec, p);
        }
 }
+#endif
 
 /*
  * amp access functions
index 52928d9..d3bd2c1 100644 (file)
@@ -14868,6 +14868,23 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
        alc_write_coef_idx(codec, 0x1e, coef | 0x80);
 }
 
+static void alc271_fixup_dmic(struct hda_codec *codec,
+                             const struct alc_fixup *fix, int action)
+{
+       static struct hda_verb verbs[] = {
+               {0x20, AC_VERB_SET_COEF_INDEX, 0x0d},
+               {0x20, AC_VERB_SET_PROC_COEF, 0x4000},
+               {}
+       };
+       unsigned int cfg;
+
+       if (strcmp(codec->chip_name, "ALC271X"))
+               return;
+       cfg = snd_hda_codec_get_pincfg(codec, 0x12);
+       if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
+               snd_hda_sequence_write(codec, verbs);
+}
+
 enum {
        ALC269_FIXUP_SONY_VAIO,
        ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -14876,6 +14893,7 @@ enum {
        ALC269_FIXUP_ASUS_G73JW,
        ALC269_FIXUP_LENOVO_EAPD,
        ALC275_FIXUP_SONY_HWEQ,
+       ALC271_FIXUP_DMIC,
 };
 
 static const struct alc_fixup alc269_fixups[] = {
@@ -14929,7 +14947,11 @@ static const struct alc_fixup alc269_fixups[] = {
                .v.func = alc269_fixup_hweq,
                .chained = true,
                .chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2
-       }
+       },
+       [ALC271_FIXUP_DMIC] = {
+               .type = ALC_FIXUP_FUNC,
+               .v.func = alc271_fixup_dmic,
+       },
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -14938,6 +14960,7 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+       SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
index f7cd346..f5ccdbf 100644 (file)
@@ -308,8 +308,6 @@ static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
        snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
                ARRAY_SIZE(jz4740_codec_dapm_routes));
 
-       snd_soc_dapm_new_widgets(codec);
-
        jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
        return 0;
index a54d2a5..4d9fb27 100644 (file)
@@ -927,7 +927,7 @@ static struct platform_driver sn95031_codec_driver = {
                .owner          = THIS_MODULE,
        },
        .probe          = sn95031_device_probe,
-       .remove         = sn95031_device_remove,
+       .remove         = __devexit_p(sn95031_device_remove),
 };
 
 static int __init sn95031_init(void)
index ae1cadf..f52b623 100644 (file)
@@ -247,8 +247,6 @@ static int wm8903_volatile_register(struct snd_soc_codec *codec, unsigned int re
        case WM8903_REVISION_NUMBER:
        case WM8903_INTERRUPT_STATUS_1:
        case WM8903_WRITE_SEQUENCER_4:
-       case WM8903_POWER_MANAGEMENT_3:
-       case WM8903_POWER_MANAGEMENT_2:
        case WM8903_DC_SERVO_READBACK_1:
        case WM8903_DC_SERVO_READBACK_2:
        case WM8903_DC_SERVO_READBACK_3:
@@ -875,34 +873,40 @@ SND_SOC_DAPM_MIXER("Left Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 1, 0,
 SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0,
                   right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
 
-SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
-                  4, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
+SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
+                  1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
                   0, 0, NULL, 0),
 
-SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
+SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 1, 0,
                   NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
+SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 0, 0,
                   NULL, 0),
 
 SND_SOC_DAPM_PGA_S("HPL_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 7, 0, NULL, 0),
 SND_SOC_DAPM_PGA_S("HPL_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 6, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA", 1, WM8903_ANALOGUE_HP_0, 4, 0, NULL, 0),
 SND_SOC_DAPM_PGA_S("HPR_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 3, 0, NULL, 0),
 SND_SOC_DAPM_PGA_S("HPR_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 2, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA", 1, WM8903_ANALOGUE_HP_0, 0, 0, NULL, 0),
 
 SND_SOC_DAPM_PGA_S("LINEOUTL_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 7, 0,
                   NULL, 0),
 SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 6, 0,
                   NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+                  NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
                   NULL, 0),
 SND_SOC_DAPM_PGA_S("LINEOUTR_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 3, 0,
                   NULL, 0),
 SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 2, 0,
                   NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+                  NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
                   NULL, 0),
 
 SND_SOC_DAPM_SUPPLY("DCS Master", WM8903_DC_SERVO_0, 4, 0, NULL, 0),
@@ -1037,10 +1041,14 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "Left Speaker PGA", NULL, "Left Speaker Mixer" },
        { "Right Speaker PGA", NULL, "Right Speaker Mixer" },
 
-       { "HPL_ENA_DLY", NULL, "Left Headphone Output PGA" },
-       { "HPR_ENA_DLY", NULL, "Right Headphone Output PGA" },
-       { "LINEOUTL_ENA_DLY", NULL, "Left Line Output PGA" },
-       { "LINEOUTR_ENA_DLY", NULL, "Right Line Output PGA" },
+       { "HPL_ENA", NULL, "Left Headphone Output PGA" },
+       { "HPR_ENA", NULL, "Right Headphone Output PGA" },
+       { "HPL_ENA_DLY", NULL, "HPL_ENA" },
+       { "HPR_ENA_DLY", NULL, "HPR_ENA" },
+       { "LINEOUTL_ENA", NULL, "Left Line Output PGA" },
+       { "LINEOUTR_ENA", NULL, "Right Line Output PGA" },
+       { "LINEOUTL_ENA_DLY", NULL, "LINEOUTL_ENA" },
+       { "LINEOUTR_ENA_DLY", NULL, "LINEOUTR_ENA" },
 
        { "HPL_DCS", NULL, "DCS Master" },
        { "HPR_DCS", NULL, "DCS Master" },
index 3290333..84e1bd1 100644 (file)
@@ -3261,20 +3261,36 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
        wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
        /* Latch volume update (VU) bits on both channels of each pair. */
+       snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
+                           WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
        snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
                            WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
+       snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
+                           WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
        snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
                            WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
+       snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
+                           WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
        snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
                            WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
+       snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
+                           WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
        snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
                            WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
+       snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
+                           WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
        snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
                            WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
+       snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
+                           WM8994_AIF2ADC_VU, WM8994_AIF2ADC_VU);
        snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
                            WM8994_AIF2ADC_VU, WM8994_AIF2ADC_VU);
+       snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
+                           WM8994_DAC1_VU, WM8994_DAC1_VU);
        snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
                            WM8994_DAC1_VU, WM8994_DAC1_VU);
+       snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
+                           WM8994_DAC2_VU, WM8994_DAC2_VU);
        snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
                            WM8994_DAC2_VU, WM8994_DAC2_VU);
 
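
For context on the hunk above: on this codec each stereo volume pair carries a volume-update (VU) bit, and a pending volume write only takes effect once a register with VU set is written. Asserting VU on both the left and right registers makes the latch independent of write order. A hedged sketch of the idiom, using illustrative DEMO_* register and mask names:

    static void demo_latch_volume_pair(struct snd_soc_codec *codec)
    {
            /* Each write is held pending until a register with the VU
             * bit set is written; setting VU on both halves latches
             * the pair regardless of which half is written last. */
            snd_soc_update_bits(codec, DEMO_LEFT_VOL, DEMO_VU, DEMO_VU);
            snd_soc_update_bits(codec, DEMO_RIGHT_VOL, DEMO_VU, DEMO_VU);
    }
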
index 7b6b3c1..4005e9a 100644 (file)
@@ -740,12 +740,12 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 
        { "SPKL", "Input Switch", "MIXINL" },
        { "SPKL", "IN1LP Switch", "IN1LP" },
-       { "SPKL", "Output Switch", "Left Output Mixer" },
+       { "SPKL", "Output Switch", "Left Output PGA" },
        { "SPKL", NULL, "TOCLK" },
 
        { "SPKR", "Input Switch", "MIXINR" },
        { "SPKR", "IN1RP Switch", "IN1RP" },
-       { "SPKR", "Output Switch", "Right Output Mixer" },
+       { "SPKR", "Output Switch", "Right Output PGA" },
        { "SPKR", NULL, "TOCLK" },
 
        { "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
@@ -767,8 +767,8 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
        { "SPKOUTRP", NULL, "SPKR Driver" },
        { "SPKOUTRN", NULL, "SPKR Driver" },
 
-       { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
-       { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+       { "Left Headphone Mux", "Mixer", "Left Output PGA" },
+       { "Right Headphone Mux", "Mixer", "Right Output PGA" },
 
        { "Headphone PGA", NULL, "Left Headphone Mux" },
        { "Headphone PGA", NULL, "Right Headphone Mux" },
index b2e9198..d567c32 100644 (file)
@@ -116,18 +116,20 @@ struct snd_soc_dai_driver sst_platform_dai[] = {
 static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
                                        int state)
 {
-       spin_lock(&stream->status_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&stream->status_lock, flags);
        stream->stream_status = state;
-       spin_unlock(&stream->status_lock);
+       spin_unlock_irqrestore(&stream->status_lock, flags);
 }
 
 static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
 {
        int state;
+       unsigned long flags;
 
-       spin_lock(&stream->status_lock);
+       spin_lock_irqsave(&stream->status_lock, flags);
        state = stream->stream_status;
-       spin_unlock(&stream->status_lock);
+       spin_unlock_irqrestore(&stream->status_lock, flags);
        return state;
 }
 
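
The conversion above to the _irqsave variants matters because these stream-status helpers can be reached from interrupt context; taking a plain spin_lock with interrupts enabled would let an IRQ handler contending for the same lock deadlock against the holder. A minimal sketch of the pattern, with illustrative names:

    #include <linux/spinlock.h>

    struct demo_stream {
            spinlock_t status_lock;
            int status;
    };

    static void demo_set_status(struct demo_stream *s, int state)
    {
            unsigned long flags;

            /* Disables local interrupts while the lock is held and
             * restores the previous interrupt state on unlock. */
            spin_lock_irqsave(&s->status_lock, flags);
            s->status = state;
            spin_unlock_irqrestore(&s->status_lock, flags);
    }
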
index 38aac7d..9c7e8b4 100644 (file)
@@ -350,8 +350,8 @@ static int s3c_pcm_set_fmt(struct snd_soc_dai *cpu_dai,
        ctl = readl(regs + S3C_PCM_CTL);
 
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-       case SND_SOC_DAIFMT_NB_NF:
-               /* Nothing to do, NB_NF by default */
+       case SND_SOC_DAIFMT_IB_NF:
+               /* Nothing to do, IB_NF by default */
                break;
        default:
                dev_err(pcm->dev, "Unsupported clock inversion!\n");
index 0c9997e..23c0e83 100644 (file)
@@ -1200,10 +1200,11 @@ static int fsi_probe(struct platform_device *pdev)
        master->fsib.master     = master;
 
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_resume(&pdev->dev);
        dev_set_drvdata(&pdev->dev, master);
 
+       pm_runtime_get_sync(&pdev->dev);
        fsi_soft_all_reset(master);
+       pm_runtime_put_sync(&pdev->dev);
 
        ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED,
                          id_entry->name, master);
@@ -1218,8 +1219,17 @@ static int fsi_probe(struct platform_device *pdev)
                goto exit_free_irq;
        }
 
-       return snd_soc_register_dais(&pdev->dev, fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
+       ret = snd_soc_register_dais(&pdev->dev, fsi_soc_dai,
+                                   ARRAY_SIZE(fsi_soc_dai));
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot register snd dai\n");
+               goto exit_snd_soc;
+       }
+
+       return ret;
 
+exit_snd_soc:
+       snd_soc_unregister_platform(&pdev->dev);
 exit_free_irq:
        free_irq(irq, master);
 exit_iounmap:
@@ -1238,12 +1248,11 @@ static int fsi_remove(struct platform_device *pdev)
 
        master = dev_get_drvdata(&pdev->dev);
 
-       snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
-       snd_soc_unregister_platform(&pdev->dev);
-
+       free_irq(master->irq, master);
        pm_runtime_disable(&pdev->dev);
 
-       free_irq(master->irq, master);
+       snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
+       snd_soc_unregister_platform(&pdev->dev);
 
        iounmap(master->base);
        kfree(master);
@@ -1321,3 +1330,4 @@ module_exit(fsi_mobile_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
 MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_ALIAS("platform:fsi-pcm-audio");
index b76b74d..d8562ce 100644 (file)
@@ -629,6 +629,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
                        runtime->hw.rates |= codec_dai_drv->capture.rates;
        }
 
+       ret = -EINVAL;
        snd_pcm_limit_hw_rates(runtime);
        if (!runtime->hw.rates) {
                printk(KERN_ERR "asoc: %s <-> %s No matching rates\n",
@@ -640,7 +641,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
                        codec_dai->name, cpu_dai->name);
                goto config_err;
        }
-       if (!runtime->hw.channels_min || !runtime->hw.channels_max) {
+       if (!runtime->hw.channels_min || !runtime->hw.channels_max ||
+           runtime->hw.channels_min > runtime->hw.channels_max) {
                printk(KERN_ERR "asoc: %s <-> %s No matching channels\n",
                                codec_dai->name, cpu_dai->name);
                goto config_err;
@@ -2060,6 +2062,7 @@ const struct dev_pm_ops snd_soc_pm_ops = {
        .resume = snd_soc_resume,
        .poweroff = snd_soc_poweroff,
 };
+EXPORT_SYMBOL_GPL(snd_soc_pm_ops);
 
 /* ASoC platform driver */
 static struct platform_driver soc_driver = {
index 8585957..556a571 100644 (file)
@@ -370,6 +370,7 @@ static struct platform_driver tegra_snd_harmony_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
+               .pm = &snd_soc_pm_ops,
        },
        .probe = tegra_snd_harmony_probe,
        .remove = __devexit_p(tegra_snd_harmony_remove),
index 17d1dcb..4165382 100644 (file)
@@ -163,6 +163,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */
 
+       attr->inherit           = !no_inherit;
        attr->read_format       = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                  PERF_FORMAT_TOTAL_TIME_RUNNING |
                                  PERF_FORMAT_ID;
@@ -251,6 +252,9 @@ static void open_counters(struct perf_evlist *evlist)
 {
        struct perf_evsel *pos;
 
+       if (evlist->cpus->map[0] < 0)
+               no_inherit = true;
+
        list_for_each_entry(pos, &evlist->entries, node) {
                struct perf_event_attr *attr = &pos->attr;
                /*
@@ -271,8 +275,7 @@ static void open_counters(struct perf_evlist *evlist)
 retry_sample_id:
                attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
-               if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
-                                    !no_inherit) < 0) {
+               if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
                        int err = errno;
 
                        if (err == EPERM || err == EACCES) {
index e2109f9..03f0e45 100644 (file)
@@ -167,16 +167,17 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
+       attr->inherit = !no_inherit;
+
        if (system_wide)
-               return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
+               return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
 
-       attr->inherit = !no_inherit;
        if (target_pid == -1 && target_tid == -1) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
 
-       return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
+       return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
 }
 
 /*
index 1b2106c..11e3c84 100644 (file)
@@ -290,7 +290,7 @@ static int test__open_syscall_event(void)
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
+       if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -303,7 +303,7 @@ static int test__open_syscall_event(void)
        }
 
        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
-               pr_debug("perf_evsel__open_read_on_cpu\n");
+               pr_debug("perf_evsel__read_on_cpu\n");
                goto out_close_fd;
        }
 
@@ -365,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+       if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -418,7 +418,7 @@ static int test__open_syscall_event_on_all_cpus(void)
                        continue;
 
                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
-                       pr_debug("perf_evsel__open_read_on_cpu\n");
+                       pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }
@@ -529,7 +529,7 @@ static int test__basic_mmap(void)
 
                perf_evlist__add(evlist, evsels[i]);
 
-               if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+               if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                                 strerror(errno));
index fc1273e..7e3d6e3 100644 (file)
@@ -845,9 +845,10 @@ static void start_counters(struct perf_evlist *evlist)
                }
 
                attr->mmap = 1;
+               attr->inherit = inherit;
 try_again:
                if (perf_evsel__open(counter, top.evlist->cpus,
-                                    top.evlist->threads, group, inherit) < 0) {
+                                    top.evlist->threads, group) < 0) {
                        int err = errno;
 
                        if (err == EPERM || err == EACCES) {
index d852cef..45da8d1 100644 (file)
@@ -12,6 +12,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "util.h"
+#include "debug.h"
 
 #include <sys/mman.h>
 
@@ -250,15 +251,19 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
        return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
-                              int mask, int fd)
+static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+                              int cpu, int prot, int mask, int fd)
 {
        evlist->mmap[cpu].prev = 0;
        evlist->mmap[cpu].mask = mask;
        evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
-       if (evlist->mmap[cpu].base == MAP_FAILED)
+       if (evlist->mmap[cpu].base == MAP_FAILED) {
+               if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+                       ui__warning("Inherit is not allowed on per-task "
+                                   "events using mmap.\n");
                return -1;
+       }
 
        perf_evlist__add_pollfd(evlist, fd);
        return 0;
@@ -312,7 +317,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
                                                  FD(first_evsel, cpu, 0)) != 0)
                                                goto out_unmap;
-                               } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+                               } else if (__perf_evlist__mmap(evlist, evsel, cpu,
+                                                              prot, mask, fd) < 0)
                                        goto out_unmap;
 
                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
index 662596a..d6fd59b 100644 (file)
@@ -175,7 +175,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 }
 
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                             struct thread_map *threads, bool group, bool inherit)
+                             struct thread_map *threads, bool group)
 {
        int cpu, thread;
        unsigned long flags = 0;
@@ -192,19 +192,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
                int group_fd = -1;
-               /*
-                * Don't allow mmap() of inherited per-task counters. This
-                * would create a performance issue due to all children writing
-                * to the same buffer.
-                *
-                * FIXME:
-                * Proper fix is not to pass 'inherit' to perf_evsel__open*,
-                * but a 'flags' parameter, with 'group' folded there as well,
-                * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
-                * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
-                * set. Lets go for the minimal fix first tho.
-                */
-               evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
 
                for (thread = 0; thread < threads->nr; thread++) {
 
@@ -253,7 +240,7 @@ static struct {
 };
 
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                    struct thread_map *threads, bool group, bool inherit)
+                    struct thread_map *threads, bool group)
 {
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
@@ -263,19 +250,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
        if (threads == NULL)
                threads = &empty_thread_map.map;
 
-       return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+       return __perf_evsel__open(evsel, cpus, threads, group);
 }
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-                            struct cpu_map *cpus, bool group, bool inherit)
+                            struct cpu_map *cpus, bool group)
 {
-       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-                               struct thread_map *threads, bool group, bool inherit)
+                               struct thread_map *threads, bool group)
 {
-       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
index 6710ab5..f79bb2c 100644 (file)
@@ -81,11 +81,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-                            struct cpu_map *cpus, bool group, bool inherit);
+                            struct cpu_map *cpus, bool group);
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-                               struct thread_map *threads, bool group, bool inherit);
+                               struct thread_map *threads, bool group);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                    struct thread_map *threads, bool group, bool inherit);
+                    struct thread_map *threads, bool group);
 
 #define perf_evsel__match(evsel, t, c)         \
        (evsel->attr.type == PERF_TYPE_##t &&   \
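
With the three-argument prototypes above, callers no longer pass an inherit flag into the open helpers; instead they set attr.inherit on the evsel before opening, as builtin-record, builtin-stat, and builtin-top now do. A sketch of the resulting calling convention (the wrapper itself is illustrative; the helpers are the perf API declared above):

    static int demo_open(struct perf_evsel *evsel, struct cpu_map *cpus,
                         struct thread_map *threads, bool no_inherit)
    {
            evsel->attr.inherit = !no_inherit;  /* set before the open call */
            return perf_evsel__open(evsel, cpus, threads, false /* group */);
    }
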
index a9f2d7e..f5e3845 100644 (file)
@@ -498,11 +498,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
        struct cpu_map *cpus = NULL;
        struct thread_map *threads = NULL;
        PyObject *pcpus = NULL, *pthreads = NULL;
-       int group = 0, overwrite = 0;
-       static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+       int group = 0, inherit = 0;
+       static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL};
 
        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
-                                        &pcpus, &pthreads, &group, &overwrite))
+                                        &pcpus, &pthreads, &group, &inherit))
                return NULL;
 
        if (pthreads != NULL)
@@ -511,7 +511,8 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
        if (pcpus != NULL)
                cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
 
-       if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+       evsel->attr.inherit = inherit;
+       if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }
index 8c17a87..15633d6 100644 (file)
@@ -256,10 +256,9 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
                         int refresh)
 {
        struct objdump_line *pos, *n;
-       struct annotation *notes = symbol__annotation(sym);
+       struct annotation *notes;
        struct annotate_browser browser = {
                .b = {
-                       .entries = &notes->src->source,
                        .refresh = ui_browser__list_head_refresh,
                        .seek    = ui_browser__list_head_seek,
                        .write   = annotate_browser__write,
@@ -281,6 +280,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 
        ui_helpline__push("Press <- or ESC to exit");
 
+       notes = symbol__annotation(sym);
+
        list_for_each_entry(pos, &notes->src->source, node) {
                struct objdump_line_rb_node *rbpos;
                size_t line_len = strlen(pos->line);
@@ -291,6 +292,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
                rbpos->idx = browser.b.nr_entries++;
        }
 
+       browser.b.entries = &notes->src->source;
        browser.b.width += 18; /* Percentage */
        ret = annotate_browser__run(&browser, evidx, refresh);
        list_for_each_entry_safe(pos, n, &notes->src->source, node) {
index 798efdc..5d767c6 100644 (file)
@@ -851,7 +851,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel,
                        goto out_free_stack;
                case 'a':
                        if (browser->selection == NULL ||
-                           browser->selection->map == NULL ||
+                           browser->selection->sym == NULL ||
                            browser->selection->map->dso->annotate_warned)
                                continue;
                        goto do_annotate;