Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
author	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 19 Apr 2011 22:16:41 +0000 (15:16 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 19 Apr 2011 22:16:41 +0000 (15:16 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (51 commits)
  netfilter: ipset: Fix the order of listing of sets
  ip6_pol_route panic: Do not allow VLAN on loopback
  bnx2x: Fix port identification problem
  r8169: add Realtek as maintainer.
  ip: ip_options_compile() resilient to NULL skb route
  bna: fix memory leak during RX path cleanup
  bna: fix for clean fw re-initialization
  usbnet: Fix up 'FLAG_POINTTOPOINT' and 'FLAG_MULTI_PACKET' overlaps.
  iwlegacy: fix tx_power initialization
  Revert "tcp: disallow bind() to reuse addr/port"
  qlcnic: limit skb frags for non tso packet
  net: can: mscan: fix build breakage in mpc5xxx_can
  netfilter: ipset: set match and SET target fixes
  netfilter: ipset: bitmap:ip,mac type requires "src" for MAC
  sctp: fix oops while removed transport still using as retran path
  sctp: fix oops when updating retransmit path with DEBUG on
  net: Disable NETIF_F_TSO_ECN when TSO is disabled
  net: Disable all TSO features when SG is disabled
  sfc: Use rmb() to ensure reads occur in order
  ieee802154: Remove hacked CFLAGS in net/ieee802154/Makefile
  ...

348 files changed:
Documentation/feature-removal-schedule.txt
Documentation/input/event-codes.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/alpha/kernel/Makefile
arch/alpha/kernel/core_mcpcia.c
arch/alpha/kernel/err_titan.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/smc37c93x.c
arch/alpha/kernel/sys_wildfire.c
arch/alpha/kernel/time.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/common/Makefile
arch/arm/include/asm/thread_notify.h
arch/arm/kernel/Makefile
arch/arm/kernel/elf.c
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/process.c
arch/arm/kernel/traps.c
arch/arm/mach-mmp/include/mach/gpio.h
arch/arm/mach-mmp/include/mach/mfp-pxa168.h
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-msm/timer.c
arch/arm/mach-pxa/include/mach/gpio.h
arch/arm/mach-pxa/include/mach/irqs.h
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-tegra/gpio.c
arch/arm/mach-tegra/tegra2_clocks.c
arch/arm/mm/mmap.c
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S
arch/arm/plat-s5p/pm.c
arch/arm/plat-samsung/pm-check.c
arch/arm/plat-samsung/pm.c
arch/arm/vfp/vfpmodule.c
arch/avr32/include/asm/setup.h
arch/avr32/kernel/setup.c
arch/avr32/kernel/traps.c
arch/avr32/mach-at32ap/clock.c
arch/avr32/mach-at32ap/extint.c
arch/avr32/mach-at32ap/pio.c
arch/avr32/mach-at32ap/pm-at32ap700x.S
arch/blackfin/include/asm/system.h
arch/blackfin/kernel/gptimers.c
arch/blackfin/kernel/time-ts.c
arch/blackfin/mach-common/smp.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/entry_mm.S
arch/m68k/kernel/syscalltable.S
arch/microblaze/Kconfig
arch/powerpc/Kconfig
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/pte-common.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_rio.c
arch/um/Kconfig.x86
arch/um/include/asm/bug.h [new file with mode: 0644]
arch/x86/include/asm/gart.h
arch/x86/include/asm/msr-index.h
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/smpboot.c
arch/x86/platform/ce4100/falconfalls.dts
arch/x86/platform/mrst/mrst.c
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-sysfs.c
block/blk.h
block/cfq-iosched.c
block/elevator.c
drivers/amba/bus.c
drivers/base/platform.c
drivers/base/power/main.c
drivers/dma/fsldma.c
drivers/gpio/ml_ioh_gpio.c
drivers/gpio/pca953x.c
drivers/gpio/pch_gpio.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_perf.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_evo.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nvc0_vm.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/stub/Kconfig
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/i2c-core.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/misc/xen-kbdfront.c
drivers/input/touchscreen/h3600_ts_input.c
drivers/leds/leds-regulator.c
drivers/md/dm-raid.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/video/videobuf-dma-contig.c
drivers/mfd/mfd-core.c
drivers/misc/sgi-gru/grufile.c
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/pci-driver.c
drivers/pcmcia/pxa2xx_balloon3.c
drivers/pcmcia/pxa2xx_trizeps4.c
drivers/platform/x86/Kconfig
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/eeepc-wmi.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/rapidio/rio.c
drivers/rapidio/switches/idt_gen2.c
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-mc13xxx.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-s3c.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/spi/amba-pl022.c
drivers/spi/dw_spi.c
drivers/spi/pxa2xx_spi.c
drivers/spi/spi_bfin5xx.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/samsung-laptop/Kconfig [deleted file]
drivers/staging/samsung-laptop/Makefile [deleted file]
drivers/staging/samsung-laptop/TODO [deleted file]
drivers/staging/samsung-laptop/samsung-laptop.c [deleted file]
drivers/usb/Kconfig
drivers/usb/core/devices.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/gadget/f_audio.c
drivers/usb/gadget/f_eem.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/pch_udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/host/ehci-q.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/ohci-au1xxx.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/Kconfig
drivers/usb/musb/blackfin.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musbhsdma.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/ux500.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/video/pxafb.c
drivers/xen/events.c
drivers/xen/manage.c
fs/9p/fid.c
fs/9p/v9fs.h
fs/9p/vfs_dentry.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/binfmt_elf.c
fs/btrfs/acl.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/xattr.c
fs/cifs/README
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_spnego.c
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/dcache.c
fs/ext4/ext4_jbd2.h
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fhandle.c
fs/filesystems.c
fs/gfs2/aops.c
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/ops_fstype.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/namei.c
fs/namespace.c
fs/nfs/write.c
fs/nfsd/lockd.c
fs/nfsd/nfs4state.c
fs/partitions/ldm.c
fs/proc/base.c
fs/ramfs/file-nommu.c
fs/ubifs/debug.h
fs/ubifs/file.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_message.c
fs/xfs/linux-2.6/xfs_message.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/quota/xfs_qm.c
fs/xfs/quota/xfs_qm.h
fs/xfs/quota/xfs_qm_syscalls.c
fs/xfs/xfs_alloc.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_priv.h
include/linux/blkdev.h
include/linux/device-mapper.h
include/linux/input.h
include/linux/input/mt.h
include/linux/memcontrol.h
include/linux/mfd/core.h
include/linux/pid.h
include/linux/platform_device.h
include/linux/posix-clock.h
include/linux/rio.h
include/linux/rio_ids.h
include/linux/rtc.h
include/linux/sched.h
include/linux/suspend.h
include/linux/vmstat.h
include/net/9p/9p.h
include/net/9p/client.h
include/trace/events/block.h
kernel/futex.c
kernel/perf_event.c
kernel/pid.c
kernel/power/Kconfig
kernel/sched.c
kernel/sched_fair.c
kernel/time/posix-clock.c
kernel/trace/blktrace.c
lib/kstrtox.c
lib/test-kstrtox.c
mm/huge_memory.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
mm/oom_kill.c
mm/page_alloc.c
mm/shmem.c
mm/vmscan.c
mm/vmstat.c
net/9p/client.c
net/9p/protocol.c
net/9p/trans_common.c
net/9p/trans_virtio.c
net/ceph/osd_client.c
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/util/cgroup.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/python.c
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c

index 274b32d..492e81d 100644 (file)
@@ -387,26 +387,6 @@ Who:       Tejun Heo <tj@kernel.org>
 
 ----------------------------
 
-What:  Support for lcd_switch and display_get in asus-laptop driver
-When:  March 2010
-Why:   These two features use non-standard interfaces. There are the
-       only features that really need multiple path to guess what's
-       the right method name on a specific laptop.
-
-       Removing them will allow to remove a lot of code an significantly
-       clean the drivers.
-
-       This will affect the backlight code which won't be able to know
-       if the backlight is on or off. The platform display file will also be
-       write only (like the one in eeepc-laptop).
-
-       This should'nt affect a lot of user because they usually know
-       when their display is on or off.
-
-Who:   Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
 What:  sysfs-class-rfkill state file
 When:  Feb 2014
 Files: net/rfkill/core.c
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
new file mode 100644 (file)
index 0000000..23fcb05
--- /dev/null
@@ -0,0 +1,262 @@
+The input protocol uses a map of types and codes to express input device values
+to userspace. This document describes the types and codes and how and when they
+may be used.
+
+A single hardware event generates multiple input events. Each input event
+contains the new value of a single data item. A special event type, EV_SYN, is
+used to separate input events into packets of input data changes occurring at
+the same moment in time. In the following, the term "event" refers to a single
+input event encompassing a type, code, and value.
+
+The input protocol is a stateful protocol. Events are emitted only when values
+of event codes have changed. However, the state is maintained within the Linux
+input subsystem; drivers do not need to maintain the state and may attempt to
+emit unchanged values without harm. Userspace may obtain the current state of
+event code values using the EVIOCG* ioctls defined in linux/input.h. The event
+reports supported by a device are also provided by sysfs in
+class/input/event*/device/capabilities/, and the properties of a device are
+provided in class/input/event*/device/properties.
+
+Types:
+==========
+Types are groupings of codes under a logical input construct. Each type has a
+set of applicable codes to be used in generating events. See the Codes section
+for details on valid codes for each type.
+
+* EV_SYN:
+  - Used as markers to separate events. Events may be separated in time or in
+    space, such as with the multitouch protocol.
+
+* EV_KEY:
+  - Used to describe state changes of keyboards, buttons, or other key-like
+    devices.
+
+* EV_REL:
+  - Used to describe relative axis value changes, e.g. moving the mouse 5 units
+    to the left.
+
+* EV_ABS:
+  - Used to describe absolute axis value changes, e.g. describing the
+    coordinates of a touch on a touchscreen.
+
+* EV_MSC:
+  - Used to describe miscellaneous input data that do not fit into other types.
+
+* EV_SW:
+  - Used to describe binary state input switches.
+
+* EV_LED:
+  - Used to turn LEDs on devices on and off.
+
+* EV_SND:
+  - Used to output sound to devices.
+
+* EV_REP:
+  - Used for autorepeating devices.
+
+* EV_FF:
+  - Used to send force feedback commands to an input device.
+
+* EV_PWR:
+  - A special type for power button and switch input.
+
+* EV_FF_STATUS:
+  - Used to receive force feedback device status.
+
+Codes:
+==========
+Codes define the precise type of event.
+
+EV_SYN:
+----------
+EV_SYN event values are undefined. Their usage is defined only by when they are
+sent in the evdev event stream.
+
+* SYN_REPORT:
+  - Used to synchronize and separate events into packets of input data changes
+    occurring at the same moment in time. For example, motion of a mouse may set
+    the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
+    motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
+
+* SYN_CONFIG:
+  - TBD
+
+* SYN_MT_REPORT:
+  - Used to synchronize and separate touch events. See the
+    multi-touch-protocol.txt document for more information.
+
+* SYN_DROPPED:
+  - Used to indicate buffer overrun in the evdev client's event queue.
+    The client should ignore all events up to and including the next
+    SYN_REPORT event and query the device (using EVIOCG* ioctls) to
+    obtain its current state.
+
+EV_KEY:
+----------
+EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
+to represent the 'A' key on a keyboard. When a key is depressed, an event with
+the key's code is emitted with value 1. When the key is released, an event is
+emitted with value 0. Some hardware sends events when a key is repeated. These
+events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
+BTN_<name> is used for other types of momentary switch events.
+
+A few EV_KEY codes have special meanings:
+
+* BTN_TOOL_<name>:
+  - These codes are used in conjunction with input trackpads, tablets, and
+    touchscreens. These devices may be used with fingers, pens, or other tools.
+    When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
+    code should be set to a value of 1. When the tool is no longer interacting
+    with the input device, the BTN_TOOL_<name> code should be reset to 0. All
+    trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
+    code when events are generated.
+
+* BTN_TOUCH:
+  - BTN_TOUCH is used for touch contact. While an input tool is determined to be
+    within meaningful physical contact, the value of this property must be set
+    to 1. Meaningful physical contact may mean any contact, or it may mean
+    contact conditioned by an implementation defined property. For example, a
+    touchpad may set the value to 1 only when the touch pressure rises above a
+    certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
+    example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
+    pen is hovering over but not touching the tablet surface.
+
+Note: For appropriate function of the legacy mousedev emulation driver,
+BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
+
+Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
+interpreted as a touchpad by userspace, while a similar device without
+BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
+with current userspace it is recommended to follow this distinction. In the
+future, this distinction will be deprecated and the device properties ioctl
+EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
+
+* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
+  - These codes denote one, two, three, and four finger interaction on a
+    trackpad or touchscreen. For example, if the user uses two fingers and moves
+    them on the touchpad in an effort to scroll content on screen,
+    BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
+    Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
+    purpose. A trackpad event generated by finger touches should generate events
+    for one code from each group. At most one of these BTN_TOOL_<name>
+    codes should have a value of 1 during any synchronization frame.
+
+Note: Historically some drivers emitted multiple of the finger count codes with
+a value of 1 in the same synchronization frame. This usage is deprecated.
+
+Note: In multitouch drivers, the input_mt_report_finger_count() function should
+be used to emit these codes. Please see multi-touch-protocol.txt for details.
+
+EV_REL:
+----------
+EV_REL events describe relative changes in a property. For example, a mouse may
+move to the left by a certain number of units, but its absolute position in
+space is unknown. If the absolute position is known, EV_ABS codes should be used
+instead of EV_REL codes.
+
+A few EV_REL codes have special meanings:
+
+* REL_WHEEL, REL_HWHEEL:
+  - These codes are used for vertical and horizontal scroll wheels,
+    respectively.
+
+EV_ABS:
+----------
+EV_ABS events describe absolute changes in a property. For example, a touchpad
+may emit coordinates for a touch location.
+
+A few EV_ABS codes have special meanings:
+
+* ABS_DISTANCE:
+  - Used to describe the distance of a tool from an interaction surface. This
+    event should only be emitted while the tool is hovering, meaning in close
+    proximity of the device and while the value of the BTN_TOUCH code is 0. If
+    the input device may be used freely in three dimensions, consider ABS_Z
+    instead.
+
+* ABS_MT_<name>:
+  - Used to describe multitouch input events. Please see
+    multi-touch-protocol.txt for details.
+
+EV_SW:
+----------
+EV_SW events describe stateful binary switches. For example, the SW_LID code is
+used to denote when a laptop lid is closed.
+
+Upon binding to a device or resuming from suspend, a driver must report
+the current switch state. This ensures that the device, kernel, and userspace
+state is in sync.
+
+Upon resume, if the switch state is the same as before suspend, then the input
+subsystem will filter out the duplicate switch state reports. The driver
+therefore does not need to track the state of the switch itself.
+
+EV_MSC:
+----------
+EV_MSC events are used for input and output events that do not fall under other
+categories.
+
+EV_LED:
+----------
+EV_LED events are used for input and output to set and query the state of
+various LEDs on devices.
+
+EV_REP:
+----------
+EV_REP events are used for specifying autorepeating events.
+
+EV_SND:
+----------
+EV_SND events are used for sending sound commands to simple sound output
+devices.
+
+EV_FF:
+----------
+EV_FF events are used to initialize a force feedback capable device and to
+cause such a device to play feedback effects.
+
+EV_PWR:
+----------
+EV_PWR events are a special type of event used specifically for power
+management. Their usage is not well defined; this will be addressed later.
+
+Guidelines:
+==========
+The guidelines below ensure proper single-touch and multi-finger functionality.
+For multi-touch functionality, see the multi-touch-protocol.txt document for
+more information.
+
+Mice:
+----------
+REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
+the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
+further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
+scroll wheel events where available.
+
+Touchscreens:
+----------
+ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
+used to report when a touch is active on the screen.
+BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
+contact. BTN_TOOL_<name> events should be reported where possible.
+
+Trackpads:
+----------
+Legacy trackpads that only provide relative position information must report
+events like the mice described above.
+
+Trackpads that provide absolute touch position must report ABS_{X,Y} for the
+location of the touch. BTN_TOUCH should be used to report when a touch is active
+on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
+be used to report the number of touches active on the trackpad.
+
+Tablets:
+----------
+BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
+the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
+should be used to report when the tool is in contact with the tablet.
+BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
+button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
+BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
+meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
+purpose on the device.
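
As a rough sketch of how userspace might consume the protocol described in the
new document above — assuming a readable /dev/input/event0 node (the path is a
placeholder) and the SYN_DROPPED definition this release adds to
<linux/input.h> — the loop below groups events into packets at SYN_REPORT and,
after an overrun, re-queries the key state with EVIOCGKEY:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	unsigned char keys[KEY_MAX / 8 + 1];
	struct input_event ev;
	int dropped = 0;
	int fd = open("/dev/input/event0", O_RDONLY); /* assumed node */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
			/* Queue overran: discard up to the next SYN_REPORT. */
			dropped = 1;
			continue;
		}
		if (ev.type == EV_SYN && ev.code == SYN_REPORT) {
			if (dropped) {
				/* Resynchronize with the device's current key state. */
				memset(keys, 0, sizeof(keys));
				ioctl(fd, EVIOCGKEY(sizeof(keys)), keys);
				dropped = 0;
			}
			/* A packet of simultaneous value changes ends here. */
			continue;
		}
		if (!dropped)
			printf("type %d code %d value %d\n",
			       ev.type, ev.code, ev.value);
	}
	close(fd);
	return 0;
}

On the driver side, the EV_SW rule above amounts to calling
input_report_switch() followed by input_sync() on probe and on resume; the
input core filters the duplicate report when the state did not change across
suspend.
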
index c85368d..1e2724e 100644 (file)
@@ -185,10 +185,9 @@ F: Documentation/filesystems/9p.txt
 F:     fs/9p/
 
 A2232 SERIAL BOARD DRIVER
-M:     Enver Haase <A2232@gmx.net>
 L:     linux-m68k@lists.linux-m68k.org
-S:     Maintained
-F:     drivers/char/ser_a2232*
+S:     Orphan
+F:     drivers/staging/generic_serial/ser_a2232*
 
 AACRAID SCSI RAID DRIVER
 M:     Adaptec OEM Raid Solutions <aacraid@adaptec.com>
@@ -878,6 +877,13 @@ F: arch/arm/mach-mv78xx0/
 F:     arch/arm/mach-orion5x/
 F:     arch/arm/plat-orion/
 
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M:     Alexander Clouter <alex@digriz.org.uk>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W:     http://www.digriz.org.uk/ts78xx/kernel
+S:     Maintained
+F:     arch/arm/mach-orion5x/ts78xx-*
+
 ARM/MIOA701 MACHINE SUPPORT
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1064,7 +1070,7 @@ F:        arch/arm/mach-shmobile/
 F:     drivers/sh/
 
 ARM/TELECHIPS ARM ARCHITECTURE
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/plat-tcc/
@@ -1824,11 +1830,10 @@ S:      Maintained
 F:     drivers/platform/x86/compal-laptop.c
 
 COMPUTONE INTELLIPORT MULTIPORT CARD
-M:     "Michael H. Warfield" <mhw@wittsend.com>
 W:     http://www.wittsend.com/computone.html
-S:     Maintained
+S:     Orphan
 F:     Documentation/serial/computone.txt
-F:     drivers/char/ip2/
+F:     drivers/staging/tty/ip2/
 
 CONEXANT ACCESSRUNNER USB DRIVER
 M:     Simon Arlott <cxacru@fire.lp0.eu>
@@ -2011,7 +2016,7 @@ F:        drivers/net/wan/cycx*
 CYCLADES ASYNC MUX DRIVER
 W:     http://www.cyclades.com/
 S:     Orphan
-F:     drivers/char/cyclades.c
+F:     drivers/tty/cyclades.c
 F:     include/linux/cyclades.h
 
 CYCLADES PC300 DRIVER
@@ -2125,8 +2130,8 @@ L:        Eng.Linux@digi.com
 W:     http://www.digi.com
 S:     Orphan
 F:     Documentation/serial/digiepca.txt
-F:     drivers/char/epca*
-F:     drivers/char/digi*
+F:     drivers/staging/tty/epca*
+F:     drivers/staging/tty/digi*
 
 DIOLAN U2C-12 I2C DRIVER
 M:     Guenter Roeck <guenter.roeck@ericsson.com>
@@ -4078,7 +4083,7 @@ F:        drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/max6650
@@ -4193,7 +4198,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:     Jiri Slaby <jirislaby@gmail.com>
 S:     Maintained
 F:     Documentation/serial/moxa-smartio
-F:     drivers/char/mxser.*
+F:     drivers/tty/mxser.*
 
 MSI LAPTOP SUPPORT
 M:     "Lee, Chun-Yi" <jlee@novell.com>
@@ -4235,7 +4240,7 @@ F:        sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
 S:     Orphan
-F:     drivers/char/isicom.c
+F:     drivers/tty/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
@@ -5274,14 +5279,14 @@ F:      drivers/memstick/host/r592.*
 RISCOM8 DRIVER
 S:     Orphan
 F:     Documentation/serial/riscom8.txt
-F:     drivers/char/riscom8*
+F:     drivers/staging/tty/riscom8*
 
 ROCKETPORT DRIVER
 P:     Comtrol Corp.
 W:     http://www.comtrol.com
 S:     Maintained
 F:     Documentation/serial/rocket.txt
-F:     drivers/char/rocket*
+F:     drivers/tty/rocket*
 
 ROSE NETWORK LAYER
 M:     Ralf Baechle <ralf@linux-mips.org>
@@ -5917,10 +5922,9 @@ F:       arch/arm/mach-spear6xx/spear600.c
 F:     arch/arm/mach-spear6xx/spear600_evb.c
 
 SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-M:     Roger Wolff <R.E.Wolff@BitWizard.nl>
-S:     Supported
+S:     Orphan
 F:     Documentation/serial/specialix.txt
-F:     drivers/char/specialix*
+F:     drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
 M:     David Brownell <dbrownell@users.sourceforge.net>
@@ -5965,7 +5969,6 @@ F:        arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M:     Greg Kroah-Hartman <greg@kroah.com>
-M:     Chris Wright <chrisw@sous-sol.org>
 L:     stable@kernel.org
 S:     Maintained
 
@@ -6249,7 +6252,8 @@ M:        Greg Ungerer <gerg@uclinux.org>
 W:     http://www.uclinux.org/
 L:     uclinux-dev@uclinux.org  (subscribers-only)
 S:     Maintained
-F:     arch/m68knommu/
+F:     arch/m68k/*/*_no.*
+F:     arch/m68k/include/asm/*_no.*
 
 UCLINUX FOR RENESAS H8/300 (H8300)
 M:     Yoshinori Sato <ysato@users.sourceforge.jp>
@@ -6619,7 +6623,7 @@ F:        fs/hostfs/
 F:     fs/hppfs/
 
 USERSPACE I/O (UIO)
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 M:     Greg Kroah-Hartman <gregkh@suse.de>
 S:     Maintained
 F:     Documentation/DocBook/uio-howto.tmpl
index 8392b64..b967b96 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index 9bb7b85..7a6d908 100644 (file)
@@ -4,7 +4,7 @@
 
 extra-y                := head.o vmlinux.lds
 asflags-y      := $(KBUILD_CFLAGS)
-ccflags-y      := -Werror -Wno-sign-compare
+ccflags-y      := -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
            irq_alpha.o signal.o setup.o ptrace.o time.o \
index 381fec0..da7bcc3 100644 (file)
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1,
 {
        unsigned long flags;
        unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-       unsigned int stat0, value, temp, cpu;
+       unsigned int stat0, value, cpu;
 
        cpu = smp_processor_id();
 
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1,
        stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
        *(vuip)MCPCIA_CAP_ERR(mid) = stat0;
        mb();
-       temp = *(vuip)MCPCIA_CAP_ERR(mid);
+       *(vuip)MCPCIA_CAP_ERR(mid);
        DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
        mb();
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 {
        unsigned long flags;
        unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-       unsigned int stat0, temp, cpu;
+       unsigned int stat0, cpu;
 
        cpu = smp_processor_id();
 
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
        /* Reset status register to avoid losing errors.  */
        stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
        *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
-       temp = *(vuip)MCPCIA_CAP_ERR(mid);
+       *(vuip)MCPCIA_CAP_ERR(mid);
        DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
        draina();
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
        *((vuip)addr) = value;
        mb();
        mb();  /* magic */
-       temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
+       *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
        mcheck_expected(cpu) = 0;
        mb();
 
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr)
 void
 mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
 {
-       struct el_common *mchk_header;
        struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
        unsigned int cpu = smp_processor_id();
        int expected;
 
-       mchk_header = (struct el_common *)la_ptr;
        mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
        expected = mcheck_expected(cpu);
 
index c3b3781..14b26c4 100644 (file)
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = {
 static struct el_subpacket *
 el_process_regatta_subpacket(struct el_subpacket *header)
 {
-       int status;
-
        if (header->class != EL_CLASS__REGATTA_FAMILY) {
                printk("%s  ** Unexpected header CLASS %d TYPE %d, aborting\n",
                       err_print_prefix,
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header)
                printk("%s  ** Occurred on CPU %d:\n", 
                       err_print_prefix,
                       (int)header->by_type.regatta_frame.cpuid);
-               status = privateer_process_logout_frame((struct el_common *)
+               privateer_process_logout_frame((struct el_common *)
                        header->by_type.regatta_frame.data_start, 1);
                break;
        default:
index 1479dc6..51b7fbd 100644 (file)
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-       irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+       irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
                                      handle_simple_irq, "RTC");
        setup_irq(RTC_IRQ, &timer_irqaction);
 }
index d2634e4..edbddcb 100644 (file)
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type)
        case PCA56_CPU:
        case PCA57_CPU:
          {
-               unsigned long cbox_config, size;
-
                if (cpu_type == PCA56_CPU) {
                        L1I = CSHAPE(16*1024, 6, 1);
                        L1D = CSHAPE(8*1024, 5, 1);
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type)
                }
                L3 = -1;
 
+#if 0
+               unsigned long cbox_config, size;
+
                cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
                size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
 
-#if 0
                L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
 #else
                L2 = external_cache_probe(512*1024, 6);
index 3e6a289..6886b83 100644 (file)
@@ -79,7 +79,6 @@
 static unsigned long __init SMCConfigState(unsigned long baseAddr)
 {
        unsigned char devId;
-       unsigned char devRev;
 
        unsigned long configPort;
        unsigned long indexPort;
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr)
                devId = inb(dataPort);
                if (devId == VALID_DEVICE_ID) {
                        outb(DEVICE_REV, indexPort);
-                       devRev = inb(dataPort);
+                       /* unsigned char devRev = */ inb(dataPort);
                        break;
                }
                else
index d3cb28b..d92cdc7 100644 (file)
@@ -156,7 +156,6 @@ static void __init
 wildfire_init_irq_per_pca(int qbbno, int pcano)
 {
        int i, irq_bias;
-       unsigned long io_bias;
        static struct irqaction isa_enable = {
                .handler        = no_action,
                .name           = "isa_enable",
@@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
        irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
                 + pcano * WILDFIRE_IRQ_PER_PCA;
 
+#if 0
+       unsigned long io_bias;
+
        /* Only need the following for first PCI bus per PCA. */
        io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
 
-#if 0
        outb(0, DMA1_RESET_REG + io_bias);
        outb(0, DMA2_RESET_REG + io_bias);
        outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
index a58e84f..918e8e0 100644 (file)
@@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts)
                year += 100;
 
        ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+       ts->tv_nsec = 0;
 }
 
 
index fdc9d4d..377a7a5 100644 (file)
@@ -1540,7 +1540,6 @@ config HIGHMEM
 config HIGHPTE
        bool "Allocate 2nd-level pagetables from highmem"
        depends on HIGHMEM
-       depends on !OUTER_CACHE
 
 config HW_PERF_EVENTS
        bool "Enable hardware performance counter support for perf events"
@@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
        depends on !ARCH_S5P64X0 && !ARCH_S5P6442
+       depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+               CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
        def_bool y
 
 endmenu
index 494224a..03d01d7 100644 (file)
@@ -63,17 +63,6 @@ config DEBUG_USER
              8 - SIGSEGV faults
             16 - SIGBUS faults
 
-config DEBUG_ERRORS
-       bool "Verbose kernel error messages"
-       depends on DEBUG_KERNEL
-       help
-         This option controls verbose debugging information which can be
-         printed when the kernel detects an internal error. This debugging
-         information is useful to kernel hackers when tracking down problems,
-         but mostly meaningless to other people. It's safe to say Y unless
-         you are concerned with the code size or don't want to see these
-         messages.
-
 config DEBUG_STACK_USAGE
        bool "Enable stack utilization instrumentation"
        depends on DEBUG_KERNEL
index e7521bc..6ea9b6f 100644 (file)
@@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP)     += scoop.o
 obj-$(CONFIG_ARCH_IXP2000)     += uengine.o
 obj-$(CONFIG_ARCH_IXP23XX)     += uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
-obj-$(CONFIG_COMMON_CLKDEV)    += clkdev.o
 obj-$(CONFIG_ARM_TIMER_SP804)  += timer-sp.o
index c4391ba..1dc9806 100644 (file)
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
 #define THREAD_NOTIFY_FLUSH    0
 #define THREAD_NOTIFY_EXIT     1
 #define THREAD_NOTIFY_SWITCH   2
+#define THREAD_NOTIFY_COPY     3
 
 #endif
 #endif
index 74554f1..8d95446 100644 (file)
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES)         += armksyms.o module.o
 obj-$(CONFIG_ARTHUR)           += arthur.o
 obj-$(CONFIG_ISA_DMA)          += dma-isa.o
 obj-$(CONFIG_PCI)              += bios32.o isa.o
-obj-$(CONFIG_PM)               += sleep.o
+obj-$(CONFIG_PM_SLEEP)         += sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
 obj-$(CONFIG_SMP)              += smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)     += smp_scu.o
index d4a0da1..9b05c6a 100644 (file)
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
 void elf_set_personality(const struct elf32_hdr *x)
 {
        unsigned int eflags = x->e_flags;
-       unsigned int personality = PER_LINUX_32BIT;
+       unsigned int personality = current->personality & ~PER_MASK;
+
+       /*
+        * We only support Linux ELF executables, so always set the
+        * personality to LINUX.
+        */
+       personality |= PER_LINUX;
 
        /*
         * APCS-26 is only valid for OABI executables
         */
-       if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
-               if (eflags & EF_ARM_APCS_26)
-                       personality = PER_LINUX;
-       }
+       if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+           (eflags & EF_ARM_APCS_26))
+               personality &= ~ADDR_LIMIT_32BIT;
+       else
+               personality |= ADDR_LIMIT_32BIT;
 
        set_personality(personality);
 
index 8dbc126..87acc25 100644 (file)
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
                 */
                asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
                isb();
+
+               /*
+                * Clear any configured vector-catch events before
+                * enabling monitor mode.
+                */
+               asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+               isb();
        }
 
        if (enable_monitor_mode())
index 69cfee0..979da39 100644 (file)
@@ -221,7 +221,7 @@ again:
        prev_raw_count &= armpmu->max_period;
 
        if (overflow)
-               delta = armpmu->max_period - prev_raw_count + new_raw_count;
+               delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
        else
                delta = new_raw_count - prev_raw_count;
 
index 94bbedb..5e1e541 100644 (file)
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
        if (clone_flags & CLONE_SETTLS)
                thread->tp_value = regs->ARM_r3;
 
+       thread_notify(THREAD_NOTIFY_COPY, thread);
+
        return 0;
 }
 
index f0000e1..3b54ad1 100644 (file)
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
        struct thread_info *thread = current_thread_info();
        siginfo_t info;
 
-       if (current->personality != PER_LINUX &&
-           current->personality != PER_LINUX_32BIT &&
+       if ((current->personality & PER_MASK) != PER_LINUX &&
            thread->exec_domain->handler) {
                thread->exec_domain->handler(n, regs);
                return regs->ARM_r0;
index ee8b02e..7bfb827 100644 (file)
@@ -10,7 +10,7 @@
 #define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
 #define GPIO_REG(x)    (*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
 
-#define NR_BUILTIN_GPIO        (192)
+#define NR_BUILTIN_GPIO                IRQ_GPIO_NUM
 
 #define gpio_to_bank(gpio)     ((gpio) >> 5)
 #define gpio_to_irq(gpio)      (IRQ_GPIO_START + (gpio))
index 4621067..713be15 100644 (file)
@@ -8,6 +8,15 @@
 #define MFP_DRIVE_MEDIUM       (0x2 << 13)
 #define MFP_DRIVE_FAST         (0x3 << 13)
 
+#undef MFP_CFG
+#undef MFP_CFG_DRV
+
+#define MFP_CFG(pin, af)               \
+       (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM)
+
+#define MFP_CFG_DRV(pin, af, drv)      \
+       (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv)
+
 /* GPIO */
 #define GPIO0_GPIO             MFP_CFG(GPIO0, AF5)
 #define GPIO1_GPIO             MFP_CFG(GPIO1, AF5)
index 7f56861..6a96911 100644 (file)
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
 
 static void __init qsd8x50_init_mmc(void)
 {
-       if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
-               vreg_mmc = vreg_get(NULL, "gp6");
-       else
-               vreg_mmc = vreg_get(NULL, "gp5");
+       vreg_mmc = vreg_get(NULL, "gp5");
 
        if (IS_ERR(vreg_mmc)) {
                pr_err("vreg get for vreg_mmc failed (%ld)\n",
index 56f920c..38b95e9 100644 (file)
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
        /* Use existing clock_event for cpu 0 */
        if (!smp_processor_id())
-               return;
+               return 0;
 
        writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
index b024a8b..c463950 100644 (file)
 #define GAFR(x)                GPIO_REG(0x54 + (((x) & 0x70) >> 2))
 
 
-#define NR_BUILTIN_GPIO 128
+#define NR_BUILTIN_GPIO                PXA_GPIO_IRQ_NUM
 
 #define gpio_to_bank(gpio)     ((gpio) >> 5)
 #define gpio_to_irq(gpio)      IRQ_GPIO(gpio)
-#define irq_to_gpio(irq)       IRQ_TO_GPIO(irq)
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+       int gpio;
+
+       if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
+               return irq - IRQ_GPIO0;
+
+       gpio = irq - PXA_GPIO_IRQ_BASE;
+       if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
+               return gpio;
+
+       return -1;
+}
 
 #ifdef CONFIG_CPU_PXA26x
 /* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
index a4285fc..0384024 100644 (file)
@@ -93,9 +93,6 @@
 #define GPIO_2_x_TO_IRQ(x)     (PXA_GPIO_IRQ_BASE + (x))
 #define IRQ_GPIO(x)    (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
 
-#define IRQ_TO_GPIO_2_x(i)     ((i) - PXA_GPIO_IRQ_BASE)
-#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
-
 /*
  * The following interrupts are for board specific purposes. Since
  * the kernel can only run on one machine at a time, we can re-use
index 6bde595..a4af8c5 100644 (file)
@@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {}
 
 static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
 {
-       int gpio = IRQ_TO_GPIO(d->irq);
+       int gpio = irq_to_gpio(d->irq);
        uint32_t mask = 0;
 
        if (gpio >= 0 && gpio < 85)
index 1cb5d0f..909756e 100644 (file)
@@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {}
  */
 static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
 {
-       int gpio = IRQ_TO_GPIO(d->irq);
+       int gpio = irq_to_gpio(d->irq);
        uint32_t mask;
 
        if (gpio >= 0 && gpio < 128)
index 76a3f65..65a1aba 100644 (file)
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 void tegra_gpio_resume(void)
 {
        unsigned long flags;
-       int b, p, i;
+       int b;
+       int p;
 
        local_irq_save(flags);
 
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
 void tegra_gpio_suspend(void)
 {
        unsigned long flags;
-       int b, p, i;
+       int b;
+       int p;
 
        local_irq_save(flags);
        for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
index 6d7c4ee..4459470 100644 (file)
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
 {
        unsigned long flags;
        int ret;
+       long new_rate = rate;
 
-       rate = clk_round_rate(c->parent, rate);
-       if (rate < 0)
-               return rate;
+       new_rate = clk_round_rate(c->parent, new_rate);
+       if (new_rate < 0)
+               return new_rate;
 
        spin_lock_irqsave(&c->parent->spinlock, flags);
 
-       c->u.shared_bus_user.rate = rate;
+       c->u.shared_bus_user.rate = new_rate;
        ret = tegra_clk_shared_bus_update(c->parent);
 
        spin_unlock_irqrestore(&c->parent->spinlock, flags);
index afe209e..74be05f 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <asm/cputype.h>
 #include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                mm->cached_hole_size = 0;
        }
        /* 8 bits of randomness in 20 address space bits */
-       if (current->flags & PF_RANDOMIZE)
+       if ((current->flags & PF_RANDOMIZE) &&
+           !(current->personality & ADDR_NO_RANDOMIZE))
                addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
index b46eb21..bf8a1d1 100644 (file)
@@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
 .equ   cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index 6a4bdb2..0ed85d9 100644 (file)
@@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
 .equ   cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ PID
index 74483d1..184a9c9 100644 (file)
@@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl cpu_sa1100_suspend_size
 .equ   cpu_sa1100_suspend_size, 4*4
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_sa1100_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
        mrc     p15, 0, r4, c3, c0, 0           @ domain ID
index bfa0c9f..7c99cb4 100644 (file)
@@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl cpu_v6_suspend_size
 .equ   cpu_v6_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v6_do_suspend)
        stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
index c35618e..babfba0 100644 (file)
@@ -211,7 +211,7 @@ cpu_v7_name:
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
 .equ   cpu_v7_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
        stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
index 63d8b20..5962136 100644 (file)
@@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl cpu_xsc3_suspend_size
 .equ   cpu_xsc3_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
        stmfd   sp!, {r4 - r10, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index 086038c..ce233bc 100644 (file)
@@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl cpu_xscale_suspend_size
 .equ   cpu_xscale_suspend_size, 4 * 7
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xscale_do_suspend)
        stmfd   sp!, {r4 - r10, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
index d592b63..d15dc47 100644 (file)
 
 #define PFX "s5p pm: "
 
-/* s3c_pm_check_resume_pin
- *
- * check to see if the pin is configured correctly for sleep mode, and
- * make any necessary adjustments if it is not
-*/
-
-static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
-{
-       /* nothing here yet */
-}
-
 /* s3c_pm_configure_extint
  *
  * configure all external interrupt pins
index e4baf76..6b733fa 100644 (file)
@@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
  */
 static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
 {
-       void *save_at = phys_to_virt(s3c_sleep_save_phys);
        unsigned long addr;
        unsigned long left;
        void *stkpage;
@@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
                        goto skip_check;
                }
 
-               if (in_region(ptr, left, save_at, 32*4 )) {
-                       S3C_PMDBG("skipping %08lx, has save block in\n", addr);
-                       goto skip_check;
-               }
-
                /* calculate and check the checksum */
 
                calc = crc32_le(~0, ptr, left);
index d5b58d3..5c0a440 100644 (file)
@@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
  *
  * print any IRQs asserted at resume time (ie, we woke from)
 */
-static void s3c_pm_show_resume_irqs(int start, unsigned long which,
-                                   unsigned long mask)
+static void __maybe_unused s3c_pm_show_resume_irqs(int start,
+                                                  unsigned long which,
+                                                  unsigned long mask)
 {
        int i;
 
index bbf3da0..f746950 100644 (file)
@@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread)
        put_cpu();
 }
 
+static void vfp_thread_copy(struct thread_info *thread)
+{
+       struct thread_info *parent = current_thread_info();
+
+       vfp_sync_hwstate(parent);
+       thread->vfpstate = parent->vfpstate;
+}
+
 /*
  * When this function is called with the following 'cmd's, the following
  * is true while this function is being run:
@@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread)
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
        struct thread_info *thread = v;
+       u32 fpexc;
+#ifdef CONFIG_SMP
+       unsigned int cpu;
+#endif
 
-       if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
-               u32 fpexc = fmrx(FPEXC);
+       switch (cmd) {
+       case THREAD_NOTIFY_SWITCH:
+               fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
-               unsigned int cpu = thread->cpu;
+               cpu = thread->cpu;
 
                /*
                 * On SMP, if VFP is enabled, save the old state in
@@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
-               return NOTIFY_DONE;
-       }
+               break;
 
-       if (cmd == THREAD_NOTIFY_FLUSH)
+       case THREAD_NOTIFY_FLUSH:
                vfp_thread_flush(thread);
-       else
+               break;
+
+       case THREAD_NOTIFY_EXIT:
                vfp_thread_exit(thread);
+               break;
+
+       case THREAD_NOTIFY_COPY:
+               vfp_thread_copy(thread);
+               break;
+       }
 
        return NOTIFY_DONE;
 }
index ff5b7cf..160543d 100644 (file)
@@ -94,6 +94,13 @@ struct tag_ethernet {
 
 #define ETH_INVALID_PHY        0xff
 
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+       u32     board_number;
+};
+
 struct tag {
        struct tag_header hdr;
        union {
@@ -102,6 +109,7 @@ struct tag {
                struct tag_cmdline cmdline;
                struct tag_clock clock;
                struct tag_ethernet ethernet;
+               struct tag_boardinfo boardinfo;
        } u;
 };
 
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
 
 extern resource_size_t fbmem_start;
 extern resource_size_t fbmem_size;
+extern u32 board_number;
 
 void setup_processor(void);
 
index 5c70839..bb0974c 100644 (file)
@@ -390,6 +390,21 @@ static int __init parse_tag_clock(struct tag *tag)
 }
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
+/*
+ * The board_number corresponds to bd->bi_board_number in U-Boot. This
+ * parameter is only available during initialisation and can be used for
+ * some kind of board identification.
+ */
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+       board_number = tag->u.boardinfo.board_number;
+
+       return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
 /*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
index b91b204..7aa2575 100644 (file)
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
        info.si_code = code;
        info.si_addr = (void __user *)addr;
        force_sig_info(signr, &info, current);
-
-       /*
-        * Init gets no signals that it doesn't have a handler for.
-        * That's all very well, but if it has caused a synchronous
-        * exception and we ignore the resulting signal, it will just
-        * generate the same exception over and over again and we get
-        * nowhere.  Better to kill it and let the kernel panic.
-        */
-       if (is_global_init(current)) {
-               __sighandler_t handler;
-
-               spin_lock_irq(&current->sighand->siglock);
-               handler = current->sighand->action[signr-1].sa.sa_handler;
-               spin_unlock_irq(&current->sighand->siglock);
-               if (handler == SIG_DFL) {
-                       /* init has generated a synchronous exception
-                          and it doesn't have a handler for the signal */
-                       printk(KERN_CRIT "init has generated signal %ld "
-                              "but has no handler for it\n", signr);
-                       do_exit(signr);
-               }
-       }
 }
 
 asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
index 442f08c..86925fd 100644 (file)
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
        spin_unlock(&clk_list_lock);
 }
 
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
 {
        struct clk *clk;
 
-       spin_lock(&clk_list_lock);
-
        list_for_each_entry(clk, &at32_clock_list, list) {
                if (clk->dev == dev && strcmp(id, clk->name) == 0) {
-                       spin_unlock(&clk_list_lock);
                        return clk;
                }
        }
 
-       spin_unlock(&clk_list_lock);
        return ERR_PTR(-ENOENT);
 }
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+       struct clk *clk;
+
+       spin_lock(&clk_list_lock);
+       clk = __clk_get(dev, id);
+       spin_unlock(&clk_list_lock);
+
+       return clk;
+}
+
 EXPORT_SYMBOL(clk_get);
 
 void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
        spin_lock(&clk_list_lock);
 
        /* show clock tree as derived from the three oscillators */
-       clk = clk_get(NULL, "osc32k");
+       clk = __clk_get(NULL, "osc32k");
        dump_clock(clk, &r);
        clk_put(clk);
 
-       clk = clk_get(NULL, "osc0");
+       clk = __clk_get(NULL, "osc0");
        dump_clock(clk, &r);
        clk_put(clk);
 
-       clk = clk_get(NULL, "osc1");
+       clk = __clk_get(NULL, "osc1");
        dump_clock(clk, &r);
        clk_put(clk);
 
index 47ba4b9..fbc2aea 100644 (file)
@@ -61,34 +61,34 @@ struct eic {
 static struct eic *nmi_eic;
 static bool nmi_enabled;
 
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
        eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
 }
 
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq;
        unsigned int i = irq - eic->first_irq;
        u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int_irq = platform_get_irq(pdev, 0);
-       if (!regs || !int_irq) {
+       if (!regs || (int)int_irq <= 0) {
                dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
                return -ENXIO;
        }
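The signature fixes above track the genirq conversion from irq_chip-based to irq_data-based chip methods; the old prototypes also referenced an undeclared `data` variable, so the file could not have compiled. For context, a chip wires such handlers up roughly as below. This is a sketch of the 2.6.39-era API, not a quote of the actual initializer in extint.c:

/* Sketch only: field names follow the irq_data-based irq_chip API;
 * the real eic_chip initializer may differ. */
static struct irq_chip eic_chip = {
        .name           = "eic",
        .irq_ack        = eic_ack_irq,     /* all take struct irq_data * */
        .irq_mask       = eic_mask_irq,
        .irq_mask_ack   = eic_mask_ack_irq,
        .irq_unmask     = eic_unmask_irq,
        .irq_set_type   = eic_set_irq_type,
};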
index f308e1d..2e0aa85 100644 (file)
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
        pio_writel(pio, IDR, 1 << (gpio & 0x1f));
 }
 
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
 {
        unsigned                gpio = irq_to_gpio(d->irq);
        struct pio_device       *pio = &pio_dev[gpio >> 5];
index 17503b0..f868f4c 100644 (file)
@@ -53,7 +53,7 @@ cpu_enter_idle:
        st.w    r8[TI_flags], r9
        unmask_interrupts
        sleep   CPU_SLEEP_IDLE
-       .size   cpu_idle_sleep, . - cpu_idle_sleep
+       .size   cpu_enter_idle, . - cpu_enter_idle
 
        /*
         * Common return path for PM functions that don't run from
index 19e2c7c..44bd0cc 100644 (file)
  * Force strict CPU ordering.
  */
 #define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb()   __asm__ __volatile__ (""   : : : "memory")
-#define rmb()  __asm__ __volatile__ (""   : : : "memory")
-#define wmb()  __asm__ __volatile__ (""   : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends()                 do { } while(0)
+#define smp_mb()  mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends()     read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
                                        unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb()      do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb()     do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb()     do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends()     do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb()  do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends()        do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb()      barrier()
-# define smp_rmb()     barrier()
-# define smp_wmb()     barrier()
-#define smp_read_barrier_depends()     barrier()
+# define mb()  barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends()        do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
+#define mb()   barrier()
+#define rmb()  barrier()
+#define wmb()  barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
        unsigned long a[100];
index cdbe075..8b81dc0 100644 (file)
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
        _disable_gptimers(mask);
        for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
                if (mask & (1 << i))
-                       group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+                       group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
        SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
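The `|=` to `=` change above is more than style: it presumes the gptimer status register is write-1-to-clear (the usual rationale for this class of fix), in which case a read-modify-write writes back every bit that happens to be pending and acknowledges other timers' interrupts as a side effect. An illustration under that assumption:

/* Assume a write-1-to-clear status register with bits 0 and 3 pending,
 * and trun_mask[i] == 0x1 (we only want to clear timer 0's bit). */
status |= 0x1;   /* reads 0x9, writes back 0x9: clears bits 0 AND 3 */
status  = 0x1;   /* writes 0x1: clears only bit 0, as intended      */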
index 8c9a43d..cdb4beb 100644 (file)
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = dev_id;
        smp_mb();
-       evt->event_handler(evt);
+       /*
+        * We want to ACK before we handle so that we can service shorter
+        * timer intervals.  If the timer expires again while we're still
+        * handling the first interrupt, we're more likely to see that 2nd
+        * interrupt rather than swallow it by ACKing at the end of this handler.
+        */
        bfin_gptmr0_ack();
+       evt->event_handler(evt);
        return IRQ_HANDLED;
 }
 
index 6e17a26..8bce5ed 100644 (file)
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
        struct blackfin_flush_data *fdata = info;
 
        /* Invalidate the memory holding the bounds of the flushed region. */
-       invalidate_dcache_range((unsigned long)fdata,
-               (unsigned long)fdata + sizeof(*fdata));
+       blackfin_dcache_invalidate_range((unsigned long)fdata,
+                                        (unsigned long)fdata + sizeof(*fdata));
+
+       /* Make sure all write buffers in the data side of the core
+        * are flushed before trying to invalidate the icache.  This
+        * needs to be after the data flush and before the icache
+        * flush so that the SSYNC does the right thing in preventing
+        * the instruction prefetcher from hitting things in cached
+        * memory at the wrong time -- it runs much further ahead than
+        * the pipeline.
+        */
+       SSYNC();
 
-       flush_icache_range(fdata->start, fdata->end);
+       /* ipi_flush_icache is invoked by the generic flush_icache_range,
+        * so call the blackfin arch icache flush directly here.
+        */
+       blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
index 26d851d..29e1790 100644 (file)
 #define __NR_fanotify_init     337
 #define __NR_fanotify_mark     338
 #define __NR_prlimit64         339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime     342
+#define __NR_syncfs            343
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            340
+#define NR_syscalls            344
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 1559dea..1359ee6 100644 (file)
@@ -750,4 +750,8 @@ sys_call_table:
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64
+       .long sys_name_to_handle_at     /* 340 */
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
+       .long sys_syncfs
 
index 79b1ed1..9b8393d 100644 (file)
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64
+       .long sys_name_to_handle_at     /* 340 */
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
+       .long sys_syncfs
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index 851b3bf..eccdefe 100644 (file)
@@ -6,7 +6,6 @@ config MICROBLAZE
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
-       select USB_ARCH_HAS_EHCI
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select HAVE_OPROFILE
        select HAVE_ARCH_KGDB
index b6ff882..8f4d50b 100644 (file)
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
        depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-                  PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+                  (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
        bool
index be3cdf9..1833d1a 100644 (file)
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
            CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
            CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC        (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
            CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+           CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+           CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32    (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_COMPATIBLE    (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE      (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE      \
            (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |        \
            CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |       \
            CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T |           \
            CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
        CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
 #endif
 #ifdef CONFIG_E500
            CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+           CPU_FTRS_E5500 |
 #endif
            0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS                (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS                \
            (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &        \
            CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 &       \
            CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
        CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
 #endif
 #ifdef CONFIG_E500
            CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+           CPU_FTRS_E5500 &
 #endif
            CPU_FTRS_POSSIBLE,
 };
index 811f04a..8d1569c 100644 (file)
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * on platforms where such control is possible.
  */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-       defined(CONFIG_KPROBES)
+       defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT       PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT       PAGE_KERNEL_ROX
index c9b68d0..b9602ee 100644 (file)
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80240000,
                .cpu_name               = "e5500",
-               .cpu_features           = CPU_FTRS_E500MC,
+               .cpu_features           = CPU_FTRS_E5500,
                .cpu_user_features      = COMMON_USER_BOOKE,
                .mmu_features           = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
                        MMU_FTR_USE_TLBILX,
index 3d3d416..5b5e1f0 100644 (file)
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
        unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
        }
        mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
        crash_ipi_callback(regs);
 }
 
-#else
+#else  /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
        /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
        cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
index c00d4ca..28581f1 100644 (file)
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int ibmebus_bus_pm_freeze(struct device *dev)
 {
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define ibmebus_bus_pm_freeze          NULL
 #define ibmebus_bus_pm_thaw            NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 #define ibmebus_bus_pm_poweroff_noirq  NULL
 #define ibmebus_bus_pm_restore_noirq   NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
        .prepare = ibmebus_bus_pm_prepare,
index c834757..2b97b80 100644 (file)
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
                if (!parent)
                        continue;
                if (of_match_node(legacy_serial_parents, parent) != NULL) {
-                       index = add_legacy_soc_port(np, np);
-                       if (index >= 0 && np == stdout)
-                               legacy_serial_console = index;
+                       if (of_device_is_available(np)) {
+                               index = add_legacy_soc_port(np, np);
+                               if (index >= 0 && np == stdout)
+                                       legacy_serial_console = index;
+                       }
                }
                of_node_put(parent);
        }
index c4063b7..822f630 100644 (file)
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
        return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+       u64 delta = (val - prev) & 0xfffffffful;
+
+       /*
+        * POWER7 can roll back counter values. If the new value is smaller
+        * than the previous value, it will cause the delta and the counter
+        * to have bogus values, unless the counter genuinely rolled over.
+        * A rolled-back counter will be smaller, but within 256, which is
+        * the maximum number of events to roll back at once. If we detect
+        * a rollback, return 0. This can lead to a small loss of precision
+        * in the counters.
+        */
+       if (prev > val && (prev - val) < 256)
+               delta = 0;
+
+       return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
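Since the 32-bit mask and the 256-event rollback window in check_and_compute_delta() interact, a worked example may help; the numbers below are illustrative only:

/* Genuine 32-bit wrap: prev = 0xfffffff0, val = 0x00000010
 *   (val - prev) & 0xfffffffful == 0x20  -> delta = 32 events
 *
 * POWER7 rollback:     prev = 0x00000100, val = 0x000000c0
 *   prev > val and (prev - val) == 0x40 < 256 -> treated as a
 *   rollback; delta forced to 0 (a small loss of precision). */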
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
+               delta = check_and_compute_delta(prev, val);
+               if (!delta)
+                       return;
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-       /* The counters are only 32 bits wide */
-       delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
                prev = local64_read(&event->hw.prev_count);
                event->hw.idx = 0;
-               delta = (val - prev) & 0xfffffffful;
-               local64_add(delta, &event->count);
+               delta = check_and_compute_delta(prev, val);
+               if (delta)
+                       local64_add(delta, &event->count);
        }
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                                  unsigned long pmc5, unsigned long pmc6)
 {
        struct perf_event *event;
-       u64 val;
+       u64 val, prev;
        int i;
 
        for (i = 0; i < cpuhw->n_limited; ++i) {
                event = cpuhw->limited_counter[i];
                event->hw.idx = cpuhw->limited_hwidx[i];
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
-               local64_set(&event->hw.prev_count, val);
+               prev = local64_read(&event->hw.prev_count);
+               if (check_and_compute_delta(prev, val))
+                       local64_set(&event->hw.prev_count, val);
                perf_event_update_userpage(event);
        }
 }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
-       delta = (val - prev) & 0xfffffffful;
+       delta = check_and_compute_delta(prev, val);
        local64_add(delta, &event->count);
 
        /*
index 375480c..f33acfd 100644 (file)
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
        u64 stolen = 0;
        u64 dtb;
 
+       if (!dtl)
+               return 0;
+
        if (i == vpa->dtl_idx)
                return 0;
        while (i < vpa->dtl_idx) {
index a830c5e..bc5f0dc 100644 (file)
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
        mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
        extern void g5_phy_disable_cpu1(void);
 
        /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
                set_cpu_present(1, false);
                g5_phy_disable_cpu1();
        }
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
        register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
        if (ppc_md.progress)
                ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
        .message_pass   = smp_mpic_message_pass,
        .probe          = smp_core99_probe,
+#ifdef CONFIG_PPC64
        .bringup_done   = smp_core99_bringup_done,
+#endif
        .kick_cpu       = smp_core99_kick_cpu,
        .setup_cpu      = smp_core99_setup_cpu,
        .give_timebase  = smp_core99_give_timebase,
index 0007241..6c42cfd 100644 (file)
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
        int cpu, ret;
        struct paca_struct *pp;
        struct dtl_entry *dtl;
+       struct kmem_cache *dtl_cache;
 
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return 0;
 
+       dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+                                               DISPATCH_LOG_BYTES, 0, NULL);
+       if (!dtl_cache) {
+               pr_warn("Failed to create dispatch trace log buffer cache\n");
+               pr_warn("Stolen time statistics will be unreliable\n");
+               return 0;
+       }
+
        for_each_possible_cpu(cpu) {
                pp = &paca[cpu];
-               dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
-                                  cpu_to_node(cpu));
+               dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
                if (!dtl) {
                        pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
                                cpu);
index f8f7f28..68ca929 100644 (file)
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
        struct resource rsrc;
        const int *bus_range;
 
+       if (!of_device_is_available(dev)) {
+               pr_warning("%s: disabled\n", dev->full_name);
+               return -ENODEV;
+       }
+
        pr_debug("Adding PCI host bridge %s\n", dev->full_name);
 
        /* Fetch host bridge registers address */
index 14232d5..4979853 100644 (file)
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev)
        port->ops = ops;
        port->priv = priv;
        port->phys_efptr = 0x100;
-       rio_register_mport(port);
 
        priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
        rio_regs_win = priv->regs_win;
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev)
        dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
                        port->sys_size ? 65536 : 256);
 
+       if (rio_register_mport(port))
+               goto err;
+
        if (port->host_deviceid >= 0)
                out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
                        RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
index 02fb017..a9da516 100644 (file)
@@ -4,6 +4,10 @@ menu "UML-specific options"
 
 menu "Host processor type and features"
 
+config CMPXCHG_LOCAL
+       bool
+       default n
+
 source "arch/x86/Kconfig.cpu"
 
 endmenu
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h
new file mode 100644 (file)
index 0000000..9e33b86
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __UM_BUG_H
+#define __UM_BUG_H
+
+#include <asm-generic/bug.h>
+
+#endif
index 43085bf..156cd5d 100644 (file)
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
         * Don't enable translation but enable GART IO and CPU accesses.
         * Also, set DISTLBWALKPRB since GART tables memory is UC.
         */
-       ctl = DISTLBWALKPRB | order << 1;
+       ctl = order << 1;
 
        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
        u32 tmp, ctl;
 
-        /* address of the mappings table */
-        addr >>= 12;
-        tmp = (u32) addr<<4;
-        tmp &= ~0xf;
-        pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
-
-        /* Enable GART translation for this hammer. */
-        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
-        ctl |= GARTEN;
-        ctl &= ~(DISGARTCPU | DISGARTIO);
-        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+       /* address of the mappings table */
+       addr >>= 12;
+       tmp = (u32) addr<<4;
+       tmp &= ~0xf;
+       pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+       /* Enable GART translation for this hammer. */
+       pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+       ctl |= GARTEN | DISTLBWALKPRB;
+       ctl &= ~(DISGARTCPU | DISGARTIO);
+       pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
 
 static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
index fd5a1f3..3cce714 100644 (file)
 #define MSR_IA32_MC0_ADDR              0x00000402
 #define MSR_IA32_MC0_MISC              0x00000403
 
+#define MSR_AMD64_MC0_MASK             0xc0010044
+
 #define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
 #define MSR_IA32_MCx_STATUS(x)         (MSR_IA32_MC0_STATUS + 4*(x))
 #define MSR_IA32_MCx_ADDR(x)           (MSR_IA32_MC0_ADDR + 4*(x))
 #define MSR_IA32_MCx_MISC(x)           (MSR_IA32_MC0_MISC + 4*(x))
 
+#define MSR_AMD64_MCx_MASK(x)          (MSR_AMD64_MC0_MASK + (x))
+
 /* These are consecutive and not in the normal 4er MCE bank block */
 #define MSR_IA32_MC0_CTL2              0x00000280
 #define MSR_IA32_MCx_CTL2(x)           (MSR_IA32_MC0_CTL2 + (x))
index 86d1ad4..73fb469 100644 (file)
@@ -499,7 +499,7 @@ out:
                 * Don't enable translation yet but enable GART IO and CPU
                 * accesses and set DISTLBWALKPRB since GART table memory is UC.
                 */
-               u32 ctl = DISTLBWALKPRB | aper_order << 1;
+               u32 ctl = aper_order << 1;
 
                bus = amd_nb_bus_dev_ranges[i].bus;
                dev_base = amd_nb_bus_dev_ranges[i].dev_base;
index 3ecece0..3532d3b 100644 (file)
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
        /* As a rule processors have APIC timer running in deep C states */
        if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
                set_cpu_cap(c, X86_FEATURE_ARAT);
+
+       /*
+        * Disable GART TLB Walk Errors on Fam10h. We do this here
+        * because this is always needed when GART is enabled, even in a
+        * kernel which has no MCE support built in.
+        */
+       if (c->x86 == 0x10) {
+               /*
+                * The BIOS should disable GartTlbWlk errors itself. If it
+                * doesn't, do it here, as suggested by the BKDG.
+                *
+                * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+                */
+               u64 mask;
+
+               rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+               mask |= (1 << 10);
+               wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+       }
 }
 
 #ifdef CONFIG_X86_32
index 461f62b..cf4e369 100644 (file)
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
-               [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
+               [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = {
  *
  * Exceptions:
  *
+ * 0x000       FP      PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x003       FP      PERF_CTL[3]
+ * 0x004       FP      PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x00B       FP      PERF_CTL[3]
  * 0x00D       FP      PERF_CTL[3]
  * 0x023       DE      PERF_CTL[2:0]
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DF       LS      PERF_CTL[5:0]
  * 0x1D6       EX      PERF_CTL[5:0]
  * 0x1D8       EX      PERF_CTL[5:0]
+ *
+ * (*) depending on the umask all FPU counters may be used
  */
 
 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-       unsigned int event_code = amd_get_event_code(&event->hw);
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int event_code = amd_get_event_code(hwc);
 
        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
+               case 0x000:
+                       if (!(hwc->config & 0x0000F000ULL))
+                               break;
+                       if (!(hwc->config & 0x00000F00ULL))
+                               break;
+                       return &amd_f15_PMC3;
+               case 0x004:
+                       if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+                               break;
+                       return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
-               default:
-                       return &amd_f15_PMC53;
                }
+               return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
index 82ada01..b117efd 100644 (file)
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
 #define AGPEXTERN
 #endif
 
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR     (1ULL << 40)
+
 /* backdoor interface to AGP driver */
 AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir, unsigned long align_mask)
 {
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
-       unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+       unsigned long iommu_page;
        int i;
 
+       if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+               return bad_dma_addr;
+
+       iommu_page = alloc_iommu(dev, npages, align_mask);
        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
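The new GART_MAX_PHYS_ADDR bound reflects the GART table entry format: each entry is a 32-bit word that can only encode a 40-bit physical page address, so nothing at or above 1 TB can be remapped (the entry-format detail is background from the AMD GART documentation, not from this hunk). Illustrative values:

/* GART_MAX_PHYS_ADDR == 1ULL << 40 == 0x10000000000 (1 TB).
 * phys_mem = 0xfffffff000, size = 0x2000:
 *   phys_mem + size == 0x10000001000 > 1 TB -> return bad_dma_addr
 * rather than hand out a mapping the table cannot express. */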
index c2871d3..8ed8908 100644 (file)
@@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id)
                identify_secondary_cpu(c);
 }
 
+static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
+{
+       int node1 = early_cpu_to_node(cpu1);
+       int node2 = early_cpu_to_node(cpu2);
+
+       /*
+        * Our CPU scheduler assumes all logical cpus in the same physical cpu
+        * share the same node. But buggy ACPI or NUMA emulation might assign
+        * them to different nodes. Fix it.
+        */
+       if (node1 != node2) {
+               pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
+                          cpu1, node1, cpu2, node2, node2);
+
+               numa_remove_cpu(cpu1);
+               numa_set_node(cpu1, node2);
+               numa_add_cpu(cpu1);
+       }
+}
+
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
        cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
        cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
        cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
        cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
+       check_cpu_siblings_on_same_node(cpu1, cpu2);
 }
 
 
@@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
+                       check_cpu_siblings_on_same_node(cpu, i);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
+                       check_cpu_siblings_on_same_node(cpu, i);
                        /*
                         *  Does this new cpu bringup a new core?
                         */
index dc701ea..2d6d226 100644 (file)
@@ -74,6 +74,7 @@
                                compatible = "intel,ce4100-pci", "pci";
                                device_type = "pci";
                                bus-range = <1 1>;
+                               reg = <0x0800 0x0 0x0 0x0 0x0>;
                                ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
 
                                interrupt-parent = <&ioapic2>;
                                #address-cells = <2>;
                                #size-cells = <1>;
                                compatible = "isa";
+                               reg = <0xf800 0x0 0x0 0x0 0x0>;
                                ranges = <1 0 0 0 0 0x100>;
 
                                rtc@70 {
index 5c0207b..275dbc1 100644 (file)
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
                        pentry->freq_hz, pentry->irq);
                        if (!pentry->irq)
                                continue;
-                       mp_irq.type = MP_IOAPIC;
+                       mp_irq.type = MP_INTSRC;
                        mp_irq.irqtype = mp_INT;
 /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
                        mp_irq.irqflag = 5;
-                       mp_irq.srcbus = 0;
+                       mp_irq.srcbus = MP_BUS_ISA;
                        mp_irq.srcbusirq = pentry->irq; /* IRQ */
                        mp_irq.dstapic = MP_APIC_ALL;
                        mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
        for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
                pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
                        totallen, (u32)pentry->phys_addr, pentry->irq);
-               mp_irq.type = MP_IOAPIC;
+               mp_irq.type = MP_INTSRC;
                mp_irq.irqtype = mp_INT;
                mp_irq.irqflag = 0xf;   /* level trigger and active low */
-               mp_irq.srcbus = 0;
+               mp_irq.srcbus = MP_BUS_ISA;
                mp_irq.srcbusirq = pentry->irq; /* IRQ */
                mp_irq.dstapic = MP_APIC_ALL;
                mp_irq.dstirq = pentry->irq;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
        /* Avoid searching for BIOS MP tables */
        x86_init.mpparse.find_smp_config = x86_init_noop;
        x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
+       set_bit(MP_BUS_ISA, mp_bus_not_pci);
 }
 
 /*
index 1c7121b..5cc821c 100644 (file)
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
 config XEN_SAVE_RESTORE
        bool
        depends on XEN
+       select HIBERNATE_CALLBACKS
        default y
 
 config XEN_DEBUG_FS
index 49dbd78..e3c6a06 100644 (file)
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 static __init void xen_init_cpuid_mask(void)
 {
        unsigned int ax, bx, cx, dx;
+       unsigned int xsave_mask;
 
        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
-
        ax = 1;
-       cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
 
-       /* cpuid claims we support xsave; try enabling it to see what happens */
-       if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
-               unsigned long cr4;
-
-               set_in_cr4(X86_CR4_OSXSAVE);
-               
-               cr4 = read_cr4();
+       xsave_mask =
+               (1 << (X86_FEATURE_XSAVE % 32)) |
+               (1 << (X86_FEATURE_OSXSAVE % 32));
 
-               if ((cr4 & X86_CR4_OSXSAVE) == 0)
-                       cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
-
-               clear_in_cr4(X86_CR4_OSXSAVE);
-       }
+       /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+       if ((cx & xsave_mask) != xsave_mask)
+               cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
index c82df6c..a991b57 100644 (file)
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
        if (io_page &&
            (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
                other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-               WARN(addr != other_addr,
+               WARN_ONCE(addr != other_addr,
                        "0x%lx is using VM_IO, but it is 0x%lx!\n",
                        (unsigned long)addr, (unsigned long)other_addr);
        } else {
                pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
                other_addr = (_pte.pte & PTE_PFN_MASK);
-               WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+               WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
                        "0x%lx is missing VM_IO (and wasn't fixed)!\n",
                        (unsigned long)addr);
        }
index 90f22cc..5fa3dd2 100644 (file)
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
-       /*
-        * If the current process is plugged and has barriers submitted,
-        * we will livelock if we don't unplug first.
-        */
-       blk_flush_plug(current);
-}
-
 static void blk_delay_work(struct work_struct *work)
 {
        struct request_queue *q;
 
        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
 }
 
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+       queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                               msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
        cancel_delayed_work_sync(&q->delay_work);
-       queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -310,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
@@ -321,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else
@@ -329,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on our
+ *    behalf.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+       if (likely(!blk_queue_stopped(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -342,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                blk_queue_end_tag(q, rq);
 
        add_acct_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1311,15 @@ get_rq:
 
        plug = current->plug;
        if (plug) {
-               if (!plug->should_sort && !list_empty(&plug->list)) {
+               /*
+                * If this is the first request added after a plug, fire
+                * off a plug trace. If others have been added before, check
+                * if we have multiple devices in this plug. If so, make a
+                * note to sort the list before dispatch.
+                */
+               if (list_empty(&plug->list))
+                       trace_block_plug(q);
+               else if (!plug->should_sort) {
                        struct request *__rq;
 
                        __rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1335,7 @@ get_rq:
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
 out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
@@ -2644,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
        plug->magic = PLUG_MAGIC;
        INIT_LIST_HEAD(&plug->list);
+       INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
 
        /*
@@ -2668,33 +2677,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
-static void flush_plug_list(struct blk_plug *plug)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidentally large
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+                           bool from_schedule)
+       __releases(q->queue_lock)
+{
+       trace_block_unplug(q, depth, !from_schedule);
+
+       /*
+        * If we are punting this to kblockd, then we can safely drop
+        * the queue_lock before waking kblockd (which needs to take
+        * this lock).
+        */
+       if (from_schedule) {
+               spin_unlock(q->queue_lock);
+               blk_run_queue_async(q);
+       } else {
+               __blk_run_queue(q);
+               spin_unlock(q->queue_lock);
+       }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+       LIST_HEAD(callbacks);
+
+       if (list_empty(&plug->cb_list))
+               return;
+
+       list_splice_init(&plug->cb_list, &callbacks);
+
+       while (!list_empty(&callbacks)) {
+               struct blk_plug_cb *cb = list_first_entry(&callbacks,
+                                                         struct blk_plug_cb,
+                                                         list);
+               list_del(&cb->list);
+               cb->callback(cb);
+       }
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
        struct request_queue *q;
        unsigned long flags;
        struct request *rq;
+       LIST_HEAD(list);
+       unsigned int depth;
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
+       flush_plug_callbacks(plug);
        if (list_empty(&plug->list))
                return;
 
-       if (plug->should_sort)
-               list_sort(NULL, &plug->list, plug_rq_cmp);
+       list_splice_init(&plug->list, &list);
+
+       if (plug->should_sort) {
+               list_sort(NULL, &list, plug_rq_cmp);
+               plug->should_sort = 0;
+       }
 
        q = NULL;
+       depth = 0;
+
+       /*
+        * Save and disable interrupts here, to avoid doing it for every
+        * queue lock we have to take.
+        */
        local_irq_save(flags);
-       while (!list_empty(&plug->list)) {
-               rq = list_entry_rq(plug->list.next);
+       while (!list_empty(&list)) {
+               rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                BUG_ON(!rq->q);
                if (rq->q != q) {
-                       if (q) {
-                               __blk_run_queue(q, false);
-                               spin_unlock(q->queue_lock);
-                       }
+                       /*
+                        * This drops the queue lock
+                        */
+                       if (q)
+                               queue_unplugged(q, depth, from_schedule);
                        q = rq->q;
+                       depth = 0;
                        spin_lock(q->queue_lock);
                }
                rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2775,28 @@ static void flush_plug_list(struct blk_plug *plug)
                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
                else
                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-       }
 
-       if (q) {
-               __blk_run_queue(q, false);
-               spin_unlock(q->queue_lock);
+               depth++;
        }
 
-       BUG_ON(!list_empty(&plug->list));
-       local_irq_restore(flags);
-}
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-       flush_plug_list(plug);
+       /*
+        * This drops the queue lock
+        */
+       if (q)
+               queue_unplugged(q, depth, from_schedule);
 
-       if (plug == tsk->plug)
-               tsk->plug = NULL;
+       local_irq_restore(flags);
 }
+EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-       if (plug)
-               __blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+       blk_flush_plug_list(plug, false);
 
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-       __blk_finish_plug(tsk, plug);
-       tsk->plug = plug;
+       if (plug == current->plug)
+               current->plug = NULL;
 }
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
 
 int __init blk_dev_init(void)
 {
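blk_flush_plug_list() now also drains plug->cb_list via flush_plug_callbacks(), giving other code a hook that fires when a plug is flushed. Going only by the fields visible above (list and callback in struct blk_plug_cb), a consumer might look like the following sketch; my_plug_cb, my_unplug, and my_arm_plug_cb are hypothetical names, not an in-tree user:

struct my_plug_cb {
        struct blk_plug_cb cb;
        int pending;
};

static void my_unplug(struct blk_plug_cb *cb)
{
        struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

        /* Runs from flush_plug_callbacks() before the plugged requests
         * are dispatched to their queues. */
        mcb->pending = 0;
}

static void my_arm_plug_cb(struct my_plug_cb *mcb)
{
        mcb->cb.callback = my_unplug;
        if (current->plug)      /* only meaningful while a plug is active */
                list_add(&mcb->cb.list, &current->plug->cb_list);
}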
index 7482b7f..81e3181 100644 (file)
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
        __elv_add_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        /* the queue is stopped so it won't be plugged+unplugged */
        if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                q->request_fn(q);
index eba4a27..6c9b5e1 100644 (file)
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
         * request_fn may confuse the driver.  Always use kblockd.
         */
        if (queued)
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
index 261c75c..6d73512 100644 (file)
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
 {
        int ret;
        struct device *dev = disk_to_dev(disk);
-
        struct request_queue *q = disk->queue;
 
        if (WARN_ON(!q))
@@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk)
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
-               blk_trace_remove_sysfs(disk_to_dev(disk));
+               blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }
index 6126346..c9df8fc 100644 (file)
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
index 3be881e..46b0a1d 100644 (file)
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue, false);
+                               __blk_run_queue(cfqd->queue);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue, false);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue, false);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
index 0cdb4e7..6f6abc0 100644 (file)
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT_MERGE:
index 8210405..7025593 100644 (file)
@@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int amba_pm_freeze(struct device *dev)
 {
@@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze         NULL
 #define amba_pm_thaw           NULL
@@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
 #define amba_pm_poweroff_noirq NULL
 #define amba_pm_restore_noirq  NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM
 
index f051cff..9e0e4fc 100644 (file)
@@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
 
        of_device_node_put(&pa->pdev.dev);
        kfree(pa->pdev.dev.platform_data);
+       kfree(pa->pdev.mfd_cell);
        kfree(pa->pdev.resource);
        kfree(pa);
 }
@@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int platform_pm_freeze(struct device *dev)
 {
@@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define platform_pm_freeze             NULL
 #define platform_pm_thaw               NULL
@@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
 #define platform_pm_poweroff_noirq     NULL
 #define platform_pm_restore_noirq      NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
 
index 052dc53..fbc5b6e 100644 (file)
@@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
                }
                break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
@@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
                        suspend_report_result(ops->restore, error);
                }
                break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }
@@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
                }
                break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
@@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }
index 6b39675..8a78154 100644 (file)
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
        {}
 };
 
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
        .driver = {
                .name = "fsl-elo-dma",
                .owner = THIS_MODULE,
index 7f6f01a..0a775f7 100644 (file)
@@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val |= (1 << nr);
        else
                reg_val &= ~(1 << nr);
+       iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
 
        mutex_unlock(&chip->lock);
 
index 583e925..7630ab7 100644 (file)
@@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 
        ret = gpiochip_add(&chip->gpio_chip);
        if (ret)
-               goto out_failed;
+               goto out_failed_irq;
 
        if (pdata->setup) {
                ret = pdata->setup(client, chip->gpio_chip.base,
@@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
        i2c_set_clientdata(client, chip);
        return 0;
 
-out_failed:
+out_failed_irq:
        pca953x_irq_teardown(chip);
+out_failed:
        kfree(chip->dyn_pdata);
        kfree(chip);
        return ret;
index 2c6af87..f970a5f 100644 (file)
@@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val |= (1 << nr);
        else
                reg_val &= ~(1 << nr);
+       iowrite32(reg_val, &chip->reg->po);
 
        mutex_unlock(&chip->lock);
 
index a6feb78..c58f691 100644 (file)
@@ -96,6 +96,7 @@ config DRM_I915
        # i915 depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_CLASS_DEVICE if ACPI
+       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
index 8314a49..90aef64 100644 (file)
@@ -269,7 +269,7 @@ struct init_tbl_entry {
        int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 
-static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
 
 #define MACRO_INDEX_SIZE       2
 #define MACRO_SIZE             8
@@ -2010,6 +2010,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
        return 3;
 }
 
+static int
+init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+       /*
+        * INIT_JUMP   opcode: 0x5C ('\')
+        *
+        * offset      (8  bit): opcode
+        * offset + 1  (16 bit): offset (in bios)
+        *
+        * Continue execution of init table from 'offset'
+        */
+
+       uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
+
+       if (!iexec->execute)
+               return 3;
+
+       BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
+       return jmp_offset - offset;
+}
+
 static int
 init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
@@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
        { "INIT_ZM_REG_SEQUENCE"              , 0x58, init_zm_reg_sequence            },
        /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
        { "INIT_SUB_DIRECT"                   , 0x5B, init_sub_direct                 },
+       { "INIT_JUMP"                         , 0x5C, init_jump                       },
        { "INIT_I2C_IF"                       , 0x5E, init_i2c_if                     },
        { "INIT_COPY_NV_REG"                  , 0x5F, init_copy_nv_reg                },
        { "INIT_ZM_INDEX_IO"                  , 0x62, init_zm_index_io                },
@@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
 #define MAX_TABLE_OPS 1000
 
 static int
-parse_init_table(struct nvbios *bios, unsigned int offset,
-                struct init_exec *iexec)
+parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
        /*
         * Parses all commands in an init table.
@@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
                }
        }
 
+       /* XFX GT-240X-YA
+        *
+        * So many things wrong here, replace the entire encoder table..
+        */
+       if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+               if (idx == 0) {
+                       *conn = 0x02001300; /* VGA, connector 1 */
+                       *conf = 0x00000028;
+               } else
+               if (idx == 1) {
+                       *conn = 0x01010312; /* DVI, connector 0 */
+                       *conf = 0x00020030;
+               } else
+               if (idx == 2) {
+                       *conn = 0x01010310; /* VGA, connector 0 */
+                       *conf = 0x00000028;
+               } else
+               if (idx == 3) {
+                       *conn = 0x02022362; /* HDMI, connector 2 */
+                       *conf = 0x00020010;
+               } else {
+                       *conn = 0x0000000e; /* EOL */
+                       *conf = 0x00000000;
+               }
+       }
+
        return true;
 }
 
index 57e5302..856d56a 100644 (file)
@@ -1190,7 +1190,7 @@ extern int  nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
 extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern void nv84_graph_tlb_flush(struct drm_device *dev);
 extern struct nouveau_enum nv50_data_error_names[];
 
 /* nvc0_graph.c */
index 2683377..78f467f 100644 (file)
@@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
        u8 tRC;         /* Byte 9 */
        u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
        u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+       u8 magic_number = 0; /* Yeah... sorry */
        u8 *mem = NULL, *entry;
        int i, recordlen, entries;
 
@@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
        if (!memtimings->timing)
                return;
 
+       /* Get "some number" from the timing reg for NV_40
+        * Used in calculations later */
+       if(dev_priv->card_type == NV_40) {
+               magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+       }
+
        entry = mem + mem[1];
        for (i = 0; i < entries; i++, entry += recordlen) {
                struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
 
                /* XXX: I don't trust the -1's and +1's... they must come
                 *      from somewhere! */
-               timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+               timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
                                      tUNK_18 << 16 |
-                                     (tUNK_1 + tUNK_19 + 1) << 8 |
-                                     (tUNK_2 - 1));
+                                     (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+               if(dev_priv->chipset == 0xa8) {
+                       timing->reg_100224 |= (tUNK_2 - 1);
+               } else {
+                       timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+               }
 
                timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
-               if(recordlen > 19) {
-                       timing->reg_100228 += (tUNK_19 - 1) << 24;
-               }/* I cannot back-up this else-statement right now
-                        else {
-                       timing->reg_100228 += tUNK_12 << 24;
-               }*/
-
-               /* XXX: reg_10022c */
-               timing->reg_10022c = tUNK_2 - 1;
-
-               timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
-                                     tUNK_13 << 8  | tUNK_13);
-
-               /* XXX: +6? */
-               timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
-               timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
-
-               /* XXX; reg_100238, reg_10023c
-                * reg: 0x00??????
-                * reg_10023c:
-                *      0 for pre-NV50 cards
-                *      0x????0202 for NV50+ cards (empirical evidence) */
-               if(dev_priv->card_type >= NV_50) {
+               if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+                       timing->reg_100228 |= (tUNK_19 - 1) << 24;
+               }
+
+               if(dev_priv->card_type == NV_40) {
+                       /* NV40: don't know what the rest of the regs are..
+                        * And don't need to know either */
+                       timing->reg_100228 |= 0x20200000 | magic_number << 24;
+               } else if(dev_priv->card_type >= NV_50) {
+                       /* XXX: reg_10022c */
+                       timing->reg_10022c = tUNK_2 - 1;
+
+                       timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+                                                 tUNK_13 << 8  | tUNK_13);
+
+                       timing->reg_100234 = (tRAS << 24 | tRC);
+                       timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+                       if(dev_priv->chipset < 0xa3) {
+                               timing->reg_100234 |= (tUNK_2 + 2) << 8;
+                       } else {
+                               /* XXX: +6? */
+                               timing->reg_100234 |= (tUNK_19 + 6) << 8;
+                       }
+
+                       /* XXX; reg_100238, reg_10023c
+                        * reg_100238: 0x00??????
+                        * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
                        timing->reg_10023c = 0x202;
+                       if(dev_priv->chipset < 0xa3) {
+                               timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+                       } else {
+                               /* currently unknown
+                                * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+                       }
                }
 
                NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
                         timing->reg_100238, timing->reg_10023c);
        }
 
-       memtimings->nr_timing  = entries;
+       memtimings->nr_timing = entries;
        memtimings->supported = true;
 }
 
index ac62a1b..670e3cb 100644 (file)
@@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
                case 0x13:
                case 0x15:
                        perflvl->fanspeed = entry[55];
-                       perflvl->voltage = entry[56];
+                       perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
                        perflvl->core = ROM32(entry[1]) * 10;
                        perflvl->memory = ROM32(entry[5]) * 20;
                        break;
index 5bb2859..6e2b1a6 100644 (file)
@@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->graph.destroy_context   = nv50_graph_destroy_context;
                engine->graph.load_context      = nv50_graph_load_context;
                engine->graph.unload_context    = nv50_graph_unload_context;
-               if (dev_priv->chipset != 0x86)
+               if (dev_priv->chipset == 0x50 ||
+                   dev_priv->chipset == 0xac)
                        engine->graph.tlb_flush = nv50_graph_tlb_flush;
-               else {
-                       /* from what i can see nvidia do this on every
-                        * pre-NVA3 board except NVAC, but, we've only
-                        * ever seen problems on NV86
-                        */
-                       engine->graph.tlb_flush = nv86_graph_tlb_flush;
-               }
+               else
+                       engine->graph.tlb_flush = nv84_graph_tlb_flush;
                engine->fifo.channels           = 128;
                engine->fifo.init               = nv50_fifo_init;
                engine->fifo.takedown           = nv50_fifo_takedown;
index c82db37..12098bf 100644 (file)
@@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
        int head = nv_encoder->restore.head;
 
        if (nv_encoder->dcb->type == OUTPUT_LVDS) {
-               struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
-               if (native_mode)
-                       call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
-                                        native_mode->clock);
-               else
-                       NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+               struct nouveau_connector *connector =
+                       nouveau_encoder_connector_get(nv_encoder);
+
+               if (connector && connector->native_mode)
+                       call_lvds_script(dev, nv_encoder->dcb, head,
+                                        LVDS_PANEL_ON,
+                                        connector->native_mode->clock);
 
        } else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
                int clock = nouveau_hw_pllvals_to_clk
index 2b99840..a19ccaa 100644 (file)
@@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc)
 
        start = ptimer->read(dev);
        do {
-               nv_wr32(dev, 0x61002c, 0x370);
-               nv_wr32(dev, 0x000140, 1);
-
                if (nv_ro32(disp->ntfy, 0x000))
                        return 0;
        } while (ptimer->read(dev) - start < 2000000000ULL);
index a2cfaa6..c8e83c1 100644 (file)
@@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
        nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
 
        evo->dma.max = (4096/4) - 2;
+       evo->dma.max &= ~7;
        evo->dma.put = 0;
        evo->dma.cur = evo->dma.put;
        evo->dma.free = evo->dma.max - evo->dma.cur;
index 8675b00..b02a5b1 100644 (file)
@@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
 }
 
 void
-nv86_graph_tlb_flush(struct drm_device *dev)
+nv84_graph_tlb_flush(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
index 69af0ba..a0a2a02 100644 (file)
@@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm)
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct drm_device *dev = vm->dev;
        struct nouveau_vm_pgd *vpgd;
-       u32 r100c80, engine;
+       u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
        pinstmem->flush(vm->dev);
 
-       if (vm == dev_priv->chan_vm)
-               engine = 1;
-       else
-               engine = 5;
-
+       spin_lock(&dev_priv->ramin_lock);
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
-               r100c80 = nv_rd32(dev, 0x100c80);
+               /* looks like maybe a "free flush slots" counter, the
+                * faster you write to 0x100cbc the more it decreases
+                */
+               if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+                       NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+                                nv_rd32(dev, 0x100c80), engine);
+               }
                nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
                nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
-               if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
-                       NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+               /* wait for flush to be queued? */
+               if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+                       NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+                                nv_rd32(dev, 0x100c80), engine);
+               }
        }
+       spin_unlock(&dev_priv->ramin_lock);
 }
index 258fa5e..d71d375 100644 (file)
@@ -32,6 +32,7 @@
 #include "atom.h"
 #include "atom-names.h"
 #include "atom-bits.h"
+#include "radeon.h"
 
 #define ATOM_COND_ABOVE                0
 #define ATOM_COND_ABOVEOREQUAL 1
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
 static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                                 uint32_t index, uint32_t data)
 {
+       struct radeon_device *rdev = ctx->card->dev->dev_private;
        uint32_t temp = 0xCDCDCDCD;
+
        while (1)
                switch (CU8(base)) {
                case ATOM_IIO_NOP:
@@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                        base += 3;
                        break;
                case ATOM_IIO_WRITE:
-                       (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+                       if (rdev->family == CHIP_RV515)
+                               (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
                        ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
                        base += 3;
                        break;
index b41ec59..9d516a8 100644 (file)
@@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
                else
                        pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+               if ((rdev->family == CHIP_R600) ||
+                   (rdev->family == CHIP_RV610) ||
+                   (rdev->family == CHIP_RV630) ||
+                   (rdev->family == CHIP_RV670))
+                       pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
        } else {
                pll->flags |= RADEON_PLL_LEGACY;
 
index 0b0cc74..3453910 100644 (file)
@@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
        struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
        struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
-       if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
-               if (voltage->voltage != rdev->pm.current_vddc) {
-                       radeon_atom_set_voltage(rdev, voltage->voltage);
+       if (voltage->type == VOLTAGE_SW) {
+               if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+                       radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
-                       DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+                       DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+               }
+               if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+                       radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+                       rdev->pm.current_vddci = voltage->vddci;
+                       DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
                }
        }
 }
@@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_dummy_page_init(rdev);
-       if (r)
-               return r;
        /* This doesn't do much */
        r = radeon_gem_init(rdev);
        if (r)
@@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev)
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
-       radeon_dummy_page_fini(rdev);
 }
 
 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
index be271c4..15d5829 100644 (file)
@@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
 
        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
                if (voltage->voltage != rdev->pm.current_vddc) {
-                       radeon_atom_set_voltage(rdev, voltage->voltage);
+                       radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
                        DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
                }
@@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_dummy_page_init(rdev);
-       if (r)
-               return r;
        if (r600_debugfs_mc_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for mc !\n");
        }
@@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
-       radeon_dummy_page_fini(rdev);
 }
 
 
index 93f5365..ba643b5 100644 (file)
@@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
 void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
 extern int rv770_get_temp(struct radeon_device *rdev);
@@ -767,7 +767,9 @@ struct radeon_voltage {
        u8 vddci_id; /* index into vddci voltage table */
        bool vddci_enabled;
        /* r6xx+ sw */
-       u32 voltage;
+       u16 voltage;
+       /* evergreen+ vddci */
+       u16 vddci;
 };
 
 /* clock mode flags */
@@ -835,10 +837,12 @@ struct radeon_pm {
        int                     default_power_state_index;
        u32                     current_sclk;
        u32                     current_mclk;
-       u32                     current_vddc;
+       u16                     current_vddc;
+       u16                     current_vddci;
        u32                     default_sclk;
        u32                     default_mclk;
-       u32                     default_vddc;
+       u16                     default_vddc;
+       u16                     default_vddci;
        struct radeon_i2c_chan *i2c_bus;
        /* selected pm method */
        enum radeon_pm_method     pm_method;
index eb888ee..ca57619 100644 (file)
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
                rdev->mc_rreg = &rs600_mc_rreg;
                rdev->mc_wreg = &rs600_mc_wreg;
        }
-       if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
+       if (rdev->family >= CHIP_R600) {
                rdev->pciep_rreg = &r600_pciep_rreg;
                rdev->pciep_wreg = &r600_pciep_wreg;
        }
index 99768d9..f5d12fb 100644 (file)
@@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
        }
 }
 
-static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+                                                u16 *vddc, u16 *vddci)
 {
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
        u8 frev, crev;
        u16 data_offset;
        union firmware_info *firmware_info;
-       u16 vddc = 0;
+
+       *vddc = 0;
+       *vddci = 0;
 
        if (atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
                firmware_info =
                        (union firmware_info *)(mode_info->atom_context->bios +
                                                data_offset);
-               vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+               *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+               if ((frev == 2) && (crev >= 2))
+                       *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
        }
-
-       return vddc;
 }
 
 static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
        int j;
        u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
        u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
-       u16 vddc = radeon_atombios_get_default_vddc(rdev);
+       u16 vddc, vddci;
+
+       radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
 
        rdev->pm.power_state[state_index].misc = misc;
        rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
                        rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
                        rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
                        rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+                       rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
                } else {
                        /* patch the table values with the default sclk/mclk from firmware info */
                        for (j = 0; j < mode_index; j++) {
@@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                        VOLTAGE_SW;
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
                        le16_to_cpu(clock_info->evergreen.usVDDC);
+               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+                       le16_to_cpu(clock_info->evergreen.usVDDCI);
        } else {
                sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
                sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2577,25 +2585,25 @@ union set_voltage {
        struct _SET_VOLTAGE_PARAMETERS_V2 v2;
 };
 
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
 {
        union set_voltage args;
        int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-       u8 frev, crev, volt_index = level;
+       u8 frev, crev, volt_index = voltage_level;
 
        if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
                return;
 
        switch (crev) {
        case 1:
-               args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+               args.v1.ucVoltageType = voltage_type;
                args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
                args.v1.ucVoltageIndex = volt_index;
                break;
        case 2:
-               args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+               args.v2.ucVoltageType = voltage_type;
                args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-               args.v2.usVoltageLevel = cpu_to_le16(level);
+               args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
                break;
        default:
                DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
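
radeon_atom_set_voltage() follows the usual ATOM calling convention: one argument buffer declared as a union and filled per the command-table revision (crev) the firmware reports, with the new voltage_type parameter passed through either layout. A simplified stand-alone sketch; the struct layouts here are invented, not the real ATOM ones:

#include <stdint.h>
#include <stdio.h>

struct params_v1 {
        uint8_t voltage_type;
        uint8_t voltage_index;          /* v1 addresses a table entry */
};

struct params_v2 {
        uint8_t voltage_type;
        uint8_t voltage_mode;
        uint16_t voltage_level;         /* v2 takes the level directly */
};

union set_voltage {
        struct params_v1 v1;
        struct params_v2 v2;
};

static void set_voltage(uint8_t crev, uint8_t type, uint16_t level)
{
        union set_voltage args = { 0 };

        switch (crev) {
        case 1:
                args.v1.voltage_type = type;
                args.v1.voltage_index = (uint8_t)level;
                break;
        case 2:
                args.v2.voltage_type = type;
                args.v2.voltage_level = level;
                break;
        default:
                fprintf(stderr, "unknown table revision %u\n", crev);
                return;
        }
        /* ...the real driver hands &args to the ATOM interpreter here... */
        printf("crev %u: programmed type %u\n", crev, type);
}

int main(void)
{
        set_voltage(2, 1, 1100);        /* e.g. type 1 = VDDC, 1.1 V */
        return 0;
}
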
index 9e59868..bbcd1dd 100644 (file)
@@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
                        scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
                else
                        scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               seq = rdev->wb.wb[scratch_index/4];
+               seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
        } else
                seq = RREG32(rdev->fence_drv.scratch_reg);
        if (seq != rdev->fence_drv.last_seq) {
index f0534ef..8a955bb 100644 (file)
@@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev)
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;
        rdev->gart.ttm_alloced = NULL;
+
+       radeon_dummy_page_fini(rdev);
 }
index ded2a45..ccbabf7 100644 (file)
@@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
                *val = in_buf[0];
                DRM_DEBUG("val = 0x%02x\n", *val);
        } else {
-               DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+               DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
                          addr, *val);
        }
 }
@@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
        out_buf[1] = val;
 
        if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
-               DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+               DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
                          addr, val);
 }
 
index 5b54268..2f46e0c 100644 (file)
@@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
        .disable = radeon_legacy_encoder_disable,
 };
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
 #define MAX_RADEON_LEVEL 0xFF
 
index 08de669..86eda1e 100644 (file)
@@ -23,6 +23,7 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "avivod.h"
+#include "atom.h"
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 #endif
@@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
        /* set up the default clocks if the MC ucode is loaded */
        if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
                if (rdev->pm.default_vddc)
-                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDC);
+               if (rdev->pm.default_vddci)
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDCI);
                if (rdev->pm.default_sclk)
                        radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
                if (rdev->pm.default_mclk)
@@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
        rdev->pm.current_sclk = rdev->pm.default_sclk;
        rdev->pm.current_mclk = rdev->pm.default_mclk;
        rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
        if (rdev->pm.pm_method == PM_METHOD_DYNPM
            && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
                rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
                /* set up the default clocks if the MC ucode is loaded */
                if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
-                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+                                                       SET_VOLTAGE_TYPE_ASIC_VDDC);
                        if (rdev->pm.default_sclk)
                                radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
                        if (rdev->pm.default_mclk)
index bbc9cd8..c6776e4 100644 (file)
@@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
        if (rdev->wb.enabled)
-               rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+               rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
        else {
                if (rdev->family >= CHIP_R600)
                        rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
index 876cebc..6e3b11e 100644 (file)
@@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
                                udelay(voltage->delay);
                }
        } else if (voltage->type == VOLTAGE_VDDC)
-               radeon_atom_set_voltage(rdev, voltage->vddc_id);
+               radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
 
        dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
        dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
index b974ac7..ef8a5ba 100644 (file)
@@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
 
        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
                if (voltage->voltage != rdev->pm.current_vddc) {
-                       radeon_atom_set_voltage(rdev, voltage->voltage);
+                       radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
                        DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
                }
@@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_dummy_page_init(rdev);
-       if (r)
-               return r;
        /* This doesn't do much */
        r = radeon_gem_init(rdev);
        if (r)
@@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
-       radeon_dummy_page_fini(rdev);
 }
 
 static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
index 737a2a2..9d9d929 100644 (file)
@@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
                        gfp_flags |= GFP_HIGHUSER;
 
                for (r = 0; r < count; ++r) {
-                       if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
-                               void *addr;
-                               addr = dma_alloc_coherent(NULL, PAGE_SIZE,
-                                                         &dma_address[r],
-                                                         gfp_flags);
-                               if (addr == NULL)
-                                       return -ENOMEM;
-                               p = virt_to_page(addr);
-                       } else
-                               p = alloc_page(gfp_flags);
+                       p = alloc_page(gfp_flags);
                        if (!p) {
 
                                printk(KERN_ERR TTM_PFX
                                       "Unable to allocate page.");
                                return -ENOMEM;
                        }
+
                        list_add(&p->lru, pages);
                }
                return 0;
@@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p, *tmp;
-       unsigned r;
 
        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
 
-               r = page_count-1;
                list_for_each_entry_safe(p, tmp, pages, lru) {
-                       if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
-                               void *addr = page_address(p);
-                               WARN_ON(!addr || !dma_address[r]);
-                               if (addr)
-                                       dma_free_coherent(NULL, PAGE_SIZE,
-                                                         addr,
-                                                         dma_address[r]);
-                               dma_address[r] = 0;
-                       } else
-                               __free_page(p);
-                       r--;
+                       __free_page(p);
                }
                /* Make the pages list empty */
                INIT_LIST_HEAD(pages);
index 70e60a4..4199179 100644 (file)
@@ -5,6 +5,7 @@ config STUB_POULSBO
        # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_CLASS_DEVICE if ACPI
+       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select THERMAL if ACPI
index 38319a6..d6d5868 100644 (file)
@@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
  * Sanity check for the adapter hardware - check the reaction of
  * the bus lines only if it seems to be idle.
  */
-static int test_bus(struct i2c_algo_bit_data *adap, char *name)
+static int test_bus(struct i2c_adapter *i2c_adap)
 {
-       int scl, sda;
+       struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+       const char *name = i2c_adap->name;
+       int scl, sda, ret;
+
+       if (adap->pre_xfer) {
+               ret = adap->pre_xfer(i2c_adap);
+               if (ret < 0)
+                       return -ENODEV;
+       }
 
        if (adap->getscl == NULL)
                pr_info("%s: Testing SDA only, SCL is not readable\n", name);
@@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name)
                       "while pulling SCL high!\n", name);
                goto bailout;
        }
+
+       if (adap->post_xfer)
+               adap->post_xfer(i2c_adap);
+
        pr_info("%s: Test OK\n", name);
        return 0;
 bailout:
        sdahi(adap);
        sclhi(adap);
+
+       if (adap->post_xfer)
+               adap->post_xfer(i2c_adap);
+
        return -ENODEV;
 }
 
@@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
        int ret;
 
        if (bit_test) {
-               ret = test_bus(bit_adap, adap->name);
+               ret = test_bus(adap);
                if (ret < 0)
                        return -ENODEV;
        }
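
The reworked test_bus() brackets the line probing with the adapter's optional pre_xfer/post_xfer hooks, since some adapters only drive SCL/SDA between those calls. A stand-alone sketch of that shape, with simplified types and hooks that just print:

#include <stdio.h>

struct adapter {
        const char *name;
        int (*pre_xfer)(struct adapter *);      /* may be NULL */
        void (*post_xfer)(struct adapter *);    /* may be NULL */
};

static int test_bus(struct adapter *adap)
{
        int ret = 0;

        if (adap->pre_xfer && adap->pre_xfer(adap) < 0)
                return -1;                      /* -ENODEV in the driver */

        /* ...probe SCL/SDA here; on a stuck line set ret = -1... */
        printf("%s: Test OK\n", adap->name);

        if (adap->post_xfer)                    /* runs on success and on
                                                 * the bailout path alike */
                adap->post_xfer(adap);
        return ret;
}

static int power_up(struct adapter *a)
{
        printf("%s: bus enabled\n", a->name);
        return 0;
}

static void power_down(struct adapter *a)
{
        printf("%s: bus released\n", a->name);
}

int main(void)
{
        struct adapter a = { "demo-bus", power_up, power_down };
        return test_bus(&a) ? 1 : 0;
}
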
index 70c30e6..9a58994 100644 (file)
@@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
 
        /* Let legacy drivers scan this bus for matching devices */
        if (driver->attach_adapter) {
-               dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+               dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
+                        driver->driver.name);
                dev_warn(&adap->dev, "Please use another way to instantiate "
                         "your i2c_client\n");
                /* We ignore the return code; if it fails, too bad */
@@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
 
        if (!driver->detach_adapter)
                return 0;
-       dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
+       dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
+                driver->driver.name);
        res = driver->detach_adapter(adapter);
        if (res)
                dev_err(&adapter->dev, "detach_adapter failed (%d) "
index 7f42d3a..88d8e4c 100644 (file)
@@ -39,13 +39,13 @@ struct evdev {
 };
 
 struct evdev_client {
-       int head;
-       int tail;
+       unsigned int head;
+       unsigned int tail;
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
-       int bufsize;
+       unsigned int bufsize;
        struct input_event buffer[];
 };
 
@@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex);
 static void evdev_pass_event(struct evdev_client *client,
                             struct input_event *event)
 {
-       /*
-        * Interrupts are disabled, just acquire the lock.
-        * Make sure we don't leave with the client buffer
-        * "empty" by having client->head == client->tail.
-        */
+       /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);
-       do {
-               client->buffer[client->head++] = *event;
-               client->head &= client->bufsize - 1;
-       } while (client->head == client->tail);
+
+       client->buffer[client->head++] = *event;
+       client->head &= client->bufsize - 1;
+
+       if (unlikely(client->head == client->tail)) {
+               /*
+                * This effectively "drops" all unconsumed events, leaving
+                * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+                */
+               client->tail = (client->head - 2) & (client->bufsize - 1);
+
+               client->buffer[client->tail].time = event->time;
+               client->buffer[client->tail].type = EV_SYN;
+               client->buffer[client->tail].code = SYN_DROPPED;
+               client->buffer[client->tail].value = 0;
+       }
+
        spin_unlock(&client->buffer_lock);
 
        if (event->type == EV_SYN)
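
The evdev change replaces the old busy-wait on a full ring with a bounded drop: when head catches tail, the tail is pulled back two slots and a SYN_DROPPED marker is planted ahead of the newest event. A stand-alone model of that ring, with integer events and DROP standing in for the EV_SYN/SYN_DROPPED record:

#include <stdio.h>

#define BUFSIZE 8                       /* must be a power of two */
#define DROP    (-1)                    /* stands in for SYN_DROPPED */

static int buffer[BUFSIZE];
static unsigned int head, tail;

static void pass_event(int event)
{
        buffer[head++] = event;
        head &= BUFSIZE - 1;

        if (head == tail) {             /* ring just overflowed */
                tail = (head - 2) & (BUFSIZE - 1);
                buffer[tail] = DROP;    /* reader sees: DROP, newest, ... */
        }
}

int main(void)
{
        unsigned int i;
        int e;

        for (e = 1; e <= 10; e++)       /* two more events than slots */
                pass_event(e);
        for (i = tail; i != head; i = (i + 1) & (BUFSIZE - 1))
                printf("%d ", buffer[i]);
        printf("\n");                   /* prints: -1 8 9 10 */
        return 0;
}
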
index d6e8bd8..ebbceed 100644 (file)
@@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 }
 EXPORT_SYMBOL(input_set_capability);
 
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+       int mt_slots;
+       int i;
+       unsigned int events;
+
+       if (dev->mtsize) {
+               mt_slots = dev->mtsize;
+       } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+               mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+                          dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
+               mt_slots = clamp(mt_slots, 2, 32);
+       } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+               mt_slots = 2;
+       } else {
+               mt_slots = 0;
+       }
+
+       events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+       for (i = 0; i < ABS_CNT; i++) {
+               if (test_bit(i, dev->absbit)) {
+                       if (input_is_mt_axis(i))
+                               events += mt_slots;
+                       else
+                               events++;
+               }
+       }
+
+       for (i = 0; i < REL_CNT; i++)
+               if (test_bit(i, dev->relbit))
+                       events++;
+
+       return events;
+}
+
 #define INPUT_CLEANSE_BITMASK(dev, type, bits)                         \
        do {                                                            \
                if (!test_bit(EV_##type, dev->evbit))                   \
@@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev)
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
+       if (!dev->hint_events_per_packet)
+               dev->hint_events_per_packet =
+                               input_estimate_events_per_packet(dev);
+
        /*
         * If delay and period are pre-set by the driver, then autorepeating
         * is handled by the driver itself and we don't do it in input.c.
index 09bef79..a26922c 100644 (file)
@@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
 static int __devinit twl4030_kp_probe(struct platform_device *pdev)
 {
        struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
-       const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+       const struct matrix_keymap_data *keymap_data;
        struct twl4030_keypad *kp;
        struct input_dev *input;
        u8 reg;
        int error;
 
-       if (!pdata || !pdata->rows || !pdata->cols ||
+       if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
            pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
                dev_err(&pdev->dev, "Invalid platform_data\n");
                return -EINVAL;
        }
 
+       keymap_data = pdata->keymap_data;
+
        kp = kzalloc(sizeof(*kp), GFP_KERNEL);
        input = input_allocate_device();
        if (!kp || !input) {
index 7077f9b..62bae99 100644 (file)
@@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
                                   enum xenbus_state backend_state)
 {
        struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
-       int val;
+       int ret, val;
 
        switch (backend_state) {
        case XenbusStateInitialising:
@@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 
        case XenbusStateInitWait:
 InitWait:
+               ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                  "feature-abs-pointer", "%d", &val);
+               if (ret < 0)
+                       val = 0;
+               if (val) {
+                       ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+                                           "request-abs-pointer", "1");
+                       if (ret)
+                               pr_warning("xenkbd: can't request abs-pointer");
+               }
+
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
 
index efa0688..45f93d0 100644 (file)
@@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
                        IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
                printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
                err = -EBUSY;
-               goto fail2;
+               goto fail1;
        }
 
        if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
                        IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
                printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
                err = -EBUSY;
-               goto fail3;
+               goto fail2;
        }
 
        serio_set_drvdata(serio, ts);
 
        err = serio_open(serio, drv);
        if (err)
-               return err;
+               goto fail3;
 
        //h3600_flite_control(1, 25);     /* default brightness */
-       input_register_device(ts->dev);
+       err = input_register_device(ts->dev);
+       if (err)
+               goto fail4;
 
        return 0;
 
-fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4: serio_close(serio);
+fail3: serio_set_drvdata(serio, NULL);
+       free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
 fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: serio_set_drvdata(serio, NULL);
-       input_free_device(input_dev);
+fail1: input_free_device(input_dev);
        kfree(ts);
        return err;
 }
index 3790816..8497f56 100644 (file)
@@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
        led->cdev.flags |= LED_CORE_SUSPENDRESUME;
        led->vcc = vcc;
 
+       /* to handle correctly an already enabled regulator */
+       if (regulator_is_enabled(led->vcc))
+               led->enabled = 1;
+
        mutex_init(&led->mutex);
        INIT_WORK(&led->work, led_work);
 
index 5ef136c..e5d8904 100644 (file)
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
        return md_raid5_congested(&rs->md, bits);
 }
 
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
-       struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-       md_raid5_kick_device(rs->md.private);
-}
-
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
 
        rs->callbacks.congested_fn = raid_is_congested;
-       rs->callbacks.unplug_fn = raid_unplug;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
        return 0;
index b12b377..6e853c6 100644 (file)
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
  */
-static void plugger_work(struct work_struct *work)
-{
-       struct plug_handle *plug =
-               container_of(work, struct plug_handle, unplug_work);
-       plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
-       struct plug_handle *plug = (void *)data;
-       kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
-                 void (*unplug_fn)(struct plug_handle *))
-{
-       plug->unplug_flag = 0;
-       plug->unplug_fn = unplug_fn;
-       init_timer(&plug->unplug_timer);
-       plug->unplug_timer.function = plugger_timeout;
-       plug->unplug_timer.data = (unsigned long)plug;
-       INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+       struct blk_plug_cb cb;
+       mddev_t *mddev;
+};
 
-void plugger_set_plug(struct plug_handle *plug)
+static void plugger_unplug(struct blk_plug_cb *cb)
 {
-       if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
-               mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+       struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+       if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+               md_wakeup_thread(mdcb->mddev->thread);
+       kfree(mdcb);
 }
-EXPORT_SYMBOL_GPL(plugger_set_plug);
 
-int plugger_remove_plug(struct plug_handle *plug)
+/* Check that an unplug wakeup will come shortly.
+ * If not, wake up the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
 {
-       if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
-               del_timer(&plug->unplug_timer);
-               return 1;
-       } else
+       struct blk_plug *plug = current->plug;
+       struct md_plug_cb *mdcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+               if (mdcb->cb.callback == plugger_unplug &&
+                   mdcb->mddev == mddev) {
+                       /* Already on the list, move to top */
+                       if (mdcb != list_first_entry(&plug->cb_list,
+                                                   struct md_plug_cb,
+                                                   cb.list))
+                               list_move(&mdcb->cb.list, &plug->cb_list);
+                       return 1;
+               }
+       }
+       /* Not currently on the callback list */
+       mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+       if (!mdcb)
                return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
 
+       mdcb->mddev = mddev;
+       mdcb->cb.callback = plugger_unplug;
+       atomic_inc(&mddev->plug_cnt);
+       list_add(&mdcb->cb.list, &plug->cb_list);
+       return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
+       atomic_set(&mddev->plug_cnt, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
@@ -4723,7 +4735,6 @@ static void md_clean(mddev_t *mddev)
        mddev->bitmap_info.chunksize = 0;
        mddev->bitmap_info.daemon_sleep = 0;
        mddev->bitmap_info.max_write_behind = 0;
-       mddev->plug = NULL;
 }
 
 static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6699,6 @@ int md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-void md_unplug(mddev_t *mddev)
-{
-       if (mddev->plug)
-               mddev->plug->unplug_fn(mddev->plug);
-}
-
 #define SYNC_MARKS     10
 #define        SYNC_MARK_STEP  (3*HZ)
 void md_do_sync(mddev_t *mddev)
index 52b4073..0b1fd3f 100644 (file)
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
-       void                    (*unplug_fn)(struct plug_handle *);
-       struct timer_list       unplug_timer;
-       struct work_struct      unplug_work;
-       unsigned long           unplug_flag;
-};
-#define        PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
-                 void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
-       del_timer_sync(&plug->unplug_timer);
-       cancel_work_sync(&plug->unplug_work);
-}
-
 /*
  * MD's 'extended' device
  */
@@ -199,6 +179,9 @@ struct mddev_s
        int                             delta_disks, new_level, new_layout;
        int                             new_chunk_sectors;
 
+       atomic_t                        plug_cnt;       /* If device is expecting
+                                                        * more bios soon.
+                                                        */
        struct mdk_thread_s             *thread;        /* management thread */
        struct mdk_thread_s             *sync_thread;   /* doing resync or reconstruct */
        sector_t                        curr_resync;    /* last block scheduled */
@@ -336,7 +319,6 @@ struct mddev_s
        struct list_head                all_mddevs;
 
        struct attribute_group          *to_remove;
-       struct plug_handle              *plug; /* if used by personality */
 
        struct bio_set                  *bio_set;
 
@@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev);
 extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
 
 extern void mddev_init(mddev_t *mddev);
 extern int md_run(mddev_t *mddev);
@@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                                   mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
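
mddev_check_plugged(), declared just above, registers at most one callback per device on the current thread's plug list and keeps plug_cnt raised until the unplug fires, which is what lets raid1d/raid10d defer flush_pending_writes(). A toy single-threaded model of that bookkeeping; the list and types are simplified stand-ins for blk_plug:

#include <stdio.h>
#include <stdlib.h>

struct dev {
        int plug_cnt;                   /* mirrors mddev->plug_cnt */
};

struct cb {
        struct dev *d;
        struct cb *next;
};

static struct cb *plug_list;            /* stands in for current->plug */

static int check_plugged(struct dev *d)
{
        struct cb *c;

        for (c = plug_list; c; c = c->next)
                if (c->d == d)
                        return 1;       /* already plugged on this thread */

        c = malloc(sizeof(*c));
        if (!c)
                return 0;               /* caller must not defer its work */
        c->d = d;
        c->next = plug_list;
        plug_list = c;
        d->plug_cnt++;                  /* an unplug callback is now pending */
        return 1;
}

static void unplug_all(void)            /* what unplugging the thread does */
{
        while (plug_list) {
                struct cb *c = plug_list;

                plug_list = c->next;
                if (--c->d->plug_cnt == 0)
                        printf("wake device thread\n");
                free(c);
        }
}

int main(void)
{
        struct dev d = { 0 };

        check_plugged(&d);
        check_plugged(&d);              /* second call: no new registration */
        unplug_all();                   /* prints exactly once */
        return 0;
}
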
index c2a21ae..2b7a7ff 100644 (file)
@@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf)
                spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-       blk_flush_plug(current);
-       md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf)
 
        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
@@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf)
        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   md_kick_device(conf->mddev));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              md_kick_device(conf->mddev); }));
+                           flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        mdk_rdev_t *blocked_rdev;
+       int plugged;
 
        /*
         * Register the new request and wait if the reconstruction
@@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
+       plugged = mddev_check_plugged(mddev);
+
        disks = conf->raid_disks;
  retry_write:
        blocked_rdev = NULL;
@@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync || !bitmap)
+       if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
        return 0;
@@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev)
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
-       
+
+       blk_start_plug(&plug);
        for (;;) {
                char b[BDEVNAME_SIZE];
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev)
                }
                cond_resched();
        }
+       blk_finish_plug(&plug);
 }
 
 
@@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev)
 
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
-       blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        if (conf->r1bio_pool)
                mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
index 2da83d5..8e94626 100644 (file)
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
                spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-       blk_flush_plug(current);
-       md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
 
        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
 
-       /* No wait for all pending IO to complete */
+       /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock, md_kick_device(conf->mddev));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   md_kick_device(conf->mddev));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              md_kick_device(conf->mddev); }));
+                           flush_pending_writes(conf));
+
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
+       int plugged;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
+       plugged = mddev_check_plugged(mddev);
+
        raid10_find_phys(conf, r10_bio);
  retry_write:
        blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync || !mddev->bitmap)
+       if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-
        return 0;
 }
 
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        for (;;) {
                char b[BDEVNAME_SIZE];
 
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
                }
                cond_resched();
        }
+       blk_finish_plug(&plug);
 }
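
raid10's make_request() adopts the same wake-up rule as raid1 above; in sketch form (taken from the hunks, queueing elided): if no md-level plug is held there will be no later unplug event, so the daemon thread must be woken explicitly.

        int plugged = mddev_check_plugged(mddev);

        /* ... queue the write bios for the daemon thread ... */

        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);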
 
 
index e867ee4..f301e6a 100644
  *
  * We group bitmap updates into batches.  Each batch has a number.
  * We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
  *    new additions.
  * When we discover that we will need to write to any block in a stripe
  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
  * When we are ready to do a write, if that batch hasn't been written yet,
  *   we plug the array and queue the stripe for later.
  * When an unplug happens, we increment bm_flush, thus closing the current
@@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state)) {
+                       if (test_bit(STRIPE_DELAYED, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
-                               plugger_set_plug(&conf->plug);
-                       } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-                                  sh->bm_seq - conf->seq_write > 0) {
+                       else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+                                  sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
-                               plugger_set_plug(&conf->plug);
-                       } else {
+                       else {
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
                                                     < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
-                                                   md_raid5_kick_device(conf));
+                                                   );
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, previous);
@@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
                wait_event_lock_irq(conf->wait_for_stripe,
                                    !list_empty(&conf->inactive_list),
                                    conf->device_lock,
-                                   blk_flush_plug(current));
+                                   );
                osh = get_free_stripe(conf);
                spin_unlock_irq(&conf->device_lock);
                atomic_set(&nsh->count, 1);
@@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
                                atomic_inc(&conf->preread_active_stripes);
                        list_add_tail(&sh->lru, &conf->hold_list);
                }
-       } else
-               plugger_set_plug(&conf->plug);
+       }
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
@@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf)
        }
 }
 
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
-       blk_flush_plug(current);
-       raid5_activate_delayed(conf);
-       md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
-       raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
-       md_raid5_kick_device(conf);
-}
-
 int md_raid5_congested(mddev_t *mddev, int bits)
 {
        raid5_conf_t *conf = mddev->private;
@@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
+       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
+       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int disks, data_disks;
@@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                                 * add failed due to overlap.  Flush everything
                                 * and wait a while
                                 */
-                               md_raid5_kick_device(conf);
+                               md_wakeup_thread(mddev->thread);
                                release_stripe(sh);
                                schedule();
                                goto retry;
@@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                }
                        
        }
+       if (!plugged)
+               md_wakeup_thread(mddev->thread);
+
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
        spin_unlock_irq(&conf->device_lock);
@@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev)
        struct stripe_head *sh;
        raid5_conf_t *conf = mddev->private;
        int handled;
+       struct blk_plug plug;
 
        pr_debug("+++ raid5d active\n");
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        handled = 0;
        spin_lock_irq(&conf->device_lock);
        while (1) {
                struct bio *bio;
 
-               if (conf->seq_flush != conf->seq_write) {
-                       int seq = conf->seq_flush;
+               if (atomic_read(&mddev->plug_cnt) == 0 &&
+                   !list_empty(&conf->bitmap_list)) {
+                       /* Now is a good time to flush some bitmap updates */
+                       conf->seq_flush++;
                        spin_unlock_irq(&conf->device_lock);
                        bitmap_unplug(mddev->bitmap);
                        spin_lock_irq(&conf->device_lock);
-                       conf->seq_write = seq;
+                       conf->seq_write = conf->seq_flush;
                        activate_bit_delay(conf);
                }
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       raid5_activate_delayed(conf);
 
                while ((bio = remove_bio_from_retry(conf))) {
                        int ok;
@@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev)
        spin_unlock_irq(&conf->device_lock);
 
        async_tx_issue_pending_all();
+       blk_finish_plug(&plug);
 
        pr_debug("--- raid5d inactive\n");
 }
@@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev)
                       mdname(mddev));
        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-       plugger_init(&conf->plug, raid5_unplug);
-       mddev->plug = &conf->plug;
        if (mddev->queue) {
                int chunk_size;
                /* read-ahead size must cover two whole stripes, which
@@ -5192,7 +5184,6 @@ static int stop(mddev_t *mddev)
        mddev->thread = NULL;
        if (mddev->queue)
                mddev->queue->backing_dev_info.congested_fn = NULL;
-       plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
        free_conf(conf);
        mddev->private = NULL;
        mddev->to_remove = &raid5_attrs_group;
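
A worked illustration of the renamed batch counters at the top of this file's diff, with made-up numbers and assuming the semantics the comment block describes:

/*
 *  seq_write == seq_flush == 7    no bitmap batch pending
 *  stripe takes a bitmap update   sh->bm_seq = seq_flush + 1 = 8
 *  raid5d, plug count zero:
 *      seq_flush++                batch 8 closed to new additions
 *      bitmap_unplug(...)         batch 8 reaches the disk
 *      seq_write = seq_flush      stripes with bm_seq <= 8 may write
 */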
index 8d563a4..3ca77a2 100644
@@ -400,8 +400,6 @@ struct raid5_private_data {
                                            * Cleared when a sync completes.
                                            */
 
-       struct plug_handle      plug;
-
        /* per cpu variables */
        struct raid5_percpu {
                struct page     *spare_page; /* Used when checking P/Q in raid6 */
index c4742fc..c969111 100644
@@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        retval = remap_pfn_range(vma, vma->vm_start,
-                                PFN_DOWN(virt_to_phys(mem->vaddr)),
+                                mem->dma_handle >> PAGE_SHIFT,
                                 size, vma->vm_page_prot);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
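
The one-line videobuf fix above matters because dma_alloc_coherent() can return a remapped kernel virtual address for which virt_to_phys() is undefined (ARM in particular); the page frame must come from the DMA handle. Hedged sketch, assuming as this driver does that the bus address equals the physical address:

        void *vaddr;
        dma_addr_t dma_handle;

        vaddr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
        /* not virt_to_phys(vaddr): vaddr may be a non-linear mapping */
        remap_pfn_range(vma, vma->vm_start, dma_handle >> PAGE_SHIFT,
                        size, vma->vm_page_prot);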
index d01574d..f4c8c84 100644
@@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
 }
 EXPORT_SYMBOL(mfd_cell_disable);
 
+static int mfd_platform_add_cell(struct platform_device *pdev,
+                                const struct mfd_cell *cell)
+{
+       if (!cell)
+               return 0;
+
+       pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+       if (!pdev->mfd_cell)
+               return -ENOMEM;
+
+       return 0;
+}
+
 static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell,
                          struct resource *mem_base,
@@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
 
        pdev->dev.parent = parent;
 
-       ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+       ret = mfd_platform_add_cell(pdev, cell);
        if (ret)
                goto fail_res;
 
@@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
 
        return 0;
 
-/*     platform_device_del(pdev); */
 fail_res:
        kfree(res);
 fail_device:
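
After the mfd change above, the cell description lives in its own kmemdup()'d copy instead of overloading platform data, leaving platform_data free for the client driver. A hedged usage sketch; mfd_get_cell() is, as far as I know, the accessor the 2.6.39 MFD core pairs with the new pdev->mfd_cell field:

        /* in a sub-device driver's probe() */
        const struct mfd_cell *cell = mfd_get_cell(pdev);
        void *pdata = pdev->dev.platform_data;  /* no longer the cell */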
index 20e4e93..ecafa4b 100644
@@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
 
 static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
 
-static void gru_noop(unsigned int irq)
+static void gru_noop(struct irq_data *d)
 {
 }
 
 static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
        [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
-               .mask           = gru_noop,
-               .unmask         = gru_noop,
-               .ack            = gru_noop
+               .irq_mask       = gru_noop,
+               .irq_unmask     = gru_noop,
+               .irq_ack        = gru_noop
        }
 };
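
The gru hunk is a mechanical conversion to the struct irq_data chip API; a minimal sketch of what a non-trivial callback looks like under the new signature (demo names):

#include <linux/irq.h>

static void demo_irq_mask(struct irq_data *d)
{
        unsigned int irq = d->irq;      /* was a direct parameter before */
        void *priv = irq_data_get_irq_chip_data(d);

        /* ... set the mask bit for 'irq' in hardware via 'priv' ... */
}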
 
index c8ff646..0fa466a 100644
@@ -88,4 +88,6 @@ config PCI_IOAPIC
        depends on HOTPLUG
        default y
 
-select NLS if (DMI || ACPI)
+config PCI_LABEL
+       def_bool y if (DMI || ACPI)
+       select NLS
index 98d61c8..c85f744 100644
@@ -56,10 +56,10 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
 # ACPI Related PCI FW Functions
 # ACPI _DSM provided firmware instance and string name
 #
-obj-$(CONFIG_ACPI)    += pci-acpi.o pci-label.o
+obj-$(CONFIG_ACPI)    += pci-acpi.o
 
 # SMBIOS provided firmware instance and labels
-obj-$(CONFIG_DMI)    += pci-label.o
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
 
 # Cardbus & CompactPCI use setup-bus
 obj-$(CONFIG_HOTPLUG) += setup-bus.o
index d86ea8b..135df16 100644
@@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int pci_pm_freeze(struct device *dev)
 {
@@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev)
        return error;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define pci_pm_freeze          NULL
 #define pci_pm_freeze_noirq    NULL
@@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev)
 #define pci_pm_restore         NULL
 #define pci_pm_restore_noirq   NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
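
Hedged note on the rename above: CONFIG_HIBERNATE_CALLBACKS is, to my understanding, selected both by full hibernation and by users such as Xen save/restore that need only the freeze/thaw/restore callbacks, so guarding the PCI PM ops with it keeps them available in either configuration. The shape of the conditional is unchanged:

#ifdef CONFIG_HIBERNATE_CALLBACKS
static int demo_pm_freeze(struct device *dev) { return 0; }
#else
#define demo_pm_freeze  NULL
#endif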
 
index 453c54c..4c3e94c 100644
@@ -25,6 +25,8 @@
 
 #include <mach/balloon3.h>
 
+#include <asm/mach-types.h>
+
 #include "soc_common.h"
 
 /*
@@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void)
 {
        int ret;
 
+       if (!machine_is_balloon3())
+               return -ENODEV;
+
        balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!balloon3_pcmcia_device)
                return -ENOMEM;
index b7e5966..b829e65 100644
@@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
        for (i = 0; i < ARRAY_SIZE(irqs); i++) {
                if (irqs[i].sock != skt->nr)
                        continue;
-               if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 0) {
+               if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) {
                        pr_err("%s: sock %d unable to request gpio %d\n",
-                               __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq));
+                               __func__, skt->nr, irq_to_gpio(irqs[i].irq));
                        ret = -EBUSY;
                        goto error;
                }
-               if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) {
+               if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) {
                        pr_err("%s: sock %d unable to set input gpio %d\n",
-                               __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq));
+                               __func__, skt->nr, irq_to_gpio(irqs[i].irq));
                        ret = -EINVAL;
                        goto error;
                }
@@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 
 error:
        for (; i >= 0; i--) {
-               gpio_free(IRQ_TO_GPIO(irqs[i].irq));
+               gpio_free(irq_to_gpio(irqs[i].irq));
        }
        return (ret);
 }
@@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
        /* free allocated gpio's */
        gpio_free(GPIO_PRDY);
        for (i = 0; i < ARRAY_SIZE(irqs); i++)
-               gpio_free(IRQ_TO_GPIO(irqs[i].irq));
+               gpio_free(irq_to_gpio(irqs[i].irq));
 }
 
 static unsigned long trizeps_pcmcia_status[2];
@@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void)
 {
        int ret;
 
+       if (!machine_is_trizeps4() && !machine_is_trizeps4wl())
+               return -ENODEV;
+
        trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!trizeps_pcmcia_device)
                return -ENOMEM;
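
Both PCMCIA socket drivers above gain the same guard: their initcalls run whenever the module is loaded, so without a machine check a pxa2xx-pcmcia device would be registered on any kernel that happens to carry the module. Sketch of the pattern (demo names):

#include <asm/mach-types.h>

static int __init demo_pcmcia_init(void)
{
        if (!machine_is_balloon3())     /* bail out on foreign boards */
                return -ENODEV;
        /* ... platform_device_alloc()/platform_device_add() as above ... */
        return 0;
}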
index 2ee442c..0485e39 100644
@@ -187,7 +187,8 @@ config MSI_LAPTOP
        depends on ACPI
        depends on BACKLIGHT_CLASS_DEVICE
        depends on RFKILL
-       depends on SERIO_I8042
+       depends on INPUT && SERIO_I8042
+       select INPUT_SPARSEKMAP
        ---help---
          This is a driver for laptops built by MSI (MICRO-STAR
          INTERNATIONAL):
index 5ea6c34..ac4e7f8 100644
@@ -89,7 +89,7 @@ MODULE_LICENSE("GPL");
 #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
 
 MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
-MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3");
 MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
 
 enum acer_wmi_event_ids {
index efc776c..832a3fd 100644
@@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
        if (!asus->inputdev)
                return -ENOMEM;
 
-       asus->inputdev->name = asus->driver->input_phys;
-       asus->inputdev->phys = asus->driver->input_name;
+       asus->inputdev->name = asus->driver->input_name;
+       asus->inputdev->phys = asus->driver->input_phys;
        asus->inputdev->id.bustype = BUS_HOST;
        asus->inputdev->dev.parent = &asus->platform_device->dev;
 
index 0ddc434..649dcad 100644
@@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = {
        { KE_KEY, 0x82, { KEY_CAMERA } },
        { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
        { KE_KEY, 0x88, { KEY_WLAN } },
+       { KE_KEY, 0xbd, { KEY_CAMERA } },
        { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
        { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
        { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+       { KE_KEY, 0xe8, { KEY_SCREENLOCK } },
        { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
        { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
        { KE_KEY, 0xec, { KEY_CAMERA_UP } },
index d653104..464bb3f 100644
@@ -74,6 +74,19 @@ struct pmic_gpio {
        u32                     trigger_type;
 };
 
+static void pmic_program_irqtype(int gpio, int type)
+{
+       if (type & IRQ_TYPE_EDGE_RISING)
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
+       else
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
+
+       if (type & IRQ_TYPE_EDGE_FALLING)
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
+       else
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
+};
+
 static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        if (offset > 8) {
@@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
        return pg->irq_base + offset;
 }
 
+static void pmic_bus_lock(struct irq_data *data)
+{
+       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+       mutex_lock(&pg->buslock);
+}
+
+static void pmic_bus_sync_unlock(struct irq_data *data)
+{
+       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+       if (pg->update_type) {
+               unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
+
+               pmic_program_irqtype(gpio, pg->trigger_type);
+               pg->update_type = 0;
+       }
+       mutex_unlock(&pg->buslock);
+}
+
 /* the gpiointr register is read-clear, so just do nothing. */
 static void pmic_irq_unmask(struct irq_data *data) { }
 
 static void pmic_irq_mask(struct irq_data *data) { }
 
 static struct irq_chip pmic_irqchip = {
-       .name           = "PMIC-GPIO",
-       .irq_mask       = pmic_irq_mask,
-       .irq_unmask     = pmic_irq_unmask,
-       .irq_set_type   = pmic_irq_type,
+       .name                   = "PMIC-GPIO",
+       .irq_mask               = pmic_irq_mask,
+       .irq_unmask             = pmic_irq_unmask,
+       .irq_set_type           = pmic_irq_type,
+       .irq_bus_lock           = pmic_bus_lock,
+       .irq_bus_sync_unlock    = pmic_bus_sync_unlock,
 };
 
 static irqreturn_t pmic_irq_handler(int irq, void *data)
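
The intel_pmic_gpio IRQ chip sits behind the sleeping SCU IPC bus, so the hunk above adds the standard slow-bus pattern: atomic chip callbacks only record what changed under the bus mutex, and the sleeping write happens in irq_bus_sync_unlock(). A generic, hypothetical sketch (struct and field names invented):

#include <linux/irq.h>
#include <linux/mutex.h>

struct demo_chip {
        struct mutex buslock;           /* taken in ->irq_bus_lock() */
        unsigned int pending_type;
        bool dirty;
};

static int demo_irq_set_type(struct irq_data *d, unsigned int type)
{
        struct demo_chip *c = irq_data_get_irq_chip_data(d);

        c->pending_type = type;         /* record only: no bus I/O here */
        c->dirty = true;                /* flushed in ->irq_bus_sync_unlock() */
        return 0;
}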
index de434c6..d347116 100644
@@ -570,6 +570,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
                .callback = dmi_check_cb,
        },
+       {
+               .ident = "R410 Plus",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR,
+                                       "SAMSUNG ELECTRONICS CO., LTD."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+                       DMI_MATCH(DMI_BOARD_NAME, "R460"),
+               },
+               .callback = dmi_check_cb,
+       },
        {
                .ident = "R518",
                .matches = {
@@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                .callback = dmi_check_cb,
        },
        {
-               .ident = "N150/N210/N220",
+               .ident = "N150/N210/N220/N230",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                        "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
                },
                .callback = dmi_check_cb,
        },
@@ -771,6 +781,7 @@ static int __init samsung_init(void)
 
        /* create a backlight device to talk to this one */
        memset(&props, 0, sizeof(struct backlight_properties));
+       props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = sabi_config->max_brightness;
        backlight_device = backlight_device_register("samsung", &sdev->dev,
                                                     NULL, &backlight_ops,
index e642f5f..8f709ae 100644
@@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
                 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
                 "(default: 0)");
 
+static void sony_nc_kbd_backlight_resume(void);
+
 enum sony_nc_rfkill {
        SONY_WIFI,
        SONY_BLUETOOTH,
@@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd)
        if (!handles)
                return -ENOMEM;
 
-       sysfs_attr_init(&handles->devattr.attr);
-       handles->devattr.attr.name = "handles";
-       handles->devattr.attr.mode = S_IRUGO;
-       handles->devattr.show = sony_nc_handles_show;
-
        for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
                if (!acpi_callsetfunc(sony_nc_acpi_handle,
                                        "SN00", i + 0x20, &result)) {
@@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd)
                }
        }
 
-       /* allow reading capabilities via sysfs */
-       if (device_create_file(&pd->dev, &handles->devattr)) {
-               kfree(handles);
-               handles = NULL;
-               return -1;
+       if (debug) {
+               sysfs_attr_init(&handles->devattr.attr);
+               handles->devattr.attr.name = "handles";
+               handles->devattr.attr.mode = S_IRUGO;
+               handles->devattr.show = sony_nc_handles_show;
+
+               /* allow reading capabilities via sysfs */
+               if (device_create_file(&pd->dev, &handles->devattr)) {
+                       kfree(handles);
+                       handles = NULL;
+                       return -1;
+               }
        }
 
        return 0;
@@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd)
 static int sony_nc_handles_cleanup(struct platform_device *pd)
 {
        if (handles) {
-               device_remove_file(&pd->dev, &handles->devattr);
+               if (debug)
+                       device_remove_file(&pd->dev, &handles->devattr);
                kfree(handles);
                handles = NULL;
        }
@@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd)
 static int sony_find_snc_handle(int handle)
 {
        int i;
+
+       /* not initialized yet, return early */
+       if (!handles)
+               return -1;
+
        for (i = 0; i < 0x10; i++) {
                if (handles->cap[i] == handle) {
                        dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
@@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device)
        /* re-read rfkill state */
        sony_nc_rfkill_update();
 
+       /* restore kbd backlight states */
+       sony_nc_kbd_backlight_resume();
+
        return 0;
 }
 
@@ -1355,6 +1368,7 @@ out_no_enum:
 #define KBDBL_HANDLER  0x137
 #define KBDBL_PRESENT  0xB00
 #define        SET_MODE        0xC00
+#define SET_STATE      0xD00
 #define SET_TIMEOUT    0xE00
 
 struct kbd_backlight {
@@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
                                (value << 0x10) | SET_MODE, &result))
                return -EIO;
 
+       /* Try to turn the light on/off immediately */
+       sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
+                       &result);
+
        kbdbl_handle->mode = value;
 
        return 0;
@@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
 {
        int result;
 
-       if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result))
+       if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
                return 0;
        if (!(result & 0x02))
                return 0;
@@ -1501,13 +1519,36 @@ outkzalloc:
 static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
        if (kbdbl_handle) {
+               int result;
+
                device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
                device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+
+               /* restore the default hw behaviour */
+               sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
+               sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+
                kfree(kbdbl_handle);
        }
        return 0;
 }
 
+static void sony_nc_kbd_backlight_resume(void)
+{
+       int ignore = 0;
+
+       if (!kbdbl_handle)
+               return;
+
+       if (kbdbl_handle->mode == 0)
+               sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
+
+       if (kbdbl_handle->timeout != 0)
+               sony_call_snc_handle(KBDBL_HANDLER,
+                               (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+                               &ignore);
+}
+
 static void sony_nc_backlight_setup(void)
 {
        acpi_handle unused;
index a08561f..efb3b6b 100644
@@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
                tpacpi_is_fw_digit(s[1]) &&
                s[2] == t && s[3] == 'T' &&
                tpacpi_is_fw_digit(s[4]) &&
-               tpacpi_is_fw_digit(s[5]) &&
-               s[6] == 'W' && s[7] == 'W';
+               tpacpi_is_fw_digit(s[5]);
 }
 
 /* returns 0 - probe ok, or < 0 - probe error.
index c29719c..86c9a09 100644
@@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str)
 
 __setup("riohdid=", rio_hdid_setup);
 
-void rio_register_mport(struct rio_mport *port)
+int rio_register_mport(struct rio_mport *port)
 {
        if (next_portid >= RIO_MAX_MPORTS) {
                pr_err("RIO: reached specified max number of mports\n");
-               return;
+               return 1;
        }
 
        port->id = next_portid++;
        port->host_deviceid = rio_get_hdid(port->id);
        list_add_tail(&port->node, &rio_mports);
+       return 0;
 }
 
 EXPORT_SYMBOL_GPL(rio_local_get_device_id);
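
rio_register_mport() growing a return value lets controller probes fail cleanly instead of silently overflowing RIO_MAX_MPORTS; a hedged caller sketch:

        if (rio_register_mport(port))
                return -ENODEV; /* mport table full: abort the probe */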
index 095016a..ac2701b 100644
@@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init);
index 09b4437..3901386 100644
@@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
        err = __rtc_read_alarm(rtc, &alrm);
 
        if (!err && !rtc_valid_tm(&alrm.time))
-               rtc_set_alarm(rtc, &alrm);
+               rtc_initialize_alarm(rtc, &alrm);
 
        strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
        dev_set_name(&rtc->dev, "rtc%d", id);
index 23719f0..ef6316a 100644
@@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
+/* Called once per device from rtc_device_register */
+int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+       int err;
+
+       err = rtc_valid_tm(&alarm->time);
+       if (err != 0)
+               return err;
+
+       err = mutex_lock_interruptible(&rtc->ops_lock);
+       if (err)
+               return err;
+
+       rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+       rtc->aie_timer.period = ktime_set(0, 0);
+       if (alarm->enabled) {
+               rtc->aie_timer.enabled = 1;
+               timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+       }
+       mutex_unlock(&rtc->ops_lock);
+       return err;
+}
+EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
+
+
+
 int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 {
        int err = mutex_lock_interruptible(&rtc->ops_lock);
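
rtc_initialize_alarm() exists because rtc_set_alarm() applies normal alarm policy (in 2.6.39 it refuses, with -ETIME, an alarm already in the past) and re-programs the hardware, both wrong at registration time, when the kernel merely wants to mirror whatever alarm the RTC already carries into its timerqueue. The registration path above thus reads, in sketch form:

        struct rtc_wkalrm alrm;

        err = __rtc_read_alarm(rtc, &alrm);       /* what the hardware holds */
        if (!err && !rtc_valid_tm(&alrm.time))
                rtc_initialize_alarm(rtc, &alrm); /* seed state, no policy */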
index a0fc4cf..90d8662 100644
@@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
                bfin_rtc_int_set_alarm(rtc);
        else
                bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+       return 0;
 }
 
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
index c420064..c5ac037 100644
@@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = {
        }, {
                .name = "mc13892-rtc",
        },
+       { }
 };
 
 static struct platform_driver mc13xxx_rtc_driver = {
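
The lone '{ }' added above is a required sentinel: platform id tables are walked until an entry with an empty name, so without a zeroed terminator platform_match_id() reads past the end of the array. The pattern in full:

static const struct platform_device_id demo_idtable[] = {
        { .name = "demo-a" },
        { .name = "demo-b" },
        { }     /* sentinel: stops the id walk */
};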
index de0dd7b..bcae8dd 100644
@@ -394,7 +394,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail2:
-       free_irq(omap_rtc_timer, NULL);
+       free_irq(omap_rtc_timer, rtc);
 fail1:
        rtc_device_unregister(rtc);
 fail0:
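
The omap fix works because IRQ actions are matched by the (irq, dev_id) pair; freeing with a NULL cookie when the handler was installed with 'rtc' removes nothing and triggers a warning. Illustrative pairing (handler and name string hypothetical):

        request_irq(omap_rtc_timer, demo_timer_irq, 0, "omap-rtc-timer", rtc);
        /* ... */
        free_irq(omap_rtc_timer, rtc);  /* same dev_id as request_irq() */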
index 7149649..b3466c4 100644
@@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev)
 
        /* do not clear AIE here, it may be needed for wake */
 
-       s3c_rtc_setpie(dev, 0);
        free_irq(s3c_rtc_alarmno, rtc_dev);
        free_irq(s3c_rtc_tickno, rtc_dev);
 }
@@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
        platform_set_drvdata(dev, NULL);
        rtc_device_unregister(rtc);
 
-       s3c_rtc_setpie(&dev->dev, 0);
        s3c_rtc_setaie(&dev->dev, 0);
 
        clk_disable(rtc_clk);
index 6d5c7ff..ab55c2f 100644
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue, false);
+               __blk_run_queue(sdev->request_queue);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
index fdf3fa6..28c3350 100644
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
        if (flagset)
                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q, false);
+       __blk_run_queue(rport->rqst_q);
        if (flagset)
                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
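
Both scsi hunks track a block-layer signature change: __blk_run_queue() lost its 'force kblockd' flag in the 2.6.39 plugging rework. Passing false, as these callers did, maps onto the plain synchronous form; callers that wanted the workqueue offload moved, if I recall the companion API correctly, to blk_run_queue_async().

        __blk_run_queue(q);     /* runs the queue directly; queue_lock held */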
index 5825370..08de58e 100644
@@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022)
         * A wait_queue on the pl022->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead */
-       while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
+       while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
                spin_unlock_irqrestore(&pl022->queue_lock, flags);
                msleep(10);
                spin_lock_irqsave(&pl022->queue_lock, flags);
index b1a4b9f..871e337 100644
@@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws)
 
        spin_lock_irqsave(&dws->lock, flags);
        dws->run = QUEUE_STOPPED;
-       while (!list_empty(&dws->queue) && dws->busy && limit--) {
+       while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
                spin_unlock_irqrestore(&dws->lock, flags);
                msleep(10);
                spin_lock_irqsave(&dws->lock, flags);
index 9c74aad..dc25bee 100644
@@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data)
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead */
        drv_data->run = QUEUE_STOPPED;
-       while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+       while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                msleep(10);
                spin_lock_irqsave(&drv_data->lock, flags);
index bdb7289..f706dba 100644
@@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
         * friends on every SPI message. Do this instead
         */
        drv_data->running = false;
-       while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+       while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                msleep(10);
                spin_lock_irqsave(&drv_data->lock, flags);
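
The same one-character fix lands in four SPI drivers above; a worked truth table shows the inverted logic:

        /*
         * Goal: wait until the queue is drained AND the controller is idle,
         * i.e. stop when  list_empty(&queue) && !busy.  By De Morgan, keep
         * waiting while  !list_empty(&queue) || busy.
         *
         * The old  !list_empty(&queue) && busy  gave up as soon as either
         * half cleared:
         *
         *   queue      busy   old '&&' waits?   fixed '||' waits?
         *   non-empty  yes    yes               yes
         *   non-empty  no     no  (bug)         yes
         *   empty      yes    no  (bug)         yes
         *   empty      no     no                no
         */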
index dca4a0b..e3786f1 100644
@@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
 
-source "drivers/staging/samsung-laptop/Kconfig"
-
 source "drivers/staging/sm7xx/Kconfig"
 
 source "drivers/staging/dt3155v4l/Kconfig"
index eb93012..f0d5c53 100644
@@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC)                += zram/
 obj-$(CONFIG_ZCACHE)           += zcache/
 obj-$(CONFIG_WLAGS49_H2)       += wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)      += wlags49_h25/
-obj-$(CONFIG_SAMSUNG_LAPTOP)   += samsung-laptop/
 obj-$(CONFIG_FB_SM7XX)         += sm7xx/
 obj-$(CONFIG_VIDEO_DT3155)     += dt3155v4l/
 obj-$(CONFIG_CRYSTALHD)                += crystalhd/
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig
deleted file mode 100644
index f27c608..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-config SAMSUNG_LAPTOP
-       tristate "Samsung Laptop driver"
-       default n
-       depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86
-       help
-         This module implements a driver for the N128 Samsung Laptop
-         providing control over the Wireless LED and the LCD backlight
-
-         To compile this driver as a module, choose
-         M here: the module will be called samsung-laptop.
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile
deleted file mode 100644
index 3c6f420..0000000
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SAMSUNG_LAPTOP)   += samsung-laptop.o
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO
deleted file mode 100644
index f7a6d58..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
-       - review from other developers
-       - figure out ACPI video issues
-
-Please send patches to Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
deleted file mode 100644
index 2529446..0000000
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Samsung Laptop driver
- *
- * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
- * Copyright (C) 2009,2011 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/dmi.h>
-#include <linux/platform_device.h>
-#include <linux/rfkill.h>
-
-/*
- * This driver is needed because a number of Samsung laptops do not hook
- * their control settings through ACPI.  So we have to poke around in the
- * BIOS to do things like brightness values, and "special" key controls.
- */
-
-/*
- * We have 0 - 8 as valid brightness levels.  The specs say that level 0 should
- * be reserved by the BIOS (which really doesn't make much sense), we tell
- * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8
- */
-#define MAX_BRIGHT     0x07
-
-
-#define SABI_IFACE_MAIN                        0x00
-#define SABI_IFACE_SUB                 0x02
-#define SABI_IFACE_COMPLETE            0x04
-#define SABI_IFACE_DATA                        0x05
-
-/* Structure to get data back to the calling function */
-struct sabi_retval {
-       u8 retval[20];
-};
-
-struct sabi_header_offsets {
-       u8 port;
-       u8 re_mem;
-       u8 iface_func;
-       u8 en_mem;
-       u8 data_offset;
-       u8 data_segment;
-};
-
-struct sabi_commands {
-       /*
-        * Brightness is 0 - 8, as described above.
-        * Value 0 is for the BIOS to use
-        */
-       u8 get_brightness;
-       u8 set_brightness;
-
-       /*
-        * first byte:
-        * 0x00 - wireless is off
-        * 0x01 - wireless is on
-        * second byte:
-        * 0x02 - 3G is off
-        * 0x03 - 3G is on
-        * TODO, verify 3G is correct, that doesn't seem right...
-        */
-       u8 get_wireless_button;
-       u8 set_wireless_button;
-
-       /* 0 is off, 1 is on */
-       u8 get_backlight;
-       u8 set_backlight;
-
-       /*
-        * 0x80 or 0x00 - no action
-        * 0x81 - recovery key pressed
-        */
-       u8 get_recovery_mode;
-       u8 set_recovery_mode;
-
-       /*
-        * on seclinux: 0 is low, 1 is high,
-        * on swsmi: 0 is normal, 1 is silent, 2 is turbo
-        */
-       u8 get_performance_level;
-       u8 set_performance_level;
-
-       /*
-        * Tell the BIOS that Linux is running on this machine.
-        * 81 is on, 80 is off
-        */
-       u8 set_linux;
-};
-
-struct sabi_performance_level {
-       const char *name;
-       u8 value;
-};
-
-struct sabi_config {
-       const char *test_string;
-       u16 main_function;
-       const struct sabi_header_offsets header_offsets;
-       const struct sabi_commands commands;
-       const struct sabi_performance_level performance_levels[4];
-       u8 min_brightness;
-       u8 max_brightness;
-};
-
-static const struct sabi_config sabi_configs[] = {
-       {
-               .test_string = "SECLINUX",
-
-               .main_function = 0x4c49,
-
-               .header_offsets = {
-                       .port = 0x00,
-                       .re_mem = 0x02,
-                       .iface_func = 0x03,
-                       .en_mem = 0x04,
-                       .data_offset = 0x05,
-                       .data_segment = 0x07,
-               },
-
-               .commands = {
-                       .get_brightness = 0x00,
-                       .set_brightness = 0x01,
-
-                       .get_wireless_button = 0x02,
-                       .set_wireless_button = 0x03,
-
-                       .get_backlight = 0x04,
-                       .set_backlight = 0x05,
-
-                       .get_recovery_mode = 0x06,
-                       .set_recovery_mode = 0x07,
-
-                       .get_performance_level = 0x08,
-                       .set_performance_level = 0x09,
-
-                       .set_linux = 0x0a,
-               },
-
-               .performance_levels = {
-                       {
-                               .name = "silent",
-                               .value = 0,
-                       },
-                       {
-                               .name = "normal",
-                               .value = 1,
-                       },
-                       { },
-               },
-               .min_brightness = 1,
-               .max_brightness = 8,
-       },
-       {
-               .test_string = "SwSmi@",
-
-               .main_function = 0x5843,
-
-               .header_offsets = {
-                       .port = 0x00,
-                       .re_mem = 0x04,
-                       .iface_func = 0x02,
-                       .en_mem = 0x03,
-                       .data_offset = 0x05,
-                       .data_segment = 0x07,
-               },
-
-               .commands = {
-                       .get_brightness = 0x10,
-                       .set_brightness = 0x11,
-
-                       .get_wireless_button = 0x12,
-                       .set_wireless_button = 0x13,
-
-                       .get_backlight = 0x2d,
-                       .set_backlight = 0x2e,
-
-                       .get_recovery_mode = 0xff,
-                       .set_recovery_mode = 0xff,
-
-                       .get_performance_level = 0x31,
-                       .set_performance_level = 0x32,
-
-                       .set_linux = 0xff,
-               },
-
-               .performance_levels = {
-                       {
-                               .name = "normal",
-                               .value = 0,
-                       },
-                       {
-                               .name = "silent",
-                               .value = 1,
-                       },
-                       {
-                               .name = "overclock",
-                               .value = 2,
-                       },
-                       { },
-               },
-               .min_brightness = 0,
-               .max_brightness = 8,
-       },
-       { },
-};
-
-static const struct sabi_config *sabi_config;
-
-static void __iomem *sabi;
-static void __iomem *sabi_iface;
-static void __iomem *f0000_segment;
-static struct backlight_device *backlight_device;
-static struct mutex sabi_mutex;
-static struct platform_device *sdev;
-static struct rfkill *rfk;
-
-static int force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force,
-               "Disable the DMI check and forces the driver to be loaded");
-
-static int debug;
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
-static int sabi_get_command(u8 command, struct sabi_retval *sretval)
-{
-       int retval = 0;
-       u16 port = readw(sabi + sabi_config->header_offsets.port);
-       u8 complete, iface_data;
-
-       mutex_lock(&sabi_mutex);
-
-       /* enable memory to be able to write to it */
-       outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
-       /* write out the command */
-       writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
-       writew(command, sabi_iface + SABI_IFACE_SUB);
-       writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
-       outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
-       /* write protect memory to make it safe */
-       outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
-       /* see if the command actually succeeded */
-       complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
-       iface_data = readb(sabi_iface + SABI_IFACE_DATA);
-       if (complete != 0xaa || iface_data == 0xff) {
-               pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
-                       command, complete, iface_data);
-               retval = -EINVAL;
-               goto exit;
-       }
-       /*
-        * Save off the data into a structure so the caller use it.
-        * Right now we only want the first 4 bytes,
-        * There are commands that need more, but not for the ones we
-        * currently care about.
-        */
-       sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
-       sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
-       sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
-       sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);
-
-exit:
-       mutex_unlock(&sabi_mutex);
-       return retval;
-
-}
-
-static int sabi_set_command(u8 command, u8 data)
-{
-       int retval = 0;
-       u16 port = readw(sabi + sabi_config->header_offsets.port);
-       u8 complete, iface_data;
-
-       mutex_lock(&sabi_mutex);
-
-       /* enable memory to be able to write to it */
-       outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
-       /* write out the command */
-       writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
-       writew(command, sabi_iface + SABI_IFACE_SUB);
-       writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
-       writeb(data, sabi_iface + SABI_IFACE_DATA);
-       outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
-       /* write protect memory to make it safe */
-       outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
-       /* see if the command actually succeeded */
-       complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
-       iface_data = readb(sabi_iface + SABI_IFACE_DATA);
-       if (complete != 0xaa || iface_data == 0xff) {
-               pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
-                      command, complete, iface_data);
-               retval = -EINVAL;
-       }
-
-       mutex_unlock(&sabi_mutex);
-       return retval;
-}
-
-static void test_backlight(void)
-{
-       struct sabi_retval sretval;
-
-       sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-       printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
-       sabi_set_command(sabi_config->commands.set_backlight, 0);
-       printk(KERN_DEBUG "backlight should be off\n");
-
-       sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-       printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
-       msleep(1000);
-
-       sabi_set_command(sabi_config->commands.set_backlight, 1);
-       printk(KERN_DEBUG "backlight should be on\n");
-
-       sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-       printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-}
-
-static void test_wireless(void)
-{
-       struct sabi_retval sretval;
-
-       sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-       printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
-       sabi_set_command(sabi_config->commands.set_wireless_button, 0);
-       printk(KERN_DEBUG "wireless led should be off\n");
-
-       sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-       printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
-       msleep(1000);
-
-       sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-       printk(KERN_DEBUG "wireless led should be on\n");
-
-       sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-       printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-}
-
-static u8 read_brightness(void)
-{
-       struct sabi_retval sretval;
-       int user_brightness = 0;
-       int retval;
-
-       retval = sabi_get_command(sabi_config->commands.get_brightness,
-                                 &sretval);
-       if (!retval) {
-               user_brightness = sretval.retval[0];
-               if (user_brightness != 0)
-                       user_brightness -= sabi_config->min_brightness;
-       }
-       return user_brightness;
-}
-
-static void set_brightness(u8 user_brightness)
-{
-       u8 user_level = user_brightness - sabi_config->min_brightness;
-
-       sabi_set_command(sabi_config->commands.set_brightness, user_level);
-}
-
-static int get_brightness(struct backlight_device *bd)
-{
-       return (int)read_brightness();
-}
-
-static int update_status(struct backlight_device *bd)
-{
-       set_brightness(bd->props.brightness);
-
-       if (bd->props.power == FB_BLANK_UNBLANK)
-               sabi_set_command(sabi_config->commands.set_backlight, 1);
-       else
-               sabi_set_command(sabi_config->commands.set_backlight, 0);
-       return 0;
-}
-
-static const struct backlight_ops backlight_ops = {
-       .get_brightness = get_brightness,
-       .update_status  = update_status,
-};
-
-static int rfkill_set(void *data, bool blocked)
-{
-       /* Do something with blocked...*/
-       /*
-        * blocked == false is on
-        * blocked == true is off
-        */
-       if (blocked)
-               sabi_set_command(sabi_config->commands.set_wireless_button, 0);
-       else
-               sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-
-       return 0;
-}
-
-static struct rfkill_ops rfkill_ops = {
-       .set_block = rfkill_set,
-};
-
-static int init_wireless(struct platform_device *sdev)
-{
-       int retval;
-
-       rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN,
-                          &rfkill_ops, NULL);
-       if (!rfk)
-               return -ENOMEM;
-
-       retval = rfkill_register(rfk);
-       if (retval) {
-               rfkill_destroy(rfk);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static void destroy_wireless(void)
-{
-       rfkill_unregister(rfk);
-       rfkill_destroy(rfk);
-}
-
-static ssize_t get_performance_level(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct sabi_retval sretval;
-       int retval;
-       int i;
-
-       /* Read the state */
-       retval = sabi_get_command(sabi_config->commands.get_performance_level,
-                                 &sretval);
-       if (retval)
-               return retval;
-
-       /* The logic is backwards, yeah, lots of fun... */
-       for (i = 0; sabi_config->performance_levels[i].name; ++i) {
-               if (sretval.retval[0] == sabi_config->performance_levels[i].value)
-                       return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name);
-       }
-       return sprintf(buf, "%s\n", "unknown");
-}
-
-static ssize_t set_performance_level(struct device *dev,
-                               struct device_attribute *attr, const char *buf,
-                               size_t count)
-{
-       if (count >= 1) {
-               int i;
-               for (i = 0; sabi_config->performance_levels[i].name; ++i) {
-                       const struct sabi_performance_level *level =
-                               &sabi_config->performance_levels[i];
-                       if (!strncasecmp(level->name, buf, strlen(level->name))) {
-                               sabi_set_command(sabi_config->commands.set_performance_level,
-                                                level->value);
-                               break;
-                       }
-               }
-               if (!sabi_config->performance_levels[i].name)
-                       return -EINVAL;
-       }
-       return count;
-}
-static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
-                  get_performance_level, set_performance_level);
-
-
-static int __init dmi_check_cb(const struct dmi_system_id *id)
-{
-       pr_info("found laptop model '%s'\n",
-               id->ident);
-       return 0;
-}
-
-static struct dmi_system_id __initdata samsung_dmi_table[] = {
-       {
-               .ident = "N128",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N128"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N130",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N130"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "X125",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X125"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "X120/X170",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NC10",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NC10"),
-               },
-               .callback = dmi_check_cb,
-       },
-               {
-               .ident = "NP-Q45",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
-                       DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
-               },
-               .callback = dmi_check_cb,
-               },
-       {
-               .ident = "X360",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X360"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R410 Plus",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R460"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R518",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R518"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R519/R719",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150/N210/N220/N230",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150P/N210P/N220P",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R530/R730",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NF110/NF210/NF310",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N145P/N250P/N260P",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R70/R71",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "P460",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
-                       DMI_MATCH(DMI_BOARD_NAME, "P460"),
-               },
-               .callback = dmi_check_cb,
-       },
-       { },
-};
-MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
-
-static int find_signature(void __iomem *memcheck, const char *testStr)
-{
-       int i = 0;
-       int loca;
-
-       for (loca = 0; loca < 0xffff; loca++) {
-               char temp = readb(memcheck + loca);
-
-               if (temp == testStr[i]) {
-                       if (i == strlen(testStr)-1)
-                               break;
-                       ++i;
-               } else {
-                       i = 0;
-               }
-       }
-       return loca;
-}
-
-static int __init samsung_init(void)
-{
-       struct backlight_properties props;
-       struct sabi_retval sretval;
-       unsigned int ifaceP;
-       int i;
-       int loca;
-       int retval;
-
-       mutex_init(&sabi_mutex);
-
-       if (!force && !dmi_check_system(samsung_dmi_table))
-               return -ENODEV;
-
-       f0000_segment = ioremap_nocache(0xf0000, 0xffff);
-       if (!f0000_segment) {
-               pr_err("Can't map the segment at 0xf0000\n");
-               return -EINVAL;
-       }
-
-       /* Try to find one of the signatures in memory to find the header */
-       for (i = 0; sabi_configs[i].test_string != 0; ++i) {
-               sabi_config = &sabi_configs[i];
-               loca = find_signature(f0000_segment, sabi_config->test_string);
-               if (loca != 0xffff)
-                       break;
-       }
-
-       if (loca == 0xffff) {
-               pr_err("This computer does not support SABI\n");
-               goto error_no_signature;
-       }
-
-       /* point to the SMI port Number */
-       loca += 1;
-       sabi = (f0000_segment + loca);
-
-       if (debug) {
-               printk(KERN_DEBUG "This computer supports SABI==%x\n",
-                       loca + 0xf0000 - 6);
-               printk(KERN_DEBUG "SABI header:\n");
-               printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
-                       readw(sabi + sabi_config->header_offsets.port));
-               printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
-                       readb(sabi + sabi_config->header_offsets.iface_func));
-               printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
-                       readb(sabi + sabi_config->header_offsets.en_mem));
-               printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
-                       readb(sabi + sabi_config->header_offsets.re_mem));
-               printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
-                       readw(sabi + sabi_config->header_offsets.data_offset));
-               printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
-                       readw(sabi + sabi_config->header_offsets.data_segment));
-       }
-
-       /* Get a pointer to the SABI Interface */
-       ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4;
-       ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff;
-       sabi_iface = ioremap_nocache(ifaceP, 16);
-       if (!sabi_iface) {
-               pr_err("Can't remap %x\n", ifaceP);
-               goto exit;
-       }
-       if (debug) {
-               printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
-               printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
-
-               test_backlight();
-               test_wireless();
-
-               retval = sabi_get_command(sabi_config->commands.get_brightness,
-                                         &sretval);
-               printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]);
-       }
-
-       /* Turn on "Linux" mode in the BIOS */
-       if (sabi_config->commands.set_linux != 0xff) {
-               retval = sabi_set_command(sabi_config->commands.set_linux,
-                                         0x81);
-               if (retval) {
-                       pr_warn("Linux mode was not set!\n");
-                       goto error_no_platform;
-               }
-       }
-
-       /* knock up a platform device to hang stuff off of */
-       sdev = platform_device_register_simple("samsung", -1, NULL, 0);
-       if (IS_ERR(sdev))
-               goto error_no_platform;
-
-       /* create a backlight device to talk to this one */
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.type = BACKLIGHT_PLATFORM;
-       props.max_brightness = sabi_config->max_brightness;
-       backlight_device = backlight_device_register("samsung", &sdev->dev,
-                                                    NULL, &backlight_ops,
-                                                    &props);
-       if (IS_ERR(backlight_device))
-               goto error_no_backlight;
-
-       backlight_device->props.brightness = read_brightness();
-       backlight_device->props.power = FB_BLANK_UNBLANK;
-       backlight_update_status(backlight_device);
-
-       retval = init_wireless(sdev);
-       if (retval)
-               goto error_no_rfk;
-
-       retval = device_create_file(&sdev->dev, &dev_attr_performance_level);
-       if (retval)
-               goto error_file_create;
-
-exit:
-       return 0;
-
-error_file_create:
-       destroy_wireless();
-
-error_no_rfk:
-       backlight_device_unregister(backlight_device);
-
-error_no_backlight:
-       platform_device_unregister(sdev);
-
-error_no_platform:
-       iounmap(sabi_iface);
-
-error_no_signature:
-       iounmap(f0000_segment);
-       return -EINVAL;
-}
-
-static void __exit samsung_exit(void)
-{
-       /* Turn off "Linux" mode in the BIOS */
-       if (sabi_config->commands.set_linux != 0xff)
-               sabi_set_command(sabi_config->commands.set_linux, 0x80);
-
-       device_remove_file(&sdev->dev, &dev_attr_performance_level);
-       backlight_device_unregister(backlight_device);
-       destroy_wireless();
-       iounmap(sabi_iface);
-       iounmap(f0000_segment);
-       platform_device_unregister(sdev);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
-
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-MODULE_DESCRIPTION("Samsung Backlight driver");
-MODULE_LICENSE("GPL");
index 41b6e51..006489d 100644 (file)
@@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI
        default y if ARCH_VT8500
        default y if PLAT_SPEAR
        default y if ARCH_MSM
+       default y if MICROBLAZE
        default PCI
 
 # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
index a3d2e23..96fdfb8 100644 (file)
@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
                break;
        case USB_ENDPOINT_XFER_INT:
                type = "Int.";
-               if (speed == USB_SPEED_HIGH)
+               if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
                        interval = 1 << (desc->bInterval - 1);
                else
                        interval = desc->bInterval;
@@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
        default:        /* "can't happen" */
                return start;
        }
-       interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000;
+       interval *= (speed == USB_SPEED_HIGH ||
+                    speed == USB_SPEED_SUPER) ? 125 : 1000;
        if (interval % 1000)
                unit = 'u';
        else {
@@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
        if (level == 0) {
                int     max;
 
-               /* high speed reserves 80%, full/low reserves 90% */
-               if (usbdev->speed == USB_SPEED_HIGH)
+               /* super/high speed reserves 80%, full/low reserves 90% */
+               if (usbdev->speed == USB_SPEED_HIGH ||
+                   usbdev->speed == USB_SPEED_SUPER)
                        max = 800;
                else
                        max = FRAME_TIME_MAX_USECS_ALLOC;
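
All three hunks apply the same rule: for high-speed (and, with this patch,
SuperSpeed) interrupt endpoints, bInterval is an exponent in 125 us microframe
units, while full/low speed treat it as a plain count of 1 ms frames. A small
stand-alone sketch of the decoding; the enum and helper are local to this
example, not kernel API.

#include <stdio.h>

enum speed { LOW, FULL, HIGH, SUPER };

static unsigned interval_us(enum speed s, unsigned bInterval)
{
        /* HS/SS: period = 2^(bInterval - 1) microframes of 125 us;
         * FS/LS: period = bInterval frames of 1000 us. */
        unsigned units = (s == HIGH || s == SUPER) ? 125 : 1000;
        unsigned n = (s == HIGH || s == SUPER) ? (1u << (bInterval - 1))
                                               : bInterval;
        return n * units;
}

int main(void)
{
        printf("HS bInterval=4 -> %u us\n", interval_us(HIGH, 4)); /* 1000 */
        printf("FS bInterval=4 -> %u us\n", interval_us(FULL, 4)); /* 4000 */
        return 0;
}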
index 8eed05d..77a7fae 100644 (file)
@@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface,
 
        /* Streams only apply to bulk endpoints. */
        for (i = 0; i < num_eps; i++)
-               if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+               if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
                        return;
 
        hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
index 8fb7549..93720bd 100644 (file)
@@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
        }
 
        /* see 7.1.7.6 */
-       status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND);
+       /* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
+        * external hub.
+        * FIXME: this is a temporary workaround to make the system able
+        * to suspend/resume.
+        */
+       if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
+               status = clear_port_feature(hub->hdev, port1,
+                                               USB_PORT_FEAT_POWER);
+       else
+               status = set_port_feature(hub->hdev, port1,
+                                               USB_PORT_FEAT_SUSPEND);
        if (status) {
                dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
                                port1, status);
index 9abecfd..0111f8a 100644 (file)
@@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
        struct f_audio          *audio = func_to_audio(f);
 
        usb_free_descriptors(f->descriptors);
+       usb_free_descriptors(f->hs_descriptors);
        kfree(audio);
 }
 
index 95dd466..b3c3042 100644 (file)
@@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f)
 
 static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
 {
+       struct sk_buff *skb = (struct sk_buff *)req->context;
+
+       dev_kfree_skb_any(skb);
 }
 
 /*
@@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port,
                                skb_trim(skb2, len);
                                put_unaligned_le16(BIT(15) | BIT(11) | len,
                                                        skb_push(skb2, 2));
-                               skb_copy_bits(skb, 0, req->buf, skb->len);
-                               req->length = skb->len;
+                               skb_copy_bits(skb2, 0, req->buf, skb2->len);
+                               req->length = skb2->len;
                                req->complete = eem_cmd_complete;
                                req->zero = 1;
+                               req->context = skb2;
                                if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
                                        DBG(cdev, "echo response queue fail\n");
                                break;
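
The leak fix works by making the response skb travel with the request: it is
stashed in req->context at submit time and freed only in the completion
handler. A minimal sketch of that ownership pattern; the struct and callback
are simplified stand-ins, not the gadget API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request {
        void *buf;
        void *context;                        /* owner-managed cookie */
        void (*complete)(struct request *);
};

static void cmd_complete(struct request *req)
{
        free(req->context);                   /* buffer released here */
}

int main(void)
{
        struct request req = { 0 };
        char *echo = malloc(16);

        memcpy(echo, "echo-payload", 13);
        req.buf = echo;                       /* data the hardware reads */
        req.context = echo;                   /* remembered for completion */
        req.complete = cmd_complete;

        req.complete(&req);                   /* simulate async completion */
        puts("buffer freed in completion, not at submit time");
        return 0;
}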
index aee7e3c..36613b3 100644 (file)
@@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
 static int txcomplete(struct qe_ep *ep, unsigned char restart)
 {
        if (ep->tx_req != NULL) {
+               struct qe_req *req = ep->tx_req;
+               unsigned zlp = 0, last_len = 0;
+
+               last_len = min_t(unsigned, req->req.length - ep->sent,
+                               ep->ep.maxpacket);
+
                if (!restart) {
                        int asent = ep->last;
                        ep->sent += asent;
@@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart)
                        ep->last = 0;
                }
 
+               /* a ZLP is needed when req->req.zero is set */
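+               /* (A zero-length packet terminates a transfer whose total
+                * length is an exact multiple of maxpacket, so the host can
+                * tell the transfer is complete.) */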
+               if (req->req.zero) {
+                       if (last_len == 0 ||
+                               (req->req.length % ep->ep.maxpacket) != 0)
+                               zlp = 0;
+                       else
+                               zlp = 1;
+               } else
+                       zlp = 0;
+
                /* a request has already been transmitted completely */
-               if ((ep->tx_req->req.length - ep->sent) <= 0) {
-                       ep->tx_req->req.actual = (unsigned int)ep->sent;
+               if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
                        done(ep, ep->tx_req, 0);
                        ep->tx_req = NULL;
                        ep->last = 0;
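
The added logic decides whether a zero-length packet must follow the data:
only when the request asked for one and the payload ended on an exact
maxpacket boundary. A compact sketch of the predicate; names are local to
this example.

#include <stdio.h>

static int need_zlp(unsigned zero_flag, unsigned length,
                    unsigned last_len, unsigned maxpacket)
{
        if (!zero_flag)
                return 0;
        /* a short final packet already terminates the transfer */
        if (last_len == 0 || (length % maxpacket) != 0)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", need_zlp(1, 512, 512, 512)); /* 1: exact multiple */
        printf("%d\n", need_zlp(1, 513, 1, 512));   /* 0: short last packet */
        printf("%d\n", need_zlp(0, 512, 512, 512)); /* 0: ZLP not requested */
        return 0;
}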
@@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
        buf = (u8 *)ep->tx_req->req.buf + ep->sent;
        if (buf && size) {
                ep->last = size;
+               ep->tx_req->req.actual += size;
                frame_set_data(frame, buf);
                frame_set_length(frame, size);
                frame_set_status(frame, FRAME_OK);
index 3ed73f4..a01383f 100644 (file)
@@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 
        /* halt any endpoint by doing a "wrong direction" i/o call */
        if (usb_endpoint_dir_in(&data->desc)) {
-               if (usb_endpoint_xfer_isoc(&data->desc))
+               if (usb_endpoint_xfer_isoc(&data->desc)) {
+                       mutex_unlock(&data->lock);
                        return -EINVAL;
+               }
                DBG (data->dev, "%s halt\n", data->name);
                spin_lock_irq (&data->dev->lock);
                if (likely (data->ep != NULL))
index 3e4b35e..68dbcc3 100644 (file)
@@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
                return -EINVAL;
        if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
                return -ESHUTDOWN;
-       spin_lock_irqsave(&ep->dev->lock, iflags);
+       spin_lock_irqsave(&dev->lock, iflags);
        /* map the buffer for dma */
        if (usbreq->length &&
            ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
@@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
                                                             DMA_FROM_DEVICE);
                } else {
                        req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
-                       if (!req->buf)
-                               return -ENOMEM;
+                       if (!req->buf) {
+                               retval = -ENOMEM;
+                               goto probe_end;
+                       }
                        if (ep->in) {
                                memcpy(req->buf, usbreq->buf, usbreq->length);
                                req->dma = dma_map_single(&dev->pdev->dev,
index 0151185..6dcc1f6 100644 (file)
@@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597)
 
        if (dvsq == DS_DFLT) {
                /* bus reset */
+               spin_unlock(&r8a66597->lock);
                r8a66597->driver->disconnect(&r8a66597->gadget);
+               spin_lock(&r8a66597->lock);
                r8a66597_update_usb_speed(r8a66597);
        }
        if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
index 98ded66..42abd0f 100644 (file)
@@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 static void scan_async (struct ehci_hcd *ehci)
 {
+       bool                    stopped;
        struct ehci_qh          *qh;
        enum ehci_timer_action  action = TIMER_IO_WATCHDOG;
 
        ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
        timer_action_done (ehci, TIMER_ASYNC_SHRINK);
 rescan:
+       stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
        qh = ehci->async->qh_next.qh;
        if (likely (qh != NULL)) {
                do {
                        /* clean any finished work for this qh */
-                       if (!list_empty (&qh->qtd_list)
-                                       && qh->stamp != ehci->stamp) {
+                       if (!list_empty(&qh->qtd_list) && (stopped ||
+                                       qh->stamp != ehci->stamp)) {
                                int temp;
 
                                /* unlinks could happen here; completion
                                 * reporting drops the lock.  rescan using
                                 * the latest schedule, but don't rescan
-                                * qhs we already finished (no looping).
+                                * qhs we already finished (no looping)
+                                * unless the controller is stopped.
                                 */
                                qh = qh_get (qh);
                                qh->stamp = ehci->stamp;
@@ -1285,9 +1288,9 @@ rescan:
                         */
                        if (list_empty(&qh->qtd_list)
                                        && qh->qh_state == QH_STATE_LINKED) {
-                               if (!ehci->reclaim
-                                       && ((ehci->stamp - qh->stamp) & 0x1fff)
-                                               >= (EHCI_SHRINK_FRAMES * 8))
+                               if (!ehci->reclaim && (stopped ||
+                                       ((ehci->stamp - qh->stamp) & 0x1fff)
+                                               >= EHCI_SHRINK_FRAMES * 8))
                                        start_unlink_async(ehci, qh);
                                else
                                        action = TIMER_ASYNC_SHRINK;
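
The shrink test above ages queue heads with a wrap-safe subtraction: frame
stamps live in a 13-bit space, so the difference is masked with 0x1fff before
comparing against the threshold. A quick sketch; EHCI_SHRINK_FRAMES is assumed
to be 5 here, as in the ehci.h of this era.

#include <stdio.h>

#define EHCI_SHRINK_FRAMES 5    /* assumed value */

static int old_enough(unsigned now, unsigned then)
{
        /* wrap-safe age in microframes, modulo the 13-bit stamp space */
        return ((now - then) & 0x1fff) >= EHCI_SHRINK_FRAMES * 8;
}

int main(void)
{
        printf("%d\n", old_enough(0x0030, 0x1ff0)); /* wrapped, age 0x40: 1 */
        printf("%d\n", old_enough(0x0100, 0x00f8)); /* age 8, too young: 0 */
        return 0;
}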
index f50e84a..795345a 100644 (file)
@@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
        }
 
        dev_err(hcd->self.controller,
-                               "%s: Can not allocate %lu bytes of memory\n"
+                               "%s: Cannot allocate %zu bytes of memory\n"
                                "Current memory map:\n",
                                __func__, qtd->length);
        for (i = 0; i < BLOCKS; i++) {
index 17a6043..958d985 100644 (file)
@@ -33,7 +33,7 @@
 
 #ifdef __LITTLE_ENDIAN
 #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
-#elif __BIG_ENDIAN
+#elif defined(__BIG_ENDIAN)
 #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
                          USBH_ENABLE_BE)
 #else
index 1d586d4..9b166d7 100644 (file)
@@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void)
 {
        u8 rev = 0;
        unsigned long flags;
+       struct amd_chipset_info info;
+       int ret;
 
        spin_lock_irqsave(&amd_lock, flags);
 
-       amd_chipset.probe_count++;
        /* probe only once */
-       if (amd_chipset.probe_count > 1) {
+       if (amd_chipset.probe_count > 0) {
+               amd_chipset.probe_count++;
                spin_unlock_irqrestore(&amd_lock, flags);
                return amd_chipset.probe_result;
        }
+       memset(&info, 0, sizeof(info));
+       spin_unlock_irqrestore(&amd_lock, flags);
 
-       amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
-       if (amd_chipset.smbus_dev) {
-               rev = amd_chipset.smbus_dev->revision;
+       info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+       if (info.smbus_dev) {
+               rev = info.smbus_dev->revision;
                if (rev >= 0x40)
-                       amd_chipset.sb_type = 1;
+                       info.sb_type = 1;
                else if (rev >= 0x30 && rev <= 0x3b)
-                       amd_chipset.sb_type = 3;
+                       info.sb_type = 3;
        } else {
-               amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-                                                       0x780b, NULL);
-               if (!amd_chipset.smbus_dev) {
-                       spin_unlock_irqrestore(&amd_lock, flags);
-                       return 0;
+               info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+                                               0x780b, NULL);
+               if (!info.smbus_dev) {
+                       ret = 0;
+                       goto commit;
                }
-               rev = amd_chipset.smbus_dev->revision;
+
+               rev = info.smbus_dev->revision;
                if (rev >= 0x11 && rev <= 0x18)
-                       amd_chipset.sb_type = 2;
+                       info.sb_type = 2;
        }
 
-       if (amd_chipset.sb_type == 0) {
-               if (amd_chipset.smbus_dev) {
-                       pci_dev_put(amd_chipset.smbus_dev);
-                       amd_chipset.smbus_dev = NULL;
+       if (info.sb_type == 0) {
+               if (info.smbus_dev) {
+                       pci_dev_put(info.smbus_dev);
+                       info.smbus_dev = NULL;
                }
-               spin_unlock_irqrestore(&amd_lock, flags);
-               return 0;
+               ret = 0;
+               goto commit;
        }
 
-       amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
-       if (amd_chipset.nb_dev) {
-               amd_chipset.nb_type = 1;
+       info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
+       if (info.nb_dev) {
+               info.nb_type = 1;
        } else {
-               amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-                                                       0x1510, NULL);
-               if (amd_chipset.nb_dev) {
-                       amd_chipset.nb_type = 2;
-               } else  {
-                       amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-                                                               0x9600, NULL);
-                       if (amd_chipset.nb_dev)
-                               amd_chipset.nb_type = 3;
+               info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+               if (info.nb_dev) {
+                       info.nb_type = 2;
+               } else {
+                       info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+                                                    0x9600, NULL);
+                       if (info.nb_dev)
+                               info.nb_type = 3;
                }
        }
 
-       amd_chipset.probe_result = 1;
+       ret = info.probe_result = 1;
        printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 
-       spin_unlock_irqrestore(&amd_lock, flags);
-       return amd_chipset.probe_result;
+commit:
+
+       spin_lock_irqsave(&amd_lock, flags);
+       if (amd_chipset.probe_count > 0) {
+               /* race - someone else was faster - drop devices */
+
+               /* Mark that we were here */
+               amd_chipset.probe_count++;
+               ret = amd_chipset.probe_result;
+
+               spin_unlock_irqrestore(&amd_lock, flags);
+
+               if (info.nb_dev)
+                       pci_dev_put(info.nb_dev);
+               if (info.smbus_dev)
+                       pci_dev_put(info.smbus_dev);
+
+       } else {
+               /* no race - commit the result */
+               info.probe_count++;
+               amd_chipset = info;
+               spin_unlock_irqrestore(&amd_lock, flags);
+       }
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
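
The rewrite replaces "probe while holding the spinlock" with a lock-free probe
followed by a commit step: whoever returns to the lock first publishes its
result, and the loser drops its duplicate device references. A generic sketch
of the pattern, with a pthread mutex standing in for the kernel spinlock and
probe_hw() as a placeholder for the PCI scans.

#include <pthread.h>
#include <stdio.h>

struct info { int probed; int result; };

static struct info cached;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int probe_hw(struct info *i) { i->result = 1; return i->result; }

int find_info(void)
{
        struct info local = { 0 };
        int ret;

        pthread_mutex_lock(&lock);
        if (cached.probed) {            /* fast path: already probed */
                ret = cached.result;
                pthread_mutex_unlock(&lock);
                return ret;
        }
        pthread_mutex_unlock(&lock);

        ret = probe_hw(&local);         /* slow work, no lock held */
        local.probed = 1;

        pthread_mutex_lock(&lock);
        if (cached.probed)              /* somebody else won the race */
                ret = cached.result;    /* drop our duplicate state here */
        else
                cached = local;         /* no race: commit our result */
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void) { printf("%d\n", find_info()); return 0; }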
 
@@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
 
 void usb_amd_dev_put(void)
 {
+       struct pci_dev *nb, *smbus;
        unsigned long flags;
 
        spin_lock_irqsave(&amd_lock, flags);
@@ -294,20 +322,23 @@ void usb_amd_dev_put(void)
                return;
        }
 
-       if (amd_chipset.nb_dev) {
-               pci_dev_put(amd_chipset.nb_dev);
-               amd_chipset.nb_dev = NULL;
-       }
-       if (amd_chipset.smbus_dev) {
-               pci_dev_put(amd_chipset.smbus_dev);
-               amd_chipset.smbus_dev = NULL;
-       }
+       /* save them so we can pci_dev_put() outside of the spinlock */
+       nb    = amd_chipset.nb_dev;
+       smbus = amd_chipset.smbus_dev;
+
+       amd_chipset.nb_dev = NULL;
+       amd_chipset.smbus_dev = NULL;
        amd_chipset.nb_type = 0;
        amd_chipset.sb_type = 0;
        amd_chipset.isoc_reqs = 0;
        amd_chipset.probe_result = 0;
 
        spin_unlock_irqrestore(&amd_lock, flags);
+
+       if (nb)
+               pci_dev_put(nb);
+       if (smbus)
+               pci_dev_put(smbus);
 }
 EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 
index a003e79..627f343 100644 (file)
@@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
                 * Skip ports that don't have known speeds, or have duplicate
                 * Extended Capabilities port speed entries.
                 */
-               if (port_speed == 0 || port_speed == -1)
+               if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
                        continue;
 
                /*
@@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        return 0;
 }
 
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ *
+ */
+static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       unsigned int interval;
+
+       interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
+       if (interval != ep->desc.bInterval - 1)
+               dev_warn(&udev->dev,
+                        "ep %#x - rounding interval to %d microframes\n",
+                        ep->desc.bEndpointAddress,
+                        1 << interval);
+
+       return interval;
+}
+
+/*
+ * Convert bInterval expressed in frames (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       unsigned int interval;
+
+       interval = fls(8 * ep->desc.bInterval) - 1;
+       interval = clamp_val(interval, 3, 10);
+       if ((1 << interval) != 8 * ep->desc.bInterval)
+               dev_warn(&udev->dev,
+                        "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
+                        ep->desc.bEndpointAddress,
+                        1 << interval,
+                        8 * ep->desc.bInterval);
+
+       return interval;
+}
+
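
The two helpers factored out above implement distinct conversions:
exponent-encoded intervals are clamped to 1..16 and decremented, while
frame-counted intervals become floor(log2(8 * bInterval)), clamped to the
xHCI-legal 2^3..2^10 microframe range. A worked stand-alone sketch; my_fls()
is a local substitute for the kernel's fls().

#include <stdio.h>

static int my_fls(unsigned v)            /* highest set bit, 1-based */
{
        int n = 0;
        while (v) { n++; v >>= 1; }
        return n;
}

static unsigned exponent_interval(unsigned bInterval)
{
        unsigned e = bInterval;
        if (e < 1)  e = 1;               /* clamp_val(bInterval, 1, 16) */
        if (e > 16) e = 16;
        return e - 1;
}

static unsigned frame_interval(unsigned bInterval)
{
        unsigned e = my_fls(8 * bInterval) - 1;  /* floor(log2(8*b)) */
        if (e < 3)  e = 3;
        if (e > 10) e = 10;
        return e;
}

int main(void)
{
        /* HS int ep, bInterval 4 -> 2^3 = 8 microframes (1 ms) */
        printf("exp-encoded: 2^%u\n", exponent_interval(4));
        /* FS int ep, 5 frames = 40 microframes, rounded down to 2^5 = 32 */
        printf("frame-count: 2^%u\n", frame_interval(5));
        return 0;
}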
 /* Return the polling or NAK interval.
  *
  * The polling interval is expressed in "microframes".  If xHCI's Interval field
@@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
  * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
  * is set to 0.
  */
-static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
        unsigned int interval = 0;
@@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
        case USB_SPEED_HIGH:
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
-                               usb_endpoint_xfer_bulk(&ep->desc))
+                   usb_endpoint_xfer_bulk(&ep->desc)) {
                        interval = ep->desc.bInterval;
+                       break;
+               }
                /* Fall through - SS and HS isoc/int have same decoding */
+
        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
-                               usb_endpoint_xfer_isoc(&ep->desc)) {
-                       if (ep->desc.bInterval == 0)
-                               interval = 0;
-                       else
-                               interval = ep->desc.bInterval - 1;
-                       if (interval > 15)
-                               interval = 15;
-                       if (interval != ep->desc.bInterval + 1)
-                               dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
-                                               ep->desc.bEndpointAddress, 1 << interval);
+                   usb_endpoint_xfer_isoc(&ep->desc)) {
+                       interval = xhci_parse_exponent_interval(udev, ep);
                }
                break;
-       /* Convert bInterval (in 1-255 frames) to microframes and round down to
-        * nearest power of 2.
-        */
+
        case USB_SPEED_FULL:
+               if (usb_endpoint_xfer_int(&ep->desc)) {
+                       interval = xhci_parse_exponent_interval(udev, ep);
+                       break;
+               }
+               /*
+                * Fall through for isochronous endpoint interval decoding
+                * since it uses the same rules as low speed interrupt
+                * endpoints.
+                */
+
        case USB_SPEED_LOW:
                if (usb_endpoint_xfer_int(&ep->desc) ||
-                               usb_endpoint_xfer_isoc(&ep->desc)) {
-                       interval = fls(8*ep->desc.bInterval) - 1;
-                       if (interval > 10)
-                               interval = 10;
-                       if (interval < 3)
-                               interval = 3;
-                       if ((1 << interval) != 8*ep->desc.bInterval)
-                               dev_warn(&udev->dev,
-                                               "ep %#x - rounding interval"
-                                               " to %d microframes, "
-                                               "ep desc says %d microframes\n",
-                                               ep->desc.bEndpointAddress,
-                                               1 << interval,
-                                               8*ep->desc.bInterval);
+                   usb_endpoint_xfer_isoc(&ep->desc)) {
+
+                       interval = xhci_parse_frame_interval(udev, ep);
                }
                break;
+
        default:
                BUG();
        }
@@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
  * transaction opportunities per microframe", but that goes in the Max Burst
  * endpoint context field.
  */
-static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+static u32 xhci_get_endpoint_mult(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
        if (udev->speed != USB_SPEED_SUPER ||
@@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
        return ep->ss_ep_comp.bmAttributes;
 }
 
-static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+static u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
        int in;
@@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
  * Basically, this is the maxpacket size, multiplied by the burst size
  * and mult size.
  */
-static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
                struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
@@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
                         * found a similar duplicate.
                         */
                        if (xhci->port_array[i] != major_revision &&
-                               xhci->port_array[i] != (u8) -1) {
+                               xhci->port_array[i] != DUPLICATE_ENTRY) {
                                if (xhci->port_array[i] == 0x03)
                                        xhci->num_usb3_ports--;
                                else
                                        xhci->num_usb2_ports--;
-                               xhci->port_array[i] = (u8) -1;
+                               xhci->port_array[i] = DUPLICATE_ENTRY;
                        }
                        /* FIXME: Should we disable the port? */
                        continue;
@@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
                for (i = 0; i < num_ports; i++) {
                        if (xhci->port_array[i] == 0x03 ||
                                        xhci->port_array[i] == 0 ||
-                                       xhci->port_array[i] == -1)
+                                       xhci->port_array[i] == DUPLICATE_ENTRY)
                                continue;
 
                        xhci->usb2_ports[port_index] =
index ceea9f3..a10494c 100644 (file)
@@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
+       /* AMD PLL quirk */
+       if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+               xhci->quirks |= XHCI_AMD_PLL_FIX;
+
        /* Make sure the HC is halted. */
        retval = xhci_halt(xhci);
        if (retval)
index cfc1ad9..7437386 100644 (file)
@@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 /* Does this link TRB point to the first segment in a ring,
  * or was the previous TRB the last TRB on the last segment in the ERST?
  */
-static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
 {
        if (ring == xhci->event_ring)
@@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring
  * segment?  I.e. would the updated event TRB pointer step off the end of the
  * event seg?
  */
-static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
 {
        if (ring == xhci->event_ring)
@@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
 }
 
-static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
        struct xhci_link_trb *link = &ring->enqueue->link;
        return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
@@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
        ep->ep_state |= SET_DEQ_PENDING;
 }
 
-static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
 {
        ep->ep_state &= ~EP_HALT_PENDING;
@@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 
        /* Only giveback urb when this is the last td in urb */
        if (urb_priv->td_cnt == urb_priv->length) {
+               if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+                       xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+                       if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+                               if (xhci->quirks & XHCI_AMD_PLL_FIX)
+                                       usb_amd_quirk_pll_enable();
+                       }
+               }
                usb_hcd_unlink_urb_from_ep(hcd, urb);
                xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
 
@@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
                 * Skip ports that don't have known speeds, or have duplicate
                 * Extended Capabilities port speed entries.
                 */
-               if (port_speed == 0 || port_speed == -1)
+               if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
                        continue;
 
                /*
@@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
        u8 major_revision;
        struct xhci_bus_state *bus_state;
        u32 __iomem **port_array;
+       bool bogus_port_status = false;
 
        /* Port status change events always have a successful completion code */
        if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
@@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Invalid port id %d\n", port_id);
+               bogus_port_status = true;
                goto cleanup;
        }
 
@@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
                xhci_warn(xhci, "Event for port %u not in "
                                "Extended Capabilities, ignoring.\n",
                                port_id);
+               bogus_port_status = true;
                goto cleanup;
        }
-       if (major_revision == (u8) -1) {
+       if (major_revision == DUPLICATE_ENTRY) {
                xhci_warn(xhci, "Event for port %u duplicated in"
                                "Extended Capabilities, ignoring.\n",
                                port_id);
+               bogus_port_status = true;
                goto cleanup;
        }
 
@@ -1335,6 +1346,13 @@ cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
        inc_deq(xhci, xhci->event_ring, true);
 
+       /* Don't make the USB core poll the roothub if we got a bad port status
+        * change event.  Besides, at that point we can't tell which roothub
+        * (USB 2.0 or USB 3.0) to kick.
+        */
+       if (bogus_port_status)
+               return;
+
        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(hcd);
@@ -1554,8 +1572,17 @@ td_cleanup:
 
                urb_priv->td_cnt++;
                /* Giveback the urb when all the tds are completed */
-               if (urb_priv->td_cnt == urb_priv->length)
+               if (urb_priv->td_cnt == urb_priv->length) {
                        ret = 1;
+                       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+                               xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+                               if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
+                                       == 0) {
+                                       if (xhci->quirks & XHCI_AMD_PLL_FIX)
+                                               usb_amd_quirk_pll_enable();
+                               }
+                       }
+               }
        }
 
        return ret;
@@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        struct urb_priv *urb_priv;
        int idx;
        int len = 0;
-       int skip_td = 0;
        union xhci_trb *cur_trb;
        struct xhci_segment *cur_seg;
+       struct usb_iso_packet_descriptor *frame;
        u32 trb_comp_code;
+       bool skip_td = false;
 
        ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
        trb_comp_code = GET_COMP_CODE(event->transfer_len);
        urb_priv = td->urb->hcpriv;
        idx = urb_priv->td_cnt;
+       frame = &td->urb->iso_frame_desc[idx];
 
-       if (ep->skip) {
-               /* The transfer is partly done */
-               *status = -EXDEV;
-               td->urb->iso_frame_desc[idx].status = -EXDEV;
-       } else {
-               /* handle completion code */
-               switch (trb_comp_code) {
-               case COMP_SUCCESS:
-                       td->urb->iso_frame_desc[idx].status = 0;
-                       xhci_dbg(xhci, "Successful isoc transfer!\n");
-                       break;
-               case COMP_SHORT_TX:
-                       if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-                               td->urb->iso_frame_desc[idx].status =
-                                        -EREMOTEIO;
-                       else
-                               td->urb->iso_frame_desc[idx].status = 0;
-                       break;
-               case COMP_BW_OVER:
-                       td->urb->iso_frame_desc[idx].status = -ECOMM;
-                       skip_td = 1;
-                       break;
-               case COMP_BUFF_OVER:
-               case COMP_BABBLE:
-                       td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
-                       skip_td = 1;
-                       break;
-               case COMP_STALL:
-                       td->urb->iso_frame_desc[idx].status = -EPROTO;
-                       skip_td = 1;
-                       break;
-               case COMP_STOP:
-               case COMP_STOP_INVAL:
-                       break;
-               default:
-                       td->urb->iso_frame_desc[idx].status = -1;
-                       break;
-               }
-       }
-
-       /* calc actual length */
-       if (ep->skip) {
-               td->urb->iso_frame_desc[idx].actual_length = 0;
-               /* Update ring dequeue pointer */
-               while (ep_ring->dequeue != td->last_trb)
-                       inc_deq(xhci, ep_ring, false);
-               inc_deq(xhci, ep_ring, false);
-               return finish_td(xhci, td, event_trb, event, ep, status, true);
+       /* handle completion code */
+       switch (trb_comp_code) {
+       case COMP_SUCCESS:
+               frame->status = 0;
+               xhci_dbg(xhci, "Successful isoc transfer!\n");
+               break;
+       case COMP_SHORT_TX:
+               frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+                               -EREMOTEIO : 0;
+               break;
+       case COMP_BW_OVER:
+               frame->status = -ECOMM;
+               skip_td = true;
+               break;
+       case COMP_BUFF_OVER:
+       case COMP_BABBLE:
+               frame->status = -EOVERFLOW;
+               skip_td = true;
+               break;
+       case COMP_STALL:
+               frame->status = -EPROTO;
+               skip_td = true;
+               break;
+       case COMP_STOP:
+       case COMP_STOP_INVAL:
+               break;
+       default:
+               frame->status = -1;
+               break;
        }
 
-       if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
-               td->urb->iso_frame_desc[idx].actual_length =
-                       td->urb->iso_frame_desc[idx].length;
-               td->urb->actual_length +=
-                       td->urb->iso_frame_desc[idx].length;
+       if (trb_comp_code == COMP_SUCCESS || skip_td) {
+               frame->actual_length = frame->length;
+               td->urb->actual_length += frame->length;
        } else {
                for (cur_trb = ep_ring->dequeue,
                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                        TRB_LEN(event->transfer_len);
 
                if (trb_comp_code != COMP_STOP_INVAL) {
-                       td->urb->iso_frame_desc[idx].actual_length = len;
+                       frame->actual_length = len;
                        td->urb->actual_length += len;
                }
        }
@@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        return finish_td(xhci, td, event_trb, event, ep, status, false);
 }
 
+static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+                       struct xhci_transfer_event *event,
+                       struct xhci_virt_ep *ep, int *status)
+{
+       struct xhci_ring *ep_ring;
+       struct urb_priv *urb_priv;
+       struct usb_iso_packet_descriptor *frame;
+       int idx;
+
+       ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+       urb_priv = td->urb->hcpriv;
+       idx = urb_priv->td_cnt;
+       frame = &td->urb->iso_frame_desc[idx];
+
+       /* The transfer is partly done */
+       *status = -EXDEV;
+       frame->status = -EXDEV;
+
+       /* calc actual length */
+       frame->actual_length = 0;
+
+       /* Update ring dequeue pointer */
+       while (ep_ring->dequeue != td->last_trb)
+               inc_deq(xhci, ep_ring, false);
+       inc_deq(xhci, ep_ring, false);
+
+       return finish_td(xhci, td, NULL, event, ep, status, true);
+}
+
 /*
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
@@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                }
 
                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                                td->last_trb, event_dma);
-               if (event_seg && ep->skip) {
+               if (!event_seg) {
+                       if (!ep->skip ||
+                           !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+                               /* HC is busted, give up! */
+                               xhci_err(xhci,
+                                       "ERROR Transfer event TRB DMA ptr not "
+                                       "part of current TD\n");
+                               return -ESHUTDOWN;
+                       }
+
+                       ret = skip_isoc_td(xhci, td, event, ep, &status);
+                       goto cleanup;
+               }
+
+               if (ep->skip) {
                        xhci_dbg(xhci, "Found td. Clear skip flag.\n");
                        ep->skip = false;
                }
-               if (!event_seg &&
-                  (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
-                       /* HC is busted, give up! */
-                       xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
-                                       "part of current TD\n");
-                       return -ESHUTDOWN;
-               }
 
-               if (event_seg) {
-                       event_trb = &event_seg->trbs[(event_dma -
-                                        event_seg->dma) / sizeof(*event_trb)];
-                       /*
-                        * No-op TRB should not trigger interrupts.
-                        * If event_trb is a no-op TRB, it means the
-                        * corresponding TD has been cancelled. Just ignore
-                        * the TD.
-                        */
-                       if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
-                                        == TRB_TYPE(TRB_TR_NOOP)) {
-                               xhci_dbg(xhci, "event_trb is a no-op TRB. "
-                                               "Skip it\n");
-                               goto cleanup;
-                       }
+               event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
+                                               sizeof(*event_trb)];
+               /*
+                * No-op TRB should not trigger interrupts.
+                * If event_trb is a no-op TRB, it means the
+                * corresponding TD has been cancelled. Just ignore
+                * the TD.
+                */
+               if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+                                == TRB_TYPE(TRB_TR_NOOP)) {
+                       xhci_dbg(xhci,
+                                "event_trb is a no-op TRB. Skip it\n");
+                       goto cleanup;
                }
 
                /* Now update the urb's actual_length and give back to
@@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
+       if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+               if (xhci->quirks & XHCI_AMD_PLL_FIX)
+                       usb_amd_quirk_pll_disable();
+       }
+       xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
+
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);
        return 0;
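
The hunks above gate the AMD PLL workaround on a count of in-flight
isochronous URBs: the quirk call fires when the first stream starts and its
counterpart fires when the last one completes. A minimal sketch of that
edge-triggered counting; the functions here just print, standing in for
usb_amd_quirk_pll_disable()/usb_amd_quirk_pll_enable().

#include <stdio.h>

static int isoc_reqs;

static void pll_quirk_disable(void) { puts("pll_disable(): first isoc stream"); }
static void pll_quirk_enable(void)  { puts("pll_enable(): last stream done"); }

static void isoc_submit(void)
{
        if (isoc_reqs == 0)
                pll_quirk_disable();    /* edge: first active stream */
        isoc_reqs++;
}

static void isoc_complete(void)
{
        isoc_reqs--;
        if (isoc_reqs == 0)
                pll_quirk_enable();     /* edge: last stream finished */
}

int main(void)
{
        isoc_submit();
        isoc_submit();          /* no print: quirk already applied */
        isoc_complete();
        isoc_complete();        /* restores state on reaching zero */
        return 0;
}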
index 196e018..81b976e 100644 (file)
@@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd)
        del_timer_sync(&xhci->event_ring_timer);
 #endif
 
+       if (xhci->quirks & XHCI_AMD_PLL_FIX)
+               usb_amd_dev_put();
+
        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {
-               usb_root_hub_lost_power(hcd->self.root_hub);
+               /* Let the USB core know _both_ roothubs lost power. */
+               usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+               usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
 
                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
@@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
        /* Everything but endpoint 0 is disabled, so free or cache the rings. */
        last_freed_endpoint = 1;
        for (i = 1; i < 31; ++i) {
-               if (!virt_dev->eps[i].ring)
-                       continue;
-               xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
-               last_freed_endpoint = i;
+               struct xhci_virt_ep *ep = &virt_dev->eps[i];
+
+               if (ep->ep_state & EP_HAS_STREAMS) {
+                       xhci_free_stream_info(xhci, ep->stream_info);
+                       ep->stream_info = NULL;
+                       ep->ep_state &= ~EP_HAS_STREAMS;
+               }
+
+               if (ep->ring) {
+                       xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+                       last_freed_endpoint = i;
+               }
        }
        xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
        xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
index 07e2630..ba1be6b 100644 (file)
@@ -30,6 +30,7 @@
 
 /* Code sharing between pci-quirks and xhci hcd */
 #include       "xhci-ext-caps.h"
+#include "pci-quirks.h"
 
 /* xHCI PCI Configuration Registers */
 #define XHCI_SBRN_OFFSET       (0x60)
@@ -232,7 +233,7 @@ struct xhci_op_regs {
  * notification type that matches a bit set in this bit field.
  */
 #define        DEV_NOTE_MASK           (0xffff)
-#define ENABLE_DEV_NOTE(x)     (1 << x)
+#define ENABLE_DEV_NOTE(x)     (1 << (x))
 /* Most of the device notification types should only be used for debug.
  * SW does need to pay attention to function wake notifications.
  */
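
The ENABLE_DEV_NOTE change parenthesizes the macro argument; without it, an
argument containing a lower-precedence operator binds to << the wrong way. A
tiny demonstration, with illustrative macros rather than the xHCI ones.

#include <stdio.h>

#define SHIFT_BAD(x)  (1 << x)
#define SHIFT_GOOD(x) (1 << (x))

int main(void)
{
        int x = 0;

        /* SHIFT_BAD expands to (1 << x ? 3 : 2): << binds tighter than ?:,
         * so it evaluates ((1 << x) ? 3 : 2) == 3 instead of 1 << 2 == 4. */
        printf("bad=%d good=%d\n",
               SHIFT_BAD(x ? 3 : 2), SHIFT_GOOD(x ? 3 : 2));
        return 0;
}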
@@ -348,6 +349,9 @@ struct xhci_op_regs {
 /* Initiate a warm port reset - complete when PORT_WRC is '1' */
 #define PORT_WR                (1 << 31)
 
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
 /* Port Power Management Status and Control - port_power_base bitmasks */
 /* Inactivity timer value for transitions into U1, in microseconds.
  * Timeout can be up to 127us.  0xFF means an infinite timeout.
@@ -601,11 +605,11 @@ struct xhci_ep_ctx {
 #define EP_STATE_STOPPED       3
 #define EP_STATE_ERROR         4
 /* Mult - Max number of bursts within an interval, in EP companion desc. */
-#define EP_MULT(p)             ((p & 0x3) << 8)
+#define EP_MULT(p)             (((p) & 0x3) << 8)
 /* bits 10:14 are Max Primary Streams */
 /* bit 15 is Linear Stream Array */
 /* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL(p)         ((p & 0xff) << 16)
+#define EP_INTERVAL(p)         (((p) & 0xff) << 16)
 #define EP_INTERVAL_TO_UFRAMES(p)              (1 << (((p) >> 16) & 0xff))
 #define EP_MAXPSTREAMS_MASK    (0x1f << 10)
 #define EP_MAXPSTREAMS(p)      (((p) << 10) & EP_MAXPSTREAMS_MASK)
@@ -1276,6 +1280,7 @@ struct xhci_hcd {
 #define        XHCI_LINK_TRB_QUIRK     (1 << 0)
 #define XHCI_RESET_EP_QUIRK    (1 << 1)
 #define XHCI_NEC_HOST          (1 << 2)
+#define XHCI_AMD_PLL_FIX       (1 << 3)
        /* There are two roothubs to keep track of bus suspend info for */
        struct xhci_bus_state   bus_state[2];
        /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
index 4cbb7e4..74073b3 100644 (file)
@@ -14,7 +14,7 @@ config USB_MUSB_HDRC
        select TWL4030_USB if MACH_OMAP_3430SDP
        select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
        select USB_OTG_UTILS
-       tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+       bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
        help
          Say Y here if your system has a dual role high speed USB
          controller based on the Mentor Graphics silicon IP.  Then
@@ -30,8 +30,8 @@ config USB_MUSB_HDRC
 
          If you do not know what this is, please say N.
 
-         To compile this driver as a module, choose M here; the
-         module will be called "musb-hdrc".
+#        To compile this driver as a module, choose M here; the
+#        module will be called "musb-hdrc".
 
 choice
        prompt "Platform Glue Layer"
index 52312e8..8e2a1ff 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/cacheflush.h>
 
 #include "musb_core.h"
+#include "musbhsdma.h"
 #include "blackfin.h"
 
 struct bfin_glue {
@@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
        return -EIO;
 }
 
+static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
+                               u16 packet_sz, u8 *mode,
+                               dma_addr_t *dma_addr, u32 *len)
+{
+       struct musb_dma_channel *musb_channel = channel->private_data;
+
+       /*
+        * Anomaly 05000450 might cause data corruption when using DMA
+        * MODE 1 transmits with short packet.  So to work around this,
+        * we truncate all MODE 1 transfers down to a multiple of the
+        * max packet size, and then do the last short packet transfer
+        * (if there is any) using MODE 0.
+        */
+       if (ANOMALY_05000450) {
+               if (musb_channel->transmit && *mode == 1)
+                       *len = *len - (*len % packet_sz);
+       }
+
+       return 0;
+}
+
 static void bfin_musb_reg_init(struct musb *musb)
 {
        if (ANOMALY_05000346) {
@@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = {
 
        .vbus_status    = bfin_musb_vbus_status,
        .set_vbus       = bfin_musb_set_vbus,
+
+       .adjust_channel_params = bfin_musb_adjust_channel_params,
 };
 
 static u64 bfin_dmamask = DMA_BIT_MASK(32);
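
The arithmetic behind the anomaly workaround above, as a stand-alone sketch (hypothetical 1000-byte transfer, not driver code):

    #include <stdio.h>

    /* Mirrors the truncation in bfin_musb_adjust_channel_params():
     * MODE 1 TX lengths are clipped to a multiple of the max packet
     * size so the trailing short packet (if any) goes out via MODE 0. */
    static unsigned int clip_to_full_packets(unsigned int len,
                                             unsigned int packet_sz)
    {
        return len - (len % packet_sz);
    }

    int main(void)
    {
        unsigned int len = 1000, packet_sz = 64;
        unsigned int dma_len = clip_to_full_packets(len, packet_sz);

        printf("MODE 1 moves %u bytes; MODE 0 finishes the last %u\n",
               dma_len, len - dma_len);    /* 960 and 40 */
        return 0;
    }
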
index de55a3c..ab434fb 100644 (file)
@@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
                length = min(n_bds * maxpacket, length);
        }
 
-       DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+       DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
                        tx->index,
                        maxpacket,
                        rndis ? "rndis" : "transparent",
                        n_bds,
-                       addr, length);
+                       (unsigned long long)addr, length);
 
        cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
 
@@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
        length = min(n_bds * maxpacket, length);
 
        DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
-                       "dma 0x%x len %u %u/%u\n",
+                       "dma 0x%llx len %u %u/%u\n",
                        rx->index, maxpacket,
                        onepacket
                                ? (is_rndis ? "rndis" : "onepacket")
@@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
                        musb_readl(tibase,
                                DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
                                        & 0xffff,
-                       addr, length, rx->channel.actual_len, rx->buf_len);
+                       (unsigned long long)addr, length,
+                       rx->channel.actual_len, rx->buf_len);
 
        /* only queue one segment at a time, since the hardware prevents
         * correct queue shutdown after unexpected short packets
@@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
                if (!completed && (bd->hw_options & CPPI_OWN_SET))
                        break;
 
-               DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+               DBG(5, "C/RXBD %llx: nxt %08x buf %08x "
                        "off.len %08x opt.len %08x (%d)\n",
-                       bd->dma, bd->hw_next, bd->hw_bufp,
+                       (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
                        bd->hw_off_len, bd->hw_options,
                        rx->channel.actual_len);
 
@@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
                musb_ep_select(cppi->mregs, rx->index + 1);
                csr = musb_readw(regs, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_DMAENAB) {
-                       DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+                       DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n",
                                rx->index,
                                rx->head, rx->tail,
                                rx->last_processed
-                                       ? rx->last_processed->dma
+                                       ? (unsigned long long)
+                                               rx->last_processed->dma
                                        : 0,
                                completed ? ", completed" : "",
                                csr);
@@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
        tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
        rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
 
-       if (!tx && !rx)
+       if (!tx && !rx) {
+               if (cppi->irq)
+                       spin_unlock_irqrestore(&musb->lock, flags);
                return IRQ_NONE;
+       }
 
        DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
 
@@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
                 */
                if (NULL == bd) {
                        DBG(1, "null BD\n");
-                       tx_ram->tx_complete = 0;
+                       musb_writel(&tx_ram->tx_complete, 0, 0);
                        continue;
                }
 
@@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
                 *    compare mode by writing 1 to the tx_complete register.
                 */
                cppi_reset_tx(tx_ram, 1);
-               cppi_ch->head = 0;
+               cppi_ch->head = NULL;
                musb_writel(&tx_ram->tx_complete, 0, 1);
                cppi_dump_tx(5, cppi_ch, " (done teardown)");
 
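
The DBG() changes above fix format/width mismatches: dma_addr_t can be 64 bits wide on some configurations, so a plain %x conversion is wrong there. A user-space sketch of the portable idiom (stand-in typedef, assuming the 64-bit case):

    #include <stdio.h>

    typedef unsigned long long dma_addr_t;  /* stand-in for the sketch */

    int main(void)
    {
        dma_addr_t addr = 0x1234567890ULL;

        /* cast to the widest type and print with %llx, as in the
         * hunks above; correct whether dma_addr_t is 32 or 64 bits */
        printf("dma 0x%llx\n", (unsigned long long)addr);
        return 0;
    }
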
index 630ae7f..f10ff00 100644 (file)
@@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev)
        struct musb     *musb = dev_to_musb(&pdev->dev);
        unsigned long   flags;
 
+       pm_runtime_get_sync(musb->controller);
        spin_lock_irqsave(&musb->lock, flags);
        musb_platform_disable(musb);
        musb_generic_disable(musb);
@@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev)
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
        musb_platform_exit(musb);
 
+       pm_runtime_put(musb->controller);
        /* FIXME power down */
 }
 
index 4bd9e21..0e053b5 100644 (file)
@@ -261,6 +261,7 @@ enum musb_g_ep0_state {
  * @try_idle:  tries to idle the IP
  * @vbus_status: returns vbus status if possible
  * @set_vbus:  forces vbus status
+ * @adjust_channel_params: pre-check for the standard dma channel_program func
  */
 struct musb_platform_ops {
        int     (*init)(struct musb *musb);
@@ -274,6 +275,10 @@ struct musb_platform_ops {
 
        int     (*vbus_status)(struct musb *musb);
        void    (*set_vbus)(struct musb *musb, int on);
+
+       int     (*adjust_channel_params)(struct dma_channel *channel,
+                               u16 packet_sz, u8 *mode,
+                               dma_addr_t *dma_addr, u32 *len);
 };
 
 /*
index 98519c5..6dfbf9f 100644 (file)
@@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                        is_dma = 1;
                        csr |= MUSB_TXCSR_P_WZC_BITS;
                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
-                                MUSB_TXCSR_TXPKTRDY);
+                                MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* Ensure writebuffer is empty. */
                        csr = musb_readw(epio, MUSB_TXCSR);
@@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
        }
 
        /* if the hardware doesn't have the request, easy ... */
-       if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+       if (musb_ep->req_list.next != &req->list || musb_ep->busy)
                musb_g_giveback(musb_ep, request, -ECONNRESET);
 
        /* ... else abort the dma transfer ... */
index 0144a2d..d281792 100644 (file)
@@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel,
        BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
                channel->status == MUSB_DMA_STATUS_BUSY);
 
+       /* Let targets check/tweak the arguments */
+       if (musb->ops->adjust_channel_params) {
+               int ret = musb->ops->adjust_channel_params(channel,
+                       packet_sz, &mode, &dma_addr, &len);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * The DMA engine in RTL1.8 and above cannot handle
         * DMA addresses that are not aligned to a 4 byte boundary.
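
The new hook is dispatched through the ops table only when a glue layer provides one. A reduced sketch of the pattern with hypothetical types (not the real musb structures):

    struct chan;

    struct platform_ops {
        /* may be NULL when a platform has nothing to adjust */
        int (*adjust)(struct chan *ch, unsigned int *len);
    };

    int program_channel(const struct platform_ops *ops,
                        struct chan *ch, unsigned int len)
    {
        if (ops->adjust) {                 /* hook is optional */
            int ret = ops->adjust(ch, &len);
            if (ret)
                return ret;                /* platform vetoed it */
        }
        /* ... program the hardware with the possibly-tweaked len ... */
        return 0;
    }
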
index 25cb8b0..57a27fa 100644 (file)
@@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb,
        case USB_EVENT_VBUS:
                DBG(4, "VBUS Connect\n");
 
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
                if (musb->gadget_driver)
                        pm_runtime_get_sync(musb->controller);
-
+#endif
                otg_init(musb->xceiv);
                break;
 
index d6384e4..f7e04bf 100644 (file)
@@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev)
        }
 
        musb->dev.parent                = &pdev->dev;
+       musb->dev.dma_mask              = pdev->dev.dma_mask;
+       musb->dev.coherent_dma_mask     = pdev->dev.coherent_dma_mask;
 
        glue->dev                       = &pdev->dev;
        glue->musb                      = musb;
index a973c7a..4de6ef0 100644 (file)
@@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
  * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
  */
 static struct usb_device_id id_table_combined [] = {
+       { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
@@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
        { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
        { USB_DEVICE(OCT_VID, OCT_US101_PID) },
+       { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
                .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
@@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
+       { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
+       { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
        { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
        { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
        { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
index c543e55..efffc23 100644 (file)
  * Hameg HO820 and HO870 interface (using VID 0x0403)
  */
 #define HAMEG_HO820_PID                        0xed74
+#define HAMEG_HO730_PID                        0xed73
+#define HAMEG_HO720_PID                        0xed72
 #define HAMEG_HO870_PID                        0xed71
 
 /*
 /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
 /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
 /* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
+#define OCT_DK201_PID          0x0103  /* OCT DK201 USB docking station */
 #define OCT_US101_PID          0x0421  /* OCT US101 USB to RS-232 */
 
 /*
 #define QIHARDWARE_VID                 0x20B7
 #define MILKYMISTONE_JTAGSERIAL_PID    0x0713
 
+/*
+ * CTI GmbH RS485 Converter http://www.cti-lean.com/
+ */
+/* USB-485-Mini */
+#define FTDI_CTI_MINI_PID      0xF608
+/* USB-Nano-485 */
+#define FTDI_CTI_NANO_PID      0xF60B
+
index 75c7f45..d77ff04 100644 (file)
@@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb);
 /* ONDA MT825UP HSDPA 14.2 modem */
 #define ONDA_MT825UP         0x000b
 
+/* Samsung products */
+#define SAMSUNG_VENDOR_ID                       0x04e8
+#define SAMSUNG_PRODUCT_GT_B3730                0x6889
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
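
USB_DEVICE_AND_INTERFACE_INFO() is used for the GT-B3730 so the driver matches only the CDC-Data interface of the device rather than every interface. A rough user-space restatement of that match rule (fields and layout are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct usb_intf { unsigned short vid, pid; unsigned char bInterfaceClass; };

    /* 0x0a is USB_CLASS_CDC_DATA */
    bool match_samsung_lte(const struct usb_intf *i)
    {
        return i->vid == 0x04e8 && i->pid == 0x6889 &&
               i->bInterfaceClass == 0x0a;
    }

    int main(void)
    {
        struct usb_intf data = { 0x04e8, 0x6889, 0x0a };
        struct usb_intf ctrl = { 0x04e8, 0x6889, 0x02 }; /* CDC control */

        printf("%d %d\n", match_samsung_lte(&data),
               match_samsung_lte(&ctrl));    /* 1 0 */
        return 0;
    }
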
index 8858201..54a9dab 100644 (file)
@@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
        ifnum = intf->desc.bInterfaceNumber;
        dbg("This Interface = %d", ifnum);
 
-       data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
+       data = kzalloc(sizeof(struct usb_wwan_intf_private),
                                         GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                    usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
                        dbg("QDL port found");
 
-                       if (serial->interface->num_altsetting == 1)
-                               return 0;
+                       if (serial->interface->num_altsetting == 1) {
+                               retval = 0; /* Success */
+                               break;
+                       }
 
                        retval = usb_set_interface(serial->dev, ifnum, 1);
                        if (retval < 0) {
@@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                                retval = -ENODEV;
                                kfree(data);
                        }
-                       return retval;
                }
                break;
 
@@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                                        "Could not set interface, error %d\n",
                                        retval);
                                retval = -ENODEV;
+                               kfree(data);
                        }
                } else if (ifnum == 2) {
                        dbg("Modem port found");
@@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                                retval = -ENODEV;
                                kfree(data);
                        }
-                       return retval;
                } else if (ifnum==3) {
                        /*
                         * NMEA (serial line 9600 8N1)
@@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                                        "Could not set interface, error %d\n",
                                        retval);
                                retval = -ENODEV;
+                               kfree(data);
                        }
                }
                break;
@@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                dev_err(&serial->dev->dev,
                        "unknown number of interfaces: %d\n", nintf);
                kfree(data);
-               return -ENODEV;
+               retval = -ENODEV;
        }
 
+       /* Set serial->private if not returning -ENODEV */
+       if (retval != -ENODEV)
+               usb_set_serial_data(serial, data);
        return retval;
 }
 
+static void qc_release(struct usb_serial *serial)
+{
+       struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+       dbg("%s", __func__);
+
+       /* Call usb_wwan release & free the private data allocated in qcprobe */
+       usb_wwan_release(serial);
+       usb_set_serial_data(serial, NULL);
+       kfree(priv);
+}
+
 static struct usb_serial_driver qcdevice = {
        .driver = {
                .owner     = THIS_MODULE,
@@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = {
        .chars_in_buffer     = usb_wwan_chars_in_buffer,
        .attach              = usb_wwan_startup,
        .disconnect          = usb_wwan_disconnect,
-       .release             = usb_wwan_release,
+       .release             = qc_release,
 #ifdef CONFIG_PM
        .suspend             = usb_wwan_suspend,
        .resume              = usb_wwan_resume,
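
The qcprobe/qc_release change pairs the probe-time allocation with a driver-owned release hook, so the private data neither leaks nor gets freed by generic code that knows nothing about it. A minimal sketch of the pattern (hypothetical types, not the usb_serial API):

    #include <stdlib.h>

    struct serial { void *private_data; };

    int probe(struct serial *s)
    {
        void *priv = calloc(1, 64);
        if (!priv)
            return -1;
        s->private_data = priv;   /* published only on success */
        return 0;
    }

    void release(struct serial *s)
    {
        /* chain any generic teardown first, then free what probe made */
        free(s->private_data);
        s->private_data = NULL;
    }
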
index a2e5b51..0f4e8c9 100644 (file)
@@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
 
        switch (val) {
        case CPUFREQ_PRECHANGE:
-               if (!fbi->overlay[0].usage && !fbi->overlay[1].usage)
+#ifdef CONFIG_FB_PXA_OVERLAY
+               if (!(fbi->overlay[0].usage || fbi->overlay[1].usage))
+#endif
                        set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
                break;
 
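
Note the shape of the pxafb hunk: the #ifdef guards only the if line, so with CONFIG_FB_PXA_OVERLAY unset the call below it runs unconditionally, and with it set the overlay-usage test gates it. A compilable sketch of the same construct:

    #include <stdio.h>

    /* #define CONFIG_FB_PXA_OVERLAY */    /* flip to taste */

    int main(void)
    {
        int usage0 = 1, usage1 = 0;

        printf("overlay usage: %d %d\n", usage0, usage1);
    #ifdef CONFIG_FB_PXA_OVERLAY
        if (!(usage0 || usage1))
    #endif
            printf("disable clock changes\n");
        return 0;
    }
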
index 42d6c93..33167b4 100644 (file)
@@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
 {
-       unsigned int irq;
-       int retval;
+       int irq, retval;
 
        irq = bind_evtchn_to_irq(evtchn);
        if (irq < 0)
@@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
 {
-       unsigned int irq;
-       int retval;
+       int irq, retval;
 
        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
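
The xen/events change is a signedness fix: bind_*_to_irq() returns a negative errno, and with an unsigned variable the irq < 0 test can never fire, so errors sail through. A stand-alone demonstration:

    #include <stdio.h>

    static int bind_returns_error(void) { return -16; /* -EBUSY */ }

    int main(void)
    {
        unsigned int uirq = bind_returns_error();
        int irq = bind_returns_error();

        printf("unsigned check fires: %d\n", uirq < 0); /* 0: bug masked */
        printf("signed check fires:   %d\n", irq < 0);  /* 1: error seen */
        return 0;
    }
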
index 95143dd..1ac9412 100644 (file)
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
        xen_mm_unpin_all();
 }
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 static int xen_suspend(void *data)
 {
        struct suspend_info *si = data;
@@ -173,7 +173,7 @@ out:
 #endif
        shutting_down = SHUTDOWN_INVALID;
 }
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 struct shutdown_handler {
        const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
                { "poweroff",   do_poweroff },
                { "halt",       do_poweroff },
                { "reboot",     do_reboot   },
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
                { "suspend",    do_suspend  },
 #endif
                {NULL, NULL},
index 0ee5945..85b67ff 100644 (file)
@@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
 
 struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
 {
-       int err, flags;
+       int err;
        struct p9_fid *fid;
-       struct v9fs_session_info *v9ses;
 
-       v9ses = v9fs_dentry2v9ses(dentry);
        fid = v9fs_fid_clone_with_uid(dentry, 0);
        if (IS_ERR(fid))
                goto error_out;
@@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
  * dirty pages. We always request the open fid in read-write
  * mode so that a partial page write which results in a page
  * read can work.
-        *
-        * we don't have a tsyncfs operation for older version
-        * of protocol. So make sure the write back fid is
-        * opened in O_SYNC mode.
         */
-       if (!v9fs_proto_dotl(v9ses))
-               flags = O_RDWR | O_SYNC;
-       else
-               flags = O_RDWR;
-
-       err = p9_client_open(fid, flags);
+       err = p9_client_open(fid, O_RDWR);
        if (err < 0) {
                p9_client_clunk(fid);
                fid = ERR_PTR(err);
index 9665c2b..e5ebedf 100644 (file)
@@ -116,7 +116,6 @@ struct v9fs_session_info {
        struct list_head slist; /* list of sessions registered with v9fs */
        struct backing_dev_info bdi;
        struct rw_semaphore rename_sem;
-       struct p9_fid *root_fid; /* Used for file system sync */
 };
 
 /* cache_validity flags */
index b6a3b9f..e022890 100644 (file)
@@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
                        retval = v9fs_refresh_inode_dotl(fid, inode);
                else
                        retval = v9fs_refresh_inode(fid, inode);
-               if (retval <= 0)
+               if (retval == -ENOENT)
+                       return 0;
+               if (retval < 0)
                        return retval;
        }
 out_valid:
index ffbb113..82a7c38 100644 (file)
@@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
        fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid)) {
                __putname(link);
-               link = ERR_PTR(PTR_ERR(fid));
+               link = ERR_CAST(fid);
                goto ndset;
        }
        retval = p9_client_readlink(fid, &target);
index f3eed33..feef6cd 100644 (file)
@@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
                retval = PTR_ERR(inode);
                goto release_sb;
        }
+
        root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
@@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
                p9stat_free(st);
                kfree(st);
        }
-       v9fs_fid_add(root, fid);
        retval = v9fs_get_acl(inode, fid);
        if (retval)
                goto release_sb;
-       /*
-        * Add the root fid to session info. This is used
-        * for file system sync. We want a cloned fid here
-        * so that we can do a sync_filesystem after a
-        * shrink_dcache_for_umount
-        */
-       v9ses->root_fid = v9fs_fid_clone(root);
-       if (IS_ERR(v9ses->root_fid)) {
-               retval = PTR_ERR(v9ses->root_fid);
-               goto release_sb;
-       }
+       v9fs_fid_add(root, fid);
 
        P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
        return dget(sb->s_root);
@@ -210,11 +200,15 @@ close_session:
        v9fs_session_close(v9ses);
        kfree(v9ses);
        return ERR_PTR(retval);
+
 release_sb:
        /*
-        * we will do the session_close and root dentry
-        * release in the below call.
+        * we will do the session_close and root dentry release
+        * in the below call. But we need to clunk the fid, because we
+        * haven't attached the fid to the dentry, so it won't get
+        * clunked automatically.
         */
+       p9_client_clunk(fid);
        deactivate_locked_super(sb);
        return ERR_PTR(retval);
 }
@@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s)
        P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
 
        kill_anon_super(s);
-       p9_client_clunk(v9ses->root_fid);
+
        v9fs_session_cancel(v9ses);
        v9fs_session_close(v9ses);
        kfree(v9ses);
@@ -285,14 +279,6 @@ done:
        return res;
 }
 
-static int v9fs_sync_fs(struct super_block *sb, int wait)
-{
-       struct v9fs_session_info *v9ses = sb->s_fs_info;
-
-       P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
-       return p9_client_sync_fs(v9ses->root_fid);
-}
-
 static int v9fs_drop_inode(struct inode *inode)
 {
        struct v9fs_session_info *v9ses;
@@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode)
        return 1;
 }
 
+static int v9fs_write_inode(struct inode *inode,
+                           struct writeback_control *wbc)
+{
+       int ret;
+       struct p9_wstat wstat;
+       struct v9fs_inode *v9inode;
+       /*
+        * send an fsync request to the server irrespective of
+        * wbc->sync_mode.
+        */
+       P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+       v9inode = V9FS_I(inode);
+       if (!v9inode->writeback_fid)
+               return 0;
+       v9fs_blank_wstat(&wstat);
+
+       ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
+       if (ret < 0) {
+               __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+               return ret;
+       }
+       return 0;
+}
+
+static int v9fs_write_inode_dotl(struct inode *inode,
+                                struct writeback_control *wbc)
+{
+       int ret;
+       struct v9fs_inode *v9inode;
+       /*
+        * send an fsync request to the server irrespective of
+        * wbc->sync_mode.
+        */
+       P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+       v9inode = V9FS_I(inode);
+       if (!v9inode->writeback_fid)
+               return 0;
+       ret = p9_client_fsync(v9inode->writeback_fid, 0);
+       if (ret < 0) {
+               __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+               return ret;
+       }
+       return 0;
+}
+
 static const struct super_operations v9fs_super_ops = {
        .alloc_inode = v9fs_alloc_inode,
        .destroy_inode = v9fs_destroy_inode,
@@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = {
        .evict_inode = v9fs_evict_inode,
        .show_options = generic_show_options,
        .umount_begin = v9fs_umount_begin,
+       .write_inode = v9fs_write_inode,
 };
 
 static const struct super_operations v9fs_super_ops_dotl = {
        .alloc_inode = v9fs_alloc_inode,
        .destroy_inode = v9fs_destroy_inode,
-       .sync_fs = v9fs_sync_fs,
        .statfs = v9fs_statfs,
        .drop_inode = v9fs_drop_inode,
        .evict_inode = v9fs_evict_inode,
        .show_options = generic_show_options,
        .umount_begin = v9fs_umount_begin,
+       .write_inode = v9fs_write_inode_dotl,
 };
 
 struct file_system_type v9fs_fs_type = {
index f34078d..303983f 100644 (file)
@@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->start_stack = bprm->p;
 
 #ifdef arch_randomize_brk
-       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
+       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+               current->brk_randomized = 1;
+#endif
+       }
 #endif
 
        if (current->personality & MMAP_PAGE_ZERO) {
index de34bfa..5d505aa 100644 (file)
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
 
        if (value) {
                acl = posix_acl_from_xattr(value, size);
-               if (acl == NULL) {
-                       value = NULL;
-                       size = 0;
+               if (acl) {
+                       ret = posix_acl_valid(acl);
+                       if (ret)
+                               goto out;
                } else if (IS_ERR(acl)) {
                        return PTR_ERR(acl);
                }
        }
 
        ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
-
+out:
        posix_acl_release(acl);
 
        return ret;
index 3458b57..2e61fe1 100644 (file)
@@ -740,8 +740,10 @@ struct btrfs_space_info {
         */
        unsigned long reservation_progress;
 
-       int full;               /* indicates that we cannot allocate any more
+       int full:1;             /* indicates that we cannot allocate any more
                                   chunks for this space */
+       int chunk_alloc:1;      /* set if we are allocating a chunk */
+
        int force_alloc;        /* set if we need to force a chunk alloc for
                                   this space */
 
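
Turning full into a 1-bit field lets the new chunk_alloc flag share storage with it instead of adding another int. A quick sizeof sketch (exact layout is compiler-dependent):

    #include <stdio.h>

    struct wide   { int full;    int chunk_alloc;   };
    struct packed { int full:1;  int chunk_alloc:1; };

    int main(void)
    {
        /* note: a signed 1-bit field holds 0 or -1; truth tests on it
         * still behave as expected for flag use */
        printf("%zu vs %zu bytes\n", sizeof(struct wide),
               sizeof(struct packed));    /* typically 8 vs 4 */
        return 0;
    }
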
@@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end);
 int btrfs_release_file(struct inode *inode, struct file *file);
+void btrfs_drop_pages(struct page **pages, size_t num_pages);
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+                     struct page **pages, size_t num_pages,
+                     loff_t pos, size_t write_bytes,
+                     struct extent_state **cached);
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
index 8f1d44b..68c84c8 100644 (file)
@@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
                btrfs_destroy_pinned_extent(root,
                                            root->fs_info->pinned_extents);
 
-               t->use_count = 0;
+               atomic_set(&t->use_count, 0);
                list_del_init(&t->list);
                memset(t, 0, sizeof(*t));
                kmem_cache_free(btrfs_transaction_cachep, t);
index f619c3c..31f33ba 100644 (file)
 #include "locking.h"
 #include "free-space-cache.h"
 
+/*
+ * Control flags for do_chunk_alloc's force field.
+ *
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one.
+ *
+ * CHUNK_ALLOC_LIMITED means to only try to allocate one
+ * if we have very few chunks already allocated.  This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks.
+ */
+enum {
+       CHUNK_ALLOC_NO_FORCE = 0,
+       CHUNK_ALLOC_FORCE = 1,
+       CHUNK_ALLOC_LIMITED = 2,
+};
+
 static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
@@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        found->bytes_readonly = 0;
        found->bytes_may_use = 0;
        found->full = 0;
-       found->force_alloc = 0;
+       found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       found->chunk_alloc = 0;
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
        atomic_set(&found->caching_threads, 0);
@@ -3150,7 +3170,7 @@ again:
                if (!data_sinfo->full && alloc_chunk) {
                        u64 alloc_target;
 
-                       data_sinfo->force_alloc = 1;
+                       data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
                        spin_unlock(&data_sinfo->lock);
 alloc:
                        alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3160,7 +3180,8 @@ alloc:
 
                        ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                             bytes + 2 * 1024 * 1024,
-                                            alloc_target, 0);
+                                            alloc_target,
+                                            CHUNK_ALLOC_NO_FORCE);
                        btrfs_end_transaction(trans, root);
                        if (ret < 0) {
                                if (ret != -ENOSPC)
@@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
-                       found->force_alloc = 1;
+                       found->force_alloc = CHUNK_ALLOC_FORCE;
        }
        rcu_read_unlock();
 }
 
 static int should_alloc_chunk(struct btrfs_root *root,
-                             struct btrfs_space_info *sinfo, u64 alloc_bytes)
+                             struct btrfs_space_info *sinfo, u64 alloc_bytes,
+                             int force)
 {
        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+       u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
        u64 thresh;
 
-       if (sinfo->bytes_used + sinfo->bytes_reserved +
-           alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+       if (force == CHUNK_ALLOC_FORCE)
+               return 1;
+
+       /*
+        * in limited mode, we want to have some free space up to
+        * about 1% of the FS size.
+        */
+       if (force == CHUNK_ALLOC_LIMITED) {
+               thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+               thresh = max_t(u64, 64 * 1024 * 1024,
+                              div_factor_fine(thresh, 1));
+
+               if (num_bytes - num_allocated < thresh)
+                       return 1;
+       }
+
+       /*
+        * we have two similar checks here, one based on a percentage
+        * and one based on a hard number of 256MB.  The idea
+        * is that if we have a good amount of free
+        * room, don't allocate a chunk.  "A good amount" means the
+        * chunks we have allocated are less than 80% utilized,
+        * or more than 256MB are free.
+        */
+       if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
                return 0;
 
-       if (sinfo->bytes_used + sinfo->bytes_reserved +
-           alloc_bytes < div_factor(num_bytes, 8))
+       if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                return 0;
 
        thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+
+       /* 256MB or 5% of the FS */
        thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
 
        if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
                return 0;
-
        return 1;
 }
 
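
A worked example of the CHUNK_ALLOC_LIMITED branch above, with made-up sizes (div_factor_fine(thresh, 1) in the kernel is thresh/100, i.e. about 1% of the FS):

    #include <stdio.h>

    typedef unsigned long long u64;

    int main(void)
    {
        u64 fs_size     = 1024ULL << 30;  /* 1 TiB filesystem      */
        u64 chunk_total = 12ULL << 30;    /* chunks already carved */
        u64 allocated   = 4ULL << 30;     /* used + reserved       */

        u64 thresh = fs_size / 100;       /* ~10.7e9 bytes         */
        if (thresh < 64ULL << 20)
            thresh = 64ULL << 20;         /* floor of 64 MiB       */

        printf("free room in chunks: %llu\n", chunk_total - allocated);
        printf("limited-mode threshold: %llu\n", thresh);
        printf("allocate another chunk: %s\n",
               chunk_total - allocated < thresh ? "yes" : "no");
        return 0;
    }
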
@@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
        struct btrfs_space_info *space_info;
        struct btrfs_fs_info *fs_info = extent_root->fs_info;
+       int wait_for_alloc = 0;
        int ret = 0;
 
-       mutex_lock(&fs_info->chunk_mutex);
-
        flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
        space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        }
        BUG_ON(!space_info);
 
+again:
        spin_lock(&space_info->lock);
        if (space_info->force_alloc)
-               force = 1;
+               force = space_info->force_alloc;
        if (space_info->full) {
                spin_unlock(&space_info->lock);
-               goto out;
+               return 0;
        }
 
-       if (!force && !should_alloc_chunk(extent_root, space_info,
-                                         alloc_bytes)) {
+       if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
                spin_unlock(&space_info->lock);
-               goto out;
+               return 0;
+       } else if (space_info->chunk_alloc) {
+               wait_for_alloc = 1;
+       } else {
+               space_info->chunk_alloc = 1;
        }
+
        spin_unlock(&space_info->lock);
 
+       mutex_lock(&fs_info->chunk_mutex);
+
+       /*
+        * The chunk_mutex is held throughout the entirety of a chunk
+        * allocation, so once we've acquired the chunk_mutex we know that the
+        * other guy is done and we need to recheck and see if we should
+        * allocate.
+        */
+       if (wait_for_alloc) {
+               mutex_unlock(&fs_info->chunk_mutex);
+               wait_for_alloc = 0;
+               goto again;
+       }
+
        /*
         * If we have mixed data/metadata chunks we want to make sure we keep
         * allocating mixed chunks instead of individual chunks.
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                space_info->full = 1;
        else
                ret = 1;
-       space_info->force_alloc = 0;
+
+       space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+       space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
-out:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
        return ret;
 }
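
The locking shape added to do_chunk_alloc(): a spinlock-protected in-progress flag keeps callers out of the slow path, and losers take the chunk_mutex only to wait for the winner, then re-evaluate from the top. A pthread approximation of the same pattern (the kernel pairs a spinlock with a mutex the same way):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;        /* space_info->lock */
    static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER; /* fs chunk_mutex   */
    static bool full, in_progress;

    int alloc_chunk(void)
    {
        bool wait_for_alloc;
    again:
        pthread_mutex_lock(&lock);
        if (full) {                       /* someone already filled it */
            pthread_mutex_unlock(&lock);
            return 0;
        }
        wait_for_alloc = in_progress;
        if (!wait_for_alloc)
            in_progress = true;           /* we are the allocator */
        pthread_mutex_unlock(&lock);

        pthread_mutex_lock(&chunk_mutex);
        if (wait_for_alloc) {
            /* the winner holds chunk_mutex for the whole allocation,
             * so reaching here means it finished; recheck everything */
            pthread_mutex_unlock(&chunk_mutex);
            goto again;
        }
        /* ... do the actual allocation ... */
        pthread_mutex_lock(&lock);
        in_progress = false;
        pthread_mutex_unlock(&lock);
        pthread_mutex_unlock(&chunk_mutex);
        return 1;
    }
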
@@ -5303,11 +5368,13 @@ loop:
 
                if (allowed_chunk_alloc) {
                        ret = do_chunk_alloc(trans, root, num_bytes +
-                                            2 * 1024 * 1024, data, 1);
+                                            2 * 1024 * 1024, data,
+                                            CHUNK_ALLOC_LIMITED);
                        allowed_chunk_alloc = 0;
                        done_chunk_alloc = 1;
-               } else if (!done_chunk_alloc) {
-                       space_info->force_alloc = 1;
+               } else if (!done_chunk_alloc &&
+                          space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+                       space_info->force_alloc = CHUNK_ALLOC_LIMITED;
                }
 
                if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5393,7 +5460,8 @@ again:
         */
        if (empty_size || root->ref_cows)
                ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-                                    num_bytes + 2 * 1024 * 1024, data, 0);
+                                    num_bytes + 2 * 1024 * 1024, data,
+                                    CHUNK_ALLOC_NO_FORCE);
 
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5405,7 +5473,7 @@ again:
                num_bytes = num_bytes & ~(root->sectorsize - 1);
                num_bytes = max(num_bytes, min_alloc_size);
                do_chunk_alloc(trans, root->fs_info->extent_root,
-                              num_bytes, data, 1);
+                              num_bytes, data, CHUNK_ALLOC_FORCE);
                goto again;
        }
        if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 
        alloc_flags = update_block_group_flags(root, cache->flags);
        if (alloc_flags != cache->flags)
-               do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+               do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                              CHUNK_ALLOC_FORCE);
 
        ret = set_block_group_ro(cache);
        if (!ret)
                goto out;
        alloc_flags = get_alloc_profile(root, cache->space_info->flags);
-       ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+       ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                            CHUNK_ALLOC_FORCE);
        if (ret < 0)
                goto out;
        ret = set_block_group_ro(cache);
@@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, u64 type)
 {
        u64 alloc_flags = get_alloc_profile(root, type);
-       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                             CHUNK_ALLOC_FORCE);
 }
 
 /*
index 20ddb28..3151386 100644 (file)
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
        }
 }
 
+static void uncache_state(struct extent_state **cached_ptr)
+{
+       if (cached_ptr && (*cached_ptr)) {
+               struct extent_state *state = *cached_ptr;
+               *cached_ptr = NULL;
+               free_extent_state(state);
+       }
+}
+
 /*
  * set some bits on a range in the tree.  This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-                       gfp_t mask)
+                       struct extent_state **cached_state, gfp_t mask)
 {
-       return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-                             NULL, mask);
+       return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+                             NULL, cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                                mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                 gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
                                mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 
        do {
                struct page *page = bvec->bv_page;
+               struct extent_state *cached = NULL;
+               struct extent_state *state;
+
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);
 
+               spin_lock(&tree->lock);
+               state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+               if (state && state->start == start) {
+                       /*
+                        * take a reference on the state, unlock will drop
+                        * the ref
+                        */
+                       cache_state(state, &cached);
+               }
+               spin_unlock(&tree->lock);
+
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
-                                                             NULL);
+                                                             state);
                        if (ret)
                                uptodate = 0;
                }
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
+                               uncache_state(&cached);
                                continue;
                        }
                }
 
                if (uptodate) {
-                       set_extent_uptodate(tree, start, end,
+                       set_extent_uptodate(tree, start, end, &cached,
                                            GFP_ATOMIC);
                }
-               unlock_extent(tree, start, end, GFP_ATOMIC);
+               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
                if (whole_page) {
                        if (uptodate) {
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 
        do {
                struct page *page = bvec->bv_page;
+               struct extent_state *cached = NULL;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
                        prefetchw(&bvec->bv_page->flags);
 
                if (uptodate) {
-                       set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+                       set_extent_uptodate(tree, start, end, &cached,
+                                           GFP_ATOMIC);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
 
-               unlock_extent(tree, start, end, GFP_ATOMIC);
+               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
        } while (bvec >= bio->bi_io_vec);
 
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        while (cur <= end) {
                if (cur >= last_byte) {
                        char *userpage;
+                       struct extent_state *cached = NULL;
+
                        iosize = PAGE_CACHE_SIZE - page_offset;
                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + page_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
-                                           GFP_NOFS);
-                       unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                                           &cached, GFP_NOFS);
+                       unlock_extent_cached(tree, cur, cur + iosize - 1,
+                                            &cached, GFP_NOFS);
                        break;
                }
                em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
                        char *userpage;
+                       struct extent_state *cached = NULL;
+
                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + page_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
 
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
-                                           GFP_NOFS);
-                       unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+                                           &cached, GFP_NOFS);
+                       unlock_extent_cached(tree, cur, cur + iosize - 1,
+                                            &cached, GFP_NOFS);
                        cur = cur + iosize;
                        page_offset += iosize;
                        continue;
@@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
                        iocount++;
                        block_start = block_start + iosize;
                } else {
-                       set_extent_uptodate(tree, block_start, cur_end,
+                       struct extent_state *cached = NULL;
+
+                       set_extent_uptodate(tree, block_start, cur_end, &cached,
                                            GFP_NOFS);
-                       unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+                       unlock_extent_cached(tree, block_start, cur_end,
+                                            &cached, GFP_NOFS);
                        block_start = cur_end + 1;
                }
                page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
        num_pages = num_extent_pages(eb->start, eb->len);
 
        set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-                           GFP_NOFS);
+                           NULL, GFP_NOFS);
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
        kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+       unsigned long distance = (src > dst) ? src - dst : dst - src;
+       return distance < len;
+}
+
 static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
@@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        char *src_kaddr;
 
-       if (dst_page != src_page)
+       if (dst_page != src_page) {
                src_kaddr = kmap_atomic(src_page, KM_USER1);
-       else
+       } else {
                src_kaddr = dst_kaddr;
+               BUG_ON(areas_overlap(src_off, dst_off, len));
+       }
 
        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                       "len %lu len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
-       if (dst_offset < src_offset) {
+       if (!areas_overlap(src_offset, dst_offset, len)) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
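
The old dst_offset < src_offset test in memmove_extent_buffer missed overlapping forward copies; two ranges of length len collide iff |src - dst| < len, which is what areas_overlap() checks. A quick check of the predicate:

    #include <stdio.h>
    #include <stdbool.h>

    static bool areas_overlap(unsigned long src, unsigned long dst,
                              unsigned long len)
    {
        unsigned long distance = (src > dst) ? src - dst : dst - src;
        return distance < len;
    }

    int main(void)
    {
        /* dst(0) < src(4), yet copying 16 bytes still overlaps */
        printf("%d\n", areas_overlap(4, 0, 16));   /* 1 */
        /* disjoint: distance 16 >= len 16 */
        printf("%d\n", areas_overlap(16, 0, 16));  /* 0 */
        return 0;
    }
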
index f62c544..af2d717 100644 (file)
@@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   int bits, int exclusive_bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-                       gfp_t mask);
+                       struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
index e621ea5..75899a0 100644 (file)
@@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
 /*
  * unlocks pages after btrfs_file_write is done with them
  */
-static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
+void btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
        size_t i;
        for (i = 0; i < num_pages; i++) {
@@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
  * this also makes the decision about creating an inline extent vs
  * doing real data extents, marking pages dirty and delalloc as required.
  */
-static noinline int dirty_and_release_pages(struct btrfs_root *root,
-                                           struct file *file,
-                                           struct page **pages,
-                                           size_t num_pages,
-                                           loff_t pos,
-                                           size_t write_bytes)
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+                     struct page **pages, size_t num_pages,
+                     loff_t pos, size_t write_bytes,
+                     struct extent_state **cached)
 {
        int err = 0;
        int i;
-       struct inode *inode = fdentry(file)->d_inode;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
@@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root,
 
        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-                                       NULL);
+                                       cached);
        if (err)
                return err;
 
@@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                }
 
                if (copied > 0) {
-                       ret = dirty_and_release_pages(root, file, pages,
-                                                     dirty_pages, pos,
-                                                     copied);
+                       ret = btrfs_dirty_pages(root, inode, pages,
+                                               dirty_pages, pos, copied,
+                                               NULL);
                        if (ret) {
                                btrfs_delalloc_release_space(inode,
                                        dirty_pages << PAGE_CACHE_SHIFT);
index f561c95..11d2e9c 100644 (file)
@@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        struct inode *inode;
        struct rb_node *node;
        struct list_head *pos, *n;
+       struct page **pages;
        struct page *page;
        struct extent_state *cached_state = NULL;
        struct btrfs_free_cluster *cluster = NULL;
@@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        u64 start, end, len;
        u64 bytes = 0;
        u32 *crc, *checksums;
-       pgoff_t index = 0, last_index = 0;
        unsigned long first_page_offset;
-       int num_checksums;
+       int index = 0, num_pages = 0;
        int entries = 0;
        int bitmaps = 0;
        int ret = 0;
        bool next_page = false;
+       bool out_of_space = false;
 
        root = root->fs_info->tree_root;
 
@@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                return 0;
        }
 
-       last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+       num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+               PAGE_CACHE_SHIFT;
        filemap_write_and_wait(inode->i_mapping);
        btrfs_wait_ordered_range(inode, inode->i_size &
                                 ~(root->sectorsize - 1), (u64)-1);
 
        /* We need a checksum per page. */
-       num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
-       crc = checksums  = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
+       crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
        if (!crc) {
                iput(inode);
                return 0;
        }
 
+       pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+       if (!pages) {
+               kfree(crc);
+               iput(inode);
+               return 0;
+       }
+
        /* Since the first page has all of our checksums and our generation we
         * need to calculate the offset into the page at which we can start writing
         * our entries.
         */
-       first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
+       first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
 
        /* Get the cluster for this block_group if it exists */
        if (!list_empty(&block_group->cluster_list))
@@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
         * after find_get_page at this point.  Just putting this here so people
         * know and don't freak out.
         */
-       while (index <= last_index) {
+       while (index < num_pages) {
                page = grab_cache_page(inode->i_mapping, index);
                if (!page) {
-                       pgoff_t i = 0;
+                       int i;
 
-                       while (i < index) {
-                               page = find_get_page(inode->i_mapping, i);
-                               unlock_page(page);
-                               page_cache_release(page);
-                               page_cache_release(page);
-                               i++;
+                       for (i = 0; i < num_pages; i++) {
+                               unlock_page(pages[i]);
+                               page_cache_release(pages[i]);
                        }
                        goto out_free;
                }
+               pages[index] = page;
                index++;
        }
 
@@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                        offset = start_offset;
                }
 
-               page = find_get_page(inode->i_mapping, index);
+               if (index >= num_pages) {
+                       out_of_space = true;
+                       break;
+               }
+
+               page = pages[index];
 
                addr = kmap(page);
                entry = addr + start_offset;
@@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 
                bytes += PAGE_CACHE_SIZE;
 
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-
-               /*
-                * We need to release our reference we got for grab_cache_page,
-                * except for the first page which will hold our checksums, we
-                * do that below.
-                */
-               if (index != 0) {
-                       unlock_page(page);
-                       page_cache_release(page);
-               }
-
-               page_cache_release(page);
-
                index++;
        } while (node || next_page);
 
@@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
 
-               page = find_get_page(inode->i_mapping, index);
+               if (index >= num_pages) {
+                       out_of_space = true;
+                       break;
+               }
+               page = pages[index];
 
                addr = kmap(page);
                memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
@@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                crc++;
                bytes += PAGE_CACHE_SIZE;
 
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-               unlock_page(page);
-               page_cache_release(page);
-               page_cache_release(page);
                list_del_init(&entry->list);
                index++;
        }
 
+       if (out_of_space) {
+               btrfs_drop_pages(pages, num_pages);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+                                    i_size_read(inode) - 1, &cached_state,
+                                    GFP_NOFS);
+               ret = 0;
+               goto out_free;
+       }
+
        /* Zero out the rest of the pages just to make sure */
-       while (index <= last_index) {
+       while (index < num_pages) {
                void *addr;
 
-               page = find_get_page(inode->i_mapping, index);
-
+               page = pages[index];
                addr = kmap(page);
                memset(addr, 0, PAGE_CACHE_SIZE);
                kunmap(page);
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-               unlock_page(page);
-               page_cache_release(page);
-               page_cache_release(page);
                bytes += PAGE_CACHE_SIZE;
                index++;
        }
 
-       btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
-
        /* Write the checksums and trans id to the first page */
        {
                void *addr;
                u64 *gen;
 
-               page = find_get_page(inode->i_mapping, 0);
+               page = pages[0];
 
                addr = kmap(page);
-               memcpy(addr, checksums, sizeof(u32) * num_checksums);
-               gen = addr + (sizeof(u32) * num_checksums);
+               memcpy(addr, checksums, sizeof(u32) * num_pages);
+               gen = addr + (sizeof(u32) * num_pages);
                *gen = trans->transid;
                kunmap(page);
-               ClearPageChecked(page);
-               set_page_extent_mapped(page);
-               SetPageUptodate(page);
-               set_page_dirty(page);
-               unlock_page(page);
-               page_cache_release(page);
-               page_cache_release(page);
        }
-       BTRFS_I(inode)->generation = trans->transid;
 
+       ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
+                                           bytes, &cached_state);
+       btrfs_drop_pages(pages, num_pages);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 
+       if (ret) {
+               ret = 0;
+               goto out_free;
+       }
+
+       BTRFS_I(inode)->generation = trans->transid;
+
        filemap_write_and_wait(inode->i_mapping);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -853,6 +845,7 @@ out_free:
                BTRFS_I(inode)->generation = 0;
        }
        kfree(checksums);
+       kfree(pages);
        btrfs_update_inode(trans, root, inode);
        iput(inode);
        return ret;
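
The hunks above replace the old per-index find_get_page()/page_cache_release()
dance with a pages[] array that is filled once up front and torn down through
btrfs_drop_pages(), so every exit path releases exactly one reference per page.
A minimal userspace sketch of that acquire-all-or-roll-back shape (buffer
allocation stands in for page references; none of these names are kernel APIs):

    #include <stdlib.h>

    static void drop_bufs(char **bufs, int nr)
    {
            for (int i = 0; i < nr; i++)
                    free(bufs[i]);
    }

    static char **grab_bufs(int nr)
    {
            char **bufs = calloc(nr, sizeof(*bufs));

            if (!bufs)
                    return NULL;
            for (int i = 0; i < nr; i++) {
                    bufs[i] = malloc(4096);
                    if (!bufs[i]) {
                            /* roll back everything acquired so far */
                            drop_bufs(bufs, i);
                            free(bufs);
                            return NULL;
                    }
            }
            return bufs;
    }

    int main(void)
    {
            char **bufs = grab_bufs(8);

            if (!bufs)
                    return 1;
            /* ... fill the buffers ... */
            drop_bufs(bufs, 8);     /* one release point for every path */
            free(bufs);
            return 0;
    }
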
index 5cc64ab..fcd66b6 100644 (file)
@@ -1770,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);
 
-       btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-       ret = btrfs_update_inode(trans, root, inode);
-       BUG_ON(ret);
+       ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+       if (!ret) {
+               ret = btrfs_update_inode(trans, root, inode);
+               BUG_ON(ret);
+       }
+       ret = 0;
 out:
        if (nolock) {
                if (trans)
@@ -2590,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct btrfs_inode_item *item,
                            struct inode *inode)
 {
+       if (!leaf->map_token)
+               map_private_extent_buffer(leaf, (unsigned long)item,
+                                         sizeof(struct btrfs_inode_item),
+                                         &leaf->map_token, &leaf->kaddr,
+                                         &leaf->map_start, &leaf->map_len,
+                                         KM_USER1);
+
        btrfs_set_inode_uid(leaf, item, inode->i_uid);
        btrfs_set_inode_gid(leaf, item, inode->i_gid);
        btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2618,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
        btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
        btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+
+       if (leaf->map_token) {
+               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+               leaf->map_token = NULL;
+       }
 }
 
 /*
@@ -4207,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        struct btrfs_key found_key;
        struct btrfs_path *path;
        int ret;
-       u32 nritems;
        struct extent_buffer *leaf;
        int slot;
-       int advance;
        unsigned char d_type;
        int over = 0;
        u32 di_cur;
@@ -4253,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
-       advance = 0;
 
        while (1) {
                leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
                slot = path->slots[0];
-               if (advance || slot >= nritems) {
-                       if (slot >= nritems - 1) {
-                               ret = btrfs_next_leaf(root, path);
-                               if (ret)
-                                       break;
-                               leaf = path->nodes[0];
-                               nritems = btrfs_header_nritems(leaf);
-                               slot = path->slots[0];
-                       } else {
-                               slot++;
-                               path->slots[0]++;
-                       }
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto err;
+                       else if (ret > 0)
+                               break;
+                       continue;
                }
 
-               advance = 1;
                item = btrfs_item_nr(leaf, slot);
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
@@ -4282,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                if (btrfs_key_type(&found_key) != key_type)
                        break;
                if (found_key.offset < filp->f_pos)
-                       continue;
+                       goto next;
 
                filp->f_pos = found_key.offset;
 
@@ -4335,6 +4340,8 @@ skip:
                        di_cur += di_len;
                        di = (struct btrfs_dir_item *)((char *)di + di_len);
                }
+next:
+               path->slots[0]++;
        }
 
        /* Reached end of directory/root. Bump pos past the last item. */
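
The readdir rework above drops the advance flag: the slot is checked against
btrfs_header_nritems() at the top of each pass, btrfs_next_leaf() runs only
when the leaf is exhausted, and the slot is bumped in exactly one place (the
new next: label). A self-contained sketch of that loop shape, with a toy leaf
array standing in for the btrfs path API:

    #include <stdio.h>

    struct leaf { int nritems; int items[4]; };

    static struct leaf leaves[] = {
            { 3, { 10, 11, 12 } },
            { 2, { 13, 14 } },
    };
    static const int nleaves = sizeof(leaves) / sizeof(leaves[0]);
    static int cur;

    /* stand-in for btrfs_next_leaf(): 0 = advanced, >0 = no more leaves */
    static int next_leaf(void)
    {
            return ++cur < nleaves ? 0 : 1;
    }

    int main(void)
    {
            int slot = 0;

            while (1) {
                    struct leaf *leaf = &leaves[cur];

                    if (slot >= leaf->nritems) {
                            if (next_leaf() > 0)
                                    break;
                            slot = 0;
                            continue;
                    }
                    printf("item %d\n", leaf->items[slot]);
                    slot++;         /* the single advance point */
            }
            return 0;
    }
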
@@ -4527,14 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        BUG_ON(!path);
 
        inode = new_inode(root->fs_info->sb);
-       if (!inode)
+       if (!inode) {
+               btrfs_free_path(path);
                return ERR_PTR(-ENOMEM);
+       }
 
        if (dir) {
                trace_btrfs_inode_request(dir);
 
                ret = btrfs_set_inode_index(dir, index);
                if (ret) {
+                       btrfs_free_path(path);
                        iput(inode);
                        return ERR_PTR(ret);
                }
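
The two btrfs_free_path() additions above plug path leaks on the early error
returns of btrfs_new_inode(). A sketch of the single-exit style the kernel
commonly uses to make such leaks structurally impossible (toy names, not the
btrfs helpers):

    #include <stdlib.h>

    struct path { int dummy; };

    static int do_work(int fail)
    {
            struct path *path;
            int ret = 0;

            path = malloc(sizeof(*path));
            if (!path)
                    return -1;

            if (fail) {
                    ret = -1;
                    goto out;       /* error paths still release path */
            }
            /* ... use path ... */
    out:
            free(path);
            return ret;
    }

    int main(void)
    {
            return do_work(1) == -1 ? 0 : 1;
    }
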
@@ -4834,9 +4844,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        if (inode->i_nlink == ~0U)
                return -EMLINK;
 
-       btrfs_inc_nlink(inode);
-       inode->i_ctime = CURRENT_TIME;
-
        err = btrfs_set_inode_index(dir, &index);
        if (err)
                goto fail;
@@ -4852,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                goto fail;
        }
 
+       btrfs_inc_nlink(inode);
+       inode->i_ctime = CURRENT_TIME;
+
        btrfs_set_trans_block_group(trans, dir);
        ihold(inode);
 
@@ -5221,7 +5231,7 @@ again:
                        btrfs_mark_buffer_dirty(leaf);
                }
                set_extent_uptodate(io_tree, em->start,
-                                   extent_map_end(em) - 1, GFP_NOFS);
+                                   extent_map_end(em) - 1, NULL, GFP_NOFS);
                goto insert;
        } else {
                printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -5428,17 +5438,30 @@ out:
 }
 
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+                                                 struct extent_map *em,
                                                  u64 start, u64 len)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
-       struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct btrfs_key ins;
        u64 alloc_hint;
        int ret;
+       bool insert = false;
 
-       btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+       /*
+        * Ok if the extent map we looked up is a hole and is for the exact
+        * range we want, there is no reason to allocate a new one, however if
+        * it is not right then we need to free this one and drop the cache for
+        * our range.
+        */
+       if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
+           em->len != len) {
+               free_extent_map(em);
+               em = NULL;
+               insert = true;
+               btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+       }
 
        trans = btrfs_join_transaction(root, 0);
        if (IS_ERR(trans))
@@ -5454,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
                goto out;
        }
 
-       em = alloc_extent_map(GFP_NOFS);
        if (!em) {
-               em = ERR_PTR(-ENOMEM);
-               goto out;
+               em = alloc_extent_map(GFP_NOFS);
+               if (!em) {
+                       em = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
        }
 
        em->start = start;
@@ -5467,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        em->block_start = ins.objectid;
        em->block_len = ins.offset;
        em->bdev = root->fs_info->fs_devices->latest_bdev;
+
+       /*
+        * We need to do this because if we're using the original em we searched
+        * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
+        */
+       em->flags = 0;
        set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
-       while (1) {
+       while (insert) {
                write_lock(&em_tree->lock);
                ret = add_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);
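
Per the comment added above, btrfs_new_extent_direct() now reuses the extent
map it was handed when that map is a hole covering exactly the requested
range, and only frees and reallocates otherwise (clearing em->flags either way
so a stale EXTENT_FLAG_VACANCY cannot leak through). A small sketch of the
reuse-if-exact-match pattern, with invented structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct em { unsigned long start, len; };

    /* reuse the looked-up object only when it covers exactly [start, len) */
    static struct em *get_em(struct em *found, unsigned long start,
                             unsigned long len)
    {
            if (found && found->start == start && found->len == len)
                    return found;           /* exact match: no realloc */

            free(found);
            found = malloc(sizeof(*found));
            if (!found)
                    return NULL;
            found->start = start;
            found->len = len;
            return found;
    }

    int main(void)
    {
            struct em *em = malloc(sizeof(*em));

            em->start = 0;
            em->len = 4096;
            em = get_em(em, 0, 4096);       /* reused as-is */
            printf("%lu+%lu\n", em->start, em->len);
            free(em);
            return 0;
    }
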
@@ -5687,8 +5718,7 @@ must_cow:
         * it above
         */
        len = bh_result->b_size;
-       free_extent_map(em);
-       em = btrfs_new_extent_direct(inode, start, len);
+       em = btrfs_new_extent_direct(inode, em, start, len);
        if (IS_ERR(em))
                return PTR_ERR(em);
        len = min(len, em->len - (start - em->start));
@@ -5851,8 +5881,10 @@ again:
        }
 
        add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
-       btrfs_ordered_update_i_size(inode, 0, ordered);
-       btrfs_update_inode(trans, root, inode);
+       ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+       if (!ret)
+               btrfs_update_inode(trans, root, inode);
+       ret = 0;
 out_unlock:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
                             ordered->file_offset + ordered->len - 1,
@@ -5938,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 
 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                                         int rw, u64 file_offset, int skip_sum,
-                                        u32 *csums)
+                                        u32 *csums, int async_submit)
 {
        int write = rw & REQ_WRITE;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5949,13 +5981,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
        if (ret)
                goto err;
 
-       if (write && !skip_sum) {
+       if (skip_sum)
+               goto map;
+
+       if (write && async_submit) {
                ret = btrfs_wq_submit_bio(root->fs_info,
                                   inode, rw, bio, 0, 0,
                                   file_offset,
                                   __btrfs_submit_bio_start_direct_io,
                                   __btrfs_submit_bio_done);
                goto err;
+       } else if (write) {
+               /*
+                * If we aren't doing async submit, calculate the csum of the
+                * bio now.
+                */
+               ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+               if (ret)
+                       goto err;
        } else if (!skip_sum) {
                ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
                                          file_offset, csums);
@@ -5963,7 +6006,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                        goto err;
        }
 
-       ret = btrfs_map_bio(root, rw, bio, 0, 1);
+map:
+       ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
 err:
        bio_put(bio);
        return ret;
@@ -5985,15 +6029,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int nr_pages = 0;
        u32 *csums = dip->csums;
        int ret = 0;
+       int async_submit = 0;
        int write = rw & REQ_WRITE;
 
-       bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
-       if (!bio)
-               return -ENOMEM;
-       bio->bi_private = dip;
-       bio->bi_end_io = btrfs_end_dio_bio;
-       atomic_inc(&dip->pending_bios);
-
        map_length = orig_bio->bi_size;
        ret = btrfs_map_block(map_tree, READ, start_sector << 9,
                              &map_length, NULL, 0);
@@ -6002,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                return -EIO;
        }
 
+       if (map_length >= orig_bio->bi_size) {
+               bio = orig_bio;
+               goto submit;
+       }
+
+       async_submit = 1;
+       bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+       if (!bio)
+               return -ENOMEM;
+       bio->bi_private = dip;
+       bio->bi_end_io = btrfs_end_dio_bio;
+       atomic_inc(&dip->pending_bios);
+
        while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
                if (unlikely(map_length < submit_len + bvec->bv_len ||
                    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
@@ -6015,7 +6066,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        atomic_inc(&dip->pending_bios);
                        ret = __btrfs_submit_dio_bio(bio, inode, rw,
                                                     file_offset, skip_sum,
-                                                    csums);
+                                                    csums, async_submit);
                        if (ret) {
                                bio_put(bio);
                                atomic_dec(&dip->pending_bios);
@@ -6052,8 +6103,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                }
        }
 
+submit:
        ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
-                                    csums);
+                                    csums, async_submit);
        if (!ret)
                return 0;
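
The submit-hook change above adds a fast path: when btrfs_map_block() reports
that the whole original bio fits in one contiguous chunk (map_length >=
orig_bio->bi_size), the bio is submitted as-is with its checksum computed
inline, and only bios that genuinely need splitting take the async submit
path. A rough, self-contained sketch of that dispatch, with invented sizes in
place of real bio plumbing:

    #include <stdio.h>

    #define STRIPE_LEN 65536u       /* pretend contiguous-mapping limit */

    static void submit(unsigned int len, int async)
    {
            printf("submit %u bytes, async=%d\n", len, async);
    }

    int main(void)
    {
            unsigned int bio_size = 48 * 1024;

            if (bio_size <= STRIPE_LEN) {
                    submit(bio_size, 0);    /* fits: no split, inline csum */
            } else {
                    unsigned int off, chunk;

                    for (off = 0; off < bio_size; off += chunk) {
                            chunk = bio_size - off < STRIPE_LEN ?
                                    bio_size - off : STRIPE_LEN;
                            submit(chunk, 1);       /* split: async csum */
                    }
            }
            return 0;
    }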
 
@@ -6148,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                        unsigned long nr_segs)
 {
        int seg;
+       int i;
        size_t size;
        unsigned long addr;
        unsigned blocksize_mask = root->sectorsize - 1;
@@ -6162,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                addr = (unsigned long)iov[seg].iov_base;
                size = iov[seg].iov_len;
                end += size;
-               if ((addr & blocksize_mask) || (size & blocksize_mask)) 
+               if ((addr & blocksize_mask) || (size & blocksize_mask))
                        goto out;
+
+               /* If this is a write we don't need to check anymore */
+               if (rw & WRITE)
+                       continue;
+
+               /*
+                * Check to make sure we don't have duplicate iov_base's in this
+                * iovec, if so return EINVAL, otherwise we'll get csum errors
+                * when reading back.
+                */
+               for (i = seg + 1; i < nr_segs; i++) {
+                       if (iov[seg].iov_base == iov[i].iov_base)
+                               goto out;
+               }
        }
        retval = 0;
 out:
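
The check above rejects direct-I/O reads whose iovec repeats an iov_base,
since, per the patch's own comment, duplicates lead to csum errors when
reading back. It is a deliberately simple quadratic scan, cheap at typical
segment counts. A userspace version of the same test:

    #include <stdio.h>
    #include <sys/uio.h>

    static int has_dup_base(const struct iovec *iov, unsigned long nr_segs)
    {
            for (unsigned long seg = 0; seg < nr_segs; seg++)
                    for (unsigned long i = seg + 1; i < nr_segs; i++)
                            if (iov[seg].iov_base == iov[i].iov_base)
                                    return 1;
            return 0;
    }

    int main(void)
    {
            char buf[8];
            struct iovec iov[2] = {
                    { .iov_base = buf, .iov_len = 4 },
                    { .iov_base = buf, .iov_len = 4 },  /* duplicate base */
            };

            printf("dup: %d\n", has_dup_base(iov, 2));
            return 0;
    }
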
index cfc264f..ffb48d6 100644 (file)
@@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
        struct btrfs_ioctl_space_info space;
        struct btrfs_ioctl_space_info *dest;
        struct btrfs_ioctl_space_info *dest_orig;
-       struct btrfs_ioctl_space_info *user_dest;
+       struct btrfs_ioctl_space_info __user *user_dest;
        struct btrfs_space_info *info;
        u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
                       BTRFS_BLOCK_GROUP_SYSTEM,
index 58e7de9..0ac712e 100644 (file)
@@ -159,7 +159,7 @@ enum {
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
        Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_err,
+       Opt_enospc_debug, Opt_subvolrootid, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -189,6 +189,7 @@ static match_table_t tokens = {
        {Opt_clear_cache, "clear_cache"},
        {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
        {Opt_enospc_debug, "enospc_debug"},
+       {Opt_subvolrootid, "subvolrootid=%d"},
        {Opt_err, NULL},
 };
 
@@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                        break;
                case Opt_subvol:
                case Opt_subvolid:
+               case Opt_subvolrootid:
                case Opt_device:
                        /*
                         * These are parsed by btrfs_parse_early_options
@@ -388,7 +390,7 @@ out:
  */
 static int btrfs_parse_early_options(const char *options, fmode_t flags,
                void *holder, char **subvol_name, u64 *subvol_objectid,
-               struct btrfs_fs_devices **fs_devices)
+               u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
 {
        substring_t args[MAX_OPT_ARGS];
        char *opts, *orig, *p;
@@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
                                        *subvol_objectid = intarg;
                        }
                        break;
+               case Opt_subvolrootid:
+                       intarg = 0;
+                       error = match_int(&args[0], &intarg);
+                       if (!error) {
+                               /* we want the original fs_tree */
+                               if (!intarg)
+                                       *subvol_rootid =
+                                               BTRFS_FS_TREE_OBJECTID;
+                               else
+                                       *subvol_rootid = intarg;
+                       }
+                       break;
                case Opt_device:
                        error = btrfs_scan_one_device(match_strdup(&args[0]),
                                        flags, holder, fs_devices);
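
The new subvolrootid= option is parsed in the same early pass as subvol= and
subvolid=, before the superblock is set up. A userspace sketch of that style
of early option scanning, using POSIX getsubopt() as a stand-in for the
kernel's match_token():

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char opts[] = "subvol=snap1,subvolrootid=257,compress";
            char *subopts = opts, *value;
            char *const tokens[] = { "subvol", "subvolrootid", NULL };
            unsigned long long rootid = 0;
            char *subvol = NULL;

            while (*subopts != '\0') {
                    switch (getsubopt(&subopts, tokens, &value)) {
                    case 0:
                            subvol = value;
                            break;
                    case 1:
                            rootid = strtoull(value, NULL, 10);
                            break;
                    default:
                            break;  /* later options handled elsewhere */
                    }
            }
            printf("subvol=%s rootid=%llu\n", subvol ? subvol : ".", rootid);
            return 0;
    }
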
@@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        fmode_t mode = FMODE_READ;
        char *subvol_name = NULL;
        u64 subvol_objectid = 0;
+       u64 subvol_rootid = 0;
        int error = 0;
 
        if (!(flags & MS_RDONLY))
@@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 
        error = btrfs_parse_early_options(data, mode, fs_type,
                                          &subvol_name, &subvol_objectid,
-                                         &fs_devices);
+                                         &subvol_rootid, &fs_devices);
        if (error)
                return ERR_PTR(error);
 
@@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                s->s_flags |= MS_ACTIVE;
        }
 
-       root = get_default_root(s, subvol_objectid);
-       if (IS_ERR(root)) {
-               error = PTR_ERR(root);
-               deactivate_locked_super(s);
-               goto error_free_subvol_name;
-       }
        /* if they gave us a subvolume name bind mount into that */
        if (strcmp(subvol_name, ".")) {
                struct dentry *new_root;
+
+               root = get_default_root(s, subvol_rootid);
+               if (IS_ERR(root)) {
+                       error = PTR_ERR(root);
+                       deactivate_locked_super(s);
+                       goto error_free_subvol_name;
+               }
+
                mutex_lock(&root->d_inode->i_mutex);
                new_root = lookup_one_len(subvol_name, root,
                                      strlen(subvol_name));
@@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                }
                dput(root);
                root = new_root;
+       } else {
+               root = get_default_root(s, subvol_objectid);
+               if (IS_ERR(root)) {
+                       error = PTR_ERR(root);
+                       deactivate_locked_super(s);
+                       goto error_free_subvol_name;
+               }
        }
 
        kfree(subvol_name);
index 5b158da..c571734 100644 (file)
 
 static noinline void put_transaction(struct btrfs_transaction *transaction)
 {
-       WARN_ON(transaction->use_count == 0);
-       transaction->use_count--;
-       if (transaction->use_count == 0) {
-               list_del_init(&transaction->list);
+       WARN_ON(atomic_read(&transaction->use_count) == 0);
+       if (atomic_dec_and_test(&transaction->use_count)) {
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
@@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root)
                if (!cur_trans)
                        return -ENOMEM;
                root->fs_info->generation++;
-               cur_trans->num_writers = 1;
+               atomic_set(&cur_trans->num_writers, 1);
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
-               cur_trans->use_count = 1;
+               atomic_set(&cur_trans->use_count, 1);
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
 
@@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root)
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
-               cur_trans->num_writers++;
+               atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
        }
 
@@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root)
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
-               cur_trans->use_count++;
+               atomic_inc(&cur_trans->use_count);
                while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
@@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 {
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
+       int retries = 0;
        int ret;
 
        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -204,7 +203,7 @@ again:
        }
 
        cur_trans = root->fs_info->running_transaction;
-       cur_trans->use_count++;
+       atomic_inc(&cur_trans->use_count);
        if (type != TRANS_JOIN_NOLOCK)
                mutex_unlock(&root->fs_info->trans_mutex);
 
@@ -224,10 +223,18 @@ again:
 
        if (num_items > 0) {
                ret = btrfs_trans_reserve_metadata(h, root, num_items);
-               if (ret == -EAGAIN) {
+               if (ret == -EAGAIN && !retries) {
+                       retries++;
                        btrfs_commit_transaction(h, root);
                        goto again;
+               } else if (ret == -EAGAIN) {
+                       /*
+                        * We have already retried and got EAGAIN, so really we
+                        * don't have space, so set ret to -ENOSPC.
+                        */
+                       ret = -ENOSPC;
                }
+
                if (ret < 0) {
                        btrfs_end_transaction(h, root);
                        return ERR_PTR(ret);
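
start_transaction() above now retries the metadata reservation at most once:
the first -EAGAIN commits the running transaction and loops, a second one is
converted to a hard -ENOSPC. A miniature of that retry-once pattern:

    #include <errno.h>
    #include <stdio.h>

    /* stand-in for btrfs_trans_reserve_metadata(): always short on space */
    static int reserve(void)
    {
            return -EAGAIN;
    }

    int main(void)
    {
            int retries = 0, ret;

    again:
            ret = reserve();
            if (ret == -EAGAIN && !retries) {
                    retries++;
                    /* would commit the transaction here, then retry */
                    goto again;
            } else if (ret == -EAGAIN) {
                    ret = -ENOSPC;  /* already retried: really no space */
            }
            printf("ret = %d\n", ret);
            return 0;
    }
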
@@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
                        goto out_unlock;  /* nothing committing|committed */
        }
 
-       cur_trans->use_count++;
+       atomic_inc(&cur_trans->use_count);
        mutex_unlock(&root->fs_info->trans_mutex);
 
        wait_for_commit(root, cur_trans);
@@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                        wake_up_process(info->transaction_kthread);
        }
 
-       if (lock)
-               mutex_lock(&info->trans_mutex);
        WARN_ON(cur_trans != info->running_transaction);
-       WARN_ON(cur_trans->num_writers < 1);
-       cur_trans->num_writers--;
+       WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
+       atomic_dec(&cur_trans->num_writers);
 
        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
-       if (lock)
-               mutex_unlock(&info->trans_mutex);
 
        if (current->journal_info == trans)
                current->journal_info = NULL;
@@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        /* take transaction reference */
        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = trans->transaction;
-       cur_trans->use_count++;
+       atomic_inc(&cur_trans->use_count);
        mutex_unlock(&root->fs_info->trans_mutex);
 
        btrfs_end_transaction(trans, root);
@@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        mutex_lock(&root->fs_info->trans_mutex);
        if (cur_trans->in_commit) {
-               cur_trans->use_count++;
+               atomic_inc(&cur_trans->use_count);
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);
 
@@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
-                       prev_trans->use_count++;
+                       atomic_inc(&prev_trans->use_count);
                        mutex_unlock(&root->fs_info->trans_mutex);
 
                        wait_for_commit(root, prev_trans);
@@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                                TASK_UNINTERRUPTIBLE);
 
                smp_mb();
-               if (cur_trans->num_writers > 1)
+               if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);
 
                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
-       } while (cur_trans->num_writers > 1 ||
+       } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));
 
        ret = create_pending_snapshots(trans, root->fs_info);
@@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        wake_up(&cur_trans->commit_wait);
 
+       list_del_init(&cur_trans->list);
        put_transaction(cur_trans);
        put_transaction(cur_trans);
 
index 229a594..e441acc 100644 (file)
@@ -27,11 +27,11 @@ struct btrfs_transaction {
         * total writers in this transaction, it must be zero before the
         * transaction can end
         */
-       unsigned long num_writers;
+       atomic_t num_writers;
 
        unsigned long num_joined;
        int in_commit;
-       int use_count;
+       atomic_t use_count;
        int commit_done;
        int blocked;
        struct list_head list;
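
Converting num_writers and use_count to atomic_t lets __btrfs_end_transaction()
drop its trans_mutex round trip: the counts are safe to touch locklessly, and
the list removal that used to hide inside put_transaction() moves to the one
caller that holds the lock. A C11 sketch of the atomic refcount idiom (the
kernel's atomic_dec_and_test() is the fetch_sub-was-1 test below):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct trans {
            atomic_int use_count;
    };

    static struct trans *trans_get(struct trans *t)
    {
            atomic_fetch_add(&t->use_count, 1);
            return t;
    }

    static void trans_put(struct trans *t)
    {
            /* last reference frees; no external lock protects the count */
            if (atomic_fetch_sub(&t->use_count, 1) == 1)
                    free(t);
    }

    int main(void)
    {
            struct trans *t = malloc(sizeof(*t));

            if (!t)
                    return 1;
            atomic_init(&t->use_count, 1);
            trans_get(t);
            trans_put(t);
            trans_put(t);   /* drops the final reference and frees */
            return 0;
    }
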
index a5303b8..cfd6605 100644 (file)
@@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dir_item *di;
-       int ret = 0, slot, advance;
+       int ret = 0, slot;
        size_t total_size = 0, size_left = size;
        unsigned long name_ptr;
        size_t name_len;
-       u32 nritems;
 
        /*
         * ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
-       advance = 0;
+
        while (1) {
                leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
                slot = path->slots[0];
 
                /* this is where we start walking through the path */
-               if (advance || slot >= nritems) {
+               if (slot >= btrfs_header_nritems(leaf)) {
                        /*
                         * if we've reached the last slot in this leaf we need
                         * to go to the next leaf and reset everything
                         */
-                       if (slot >= nritems-1) {
-                               ret = btrfs_next_leaf(root, path);
-                               if (ret)
-                                       break;
-                               leaf = path->nodes[0];
-                               nritems = btrfs_header_nritems(leaf);
-                               slot = path->slots[0];
-                       } else {
-                               /*
-                                * just walking through the slots on this leaf
-                                */
-                               slot++;
-                               path->slots[0]++;
-                       }
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto err;
+                       else if (ret > 0)
+                               break;
+                       continue;
                }
-               advance = 1;
 
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
@@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
                /* we are just looking for how big our buffer needs to be */
                if (!size)
-                       continue;
+                       goto next;
 
                if (!buffer || (name_len + 1) > size_left) {
                        ret = -ERANGE;
@@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
                size_left -= name_len + 1;
                buffer += name_len + 1;
+next:
+               path->slots[0]++;
        }
        ret = total_size;
 
index fe16835..74ab165 100644 (file)
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled     If set to one then the client will attempt to
                        support and want to map the uid and gid fields 
                        to values supplied at mount (rather than the 
                        actual values, then set this to zero. (default 1)
-Experimental            When set to 1 used to enable certain experimental
-                       features (currently enables multipage writes
-                       when signing is enabled, the multipage write
-                       performance enhancement was disabled when
-                       signing turned on in case buffer was modified
-                       just before it was sent, also this flag will
-                       be used to use the new experimental directory change 
-                       notification code).  When set to 2 enables
-                       an additional experimental feature, "raw ntlmssp"
-                       session establishment support (which allows
-                       specifying "sec=ntlmssp" on mount). The Linux cifs
-                       module will use ntlmv2 authentication encapsulated
-                       in "raw ntlmssp" (not using SPNEGO) when
-                       "sec=ntlmssp" is specified on mount.
-                       This support also requires building cifs with
-                       the CONFIG_CIFS_EXPERIMENTAL configuration flag.
 
 These experimental features and tracing can be enabled by changing flags in 
 /proc/fs/cifs (after the cifs module has been installed or built into the 
index e654dfd..53d57a3 100644 (file)
@@ -50,7 +50,7 @@ void cifs_fscache_unregister(void)
  */
 struct cifs_server_key {
        uint16_t        family;         /* address family */
-       uint16_t        port;           /* IP port */
+       __be16          port;           /* IP port */
        union {
                struct in_addr  ipv4_addr;
                struct in6_addr ipv6_addr;
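
Typing the port as __be16 (here and in the connect paths further down) records
that it is kept in network byte order, so sparse can flag any host-order use.
The userspace equivalent of the conversion involved:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t host_port = 445;               /* CIFS over TCP */
            uint16_t wire_port = htons(host_port);  /* network byte order */

            printf("host %u -> wire 0x%04x\n", host_port, wire_port);
            return 0;
    }
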
index 65829d3..30d01bc 100644 (file)
@@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
 static const struct file_operations traceSMB_proc_fops;
 static const struct file_operations cifs_multiuser_mount_proc_fops;
 static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
 static const struct file_operations cifs_linux_ext_proc_fops;
 
 void
@@ -441,8 +440,6 @@ cifs_proc_init(void)
        proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
        proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
        proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
-       proc_create("Experimental", 0, proc_fs_cifs,
-                   &cifs_experimental_proc_fops);
        proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
                    &cifs_linux_ext_proc_fops);
        proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +466,6 @@ cifs_proc_clean(void)
        remove_proc_entry("OplockEnabled", proc_fs_cifs);
        remove_proc_entry("SecurityFlags", proc_fs_cifs);
        remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
-       remove_proc_entry("Experimental", proc_fs_cifs);
        remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
        remove_proc_entry("fs/cifs", NULL);
 }
@@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
        .write          = cifs_oplock_proc_write,
 };
 
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
-       seq_printf(m, "%d\n", experimEnabled);
-       return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
-               const char __user *buffer, size_t count, loff_t *ppos)
-{
-       char c;
-       int rc;
-
-       rc = get_user(c, buffer);
-       if (rc)
-               return rc;
-       if (c == '0' || c == 'n' || c == 'N')
-               experimEnabled = 0;
-       else if (c == '1' || c == 'y' || c == 'Y')
-               experimEnabled = 1;
-       else if (c == '2')
-               experimEnabled = 2;
-
-       return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = cifs_experimental_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_experimental_proc_write,
-};
-
 static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
 {
        seq_printf(m, "%d\n", linuxExtEnabled);
index 4dfba82..33d2213 100644 (file)
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
                   MAX_MECH_STR_LEN +
                   UID_KEY_LEN + (sizeof(uid_t) * 2) +
                   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
-                  USER_KEY_LEN + strlen(sesInfo->userName) +
+                  USER_KEY_LEN + strlen(sesInfo->user_name) +
                   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
 
        spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
        sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
 
        dp = description + strlen(description);
-       sprintf(dp, ";user=%s", sesInfo->userName);
+       sprintf(dp, ";user=%s", sesInfo->user_name);
 
        dp = description + strlen(description);
        sprintf(dp, ";pid=0x%x", current->pid);
index fc0fd4f..23d43cd 100644 (file)
@@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
        case UNI_COLON:
                *target = ':';
                break;
-       case UNI_ASTERIK:
+       case UNI_ASTERISK:
                *target = '*';
                break;
        case UNI_QUESTION:
@@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
  * names are little endian 16 bit Unicode on the wire
  */
 int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+cifsConvertToUCS(__le16 *target, const char *source, int srclen,
                 const struct nls_table *cp, int mapChars)
 {
        int i, j, charlen;
-       int len_remaining = maxlen;
        char src_char;
-       __u16 temp;
+       __le16 dst_char;
+       wchar_t tmp;
 
        if (!mapChars)
                return cifs_strtoUCS(target, source, PATH_MAX, cp);
 
-       for (i = 0, j = 0; i < maxlen; j++) {
+       for (i = 0, j = 0; i < srclen; j++) {
                src_char = source[i];
                switch (src_char) {
                case 0:
-                       put_unaligned_le16(0, &target[j]);
+                       put_unaligned(0, &target[j]);
                        goto ctoUCS_out;
                case ':':
-                       temp = UNI_COLON;
+                       dst_char = cpu_to_le16(UNI_COLON);
                        break;
                case '*':
-                       temp = UNI_ASTERIK;
+                       dst_char = cpu_to_le16(UNI_ASTERISK);
                        break;
                case '?':
-                       temp = UNI_QUESTION;
+                       dst_char = cpu_to_le16(UNI_QUESTION);
                        break;
                case '<':
-                       temp = UNI_LESSTHAN;
+                       dst_char = cpu_to_le16(UNI_LESSTHAN);
                        break;
                case '>':
-                       temp = UNI_GRTRTHAN;
+                       dst_char = cpu_to_le16(UNI_GRTRTHAN);
                        break;
                case '|':
-                       temp = UNI_PIPE;
+                       dst_char = cpu_to_le16(UNI_PIPE);
                        break;
                /*
                 * FIXME: We can not handle remapping backslash (UNI_SLASH)
@@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
                 * as they use backslash as separator.
                 */
                default:
-                       charlen = cp->char2uni(source+i, len_remaining,
-                                               &temp);
+                       charlen = cp->char2uni(source + i, srclen - i, &tmp);
+                       dst_char = cpu_to_le16(tmp);
+
                        /*
                         * if no match, use question mark, which at least in
                         * some cases serves as wild card
                         */
                        if (charlen < 1) {
-                               temp = 0x003f;
+                               dst_char = cpu_to_le16(0x003f);
                                charlen = 1;
                        }
-                       len_remaining -= charlen;
                        /*
                         * character may take more than one byte in the source
                         * string, but will take exactly two bytes in the
@@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
                        i += charlen;
                        continue;
                }
-               put_unaligned_le16(temp, &target[j]);
+               put_unaligned(dst_char, &target[j]);
                i++; /* move to next char in source string */
-               len_remaining--;
        }
 
 ctoUCS_out:
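
The conversion loop now produces explicitly little-endian 16-bit code units:
each wide character is converted once with cpu_to_le16() and stored with
put_unaligned(), and the remaining source length is derived from srclen rather
than a separately maintained len_remaining counter. A userspace sketch of the
explicit LE store, with the byte order spelled out by hand instead of
cpu_to_le16():

    #include <stdint.h>
    #include <stdio.h>

    static void put_le16(uint8_t *dst, uint16_t v)
    {
            dst[0] = v & 0xff;      /* low byte first: little endian */
            dst[1] = v >> 8;
    }

    int main(void)
    {
            uint8_t wire[4];

            put_le16(wire, 0xF02A);         /* UNI_ASTERISK: '*' + 0xF000 */
            put_le16(wire + 2, 0);          /* terminator */
            printf("%02x %02x\n", wire[0], wire[1]);    /* prints: 2a f0 */
            return 0;
    }
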
index 7fe6b52..644dd88 100644 (file)
@@ -44,7 +44,7 @@
  * reserved symbols (along with \ and /), otherwise illegal to store
  * in filenames in NTFS
  */
-#define UNI_ASTERIK   (__u16) ('*' + 0xF000)
+#define UNI_ASTERISK    (__u16) ('*' + 0xF000)
 #define UNI_QUESTION    (__u16) ('?' + 0xF000)
 #define UNI_COLON       (__u16) (':' + 0xF000)
 #define UNI_GRTRTHAN    (__u16) ('>' + 0xF000)
index a51585f..d1a016b 100644 (file)
 #include <linux/ctype.h>
 #include <linux/random.h>
 
-/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
-/* the 16 byte signature must be allocated by the caller  */
-/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
-       sequence number before this function is called */
-
+/*
+ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+ * The 16 byte signature must be allocated by the caller. Note we only use the
+ * 1st eight bytes and that the smb header signature field on input contains
+ * the sequence number before this function is called. Also, this function
+ * should be called with the server->srv_mutex held.
+ */
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
                                struct TCP_Server_Info *server, char *signature)
 {
@@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
+       mutex_lock(&server->srv_mutex);
        rc = cifs_calculate_signature(cifs_pdu, server,
                what_we_think_sig_should_be);
+       mutex_unlock(&server->srv_mutex);
 
        if (rc)
                return rc;
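
The signature computation consumes a per-server sequence number, so concurrent
signers must be serialized; the reworked comment and the mutex_lock/unlock
pair above enforce srv_mutex in the verify path too. A pthreads analogue of
the invariant being protected:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long sequence;

    static unsigned long sign_next(void)
    {
            unsigned long sig;

            pthread_mutex_lock(&srv_mutex);
            sig = sequence++;       /* read-modify-write must not interleave */
            pthread_mutex_unlock(&srv_mutex);
            return sig;
    }

    int main(void)
    {
            printf("%lu %lu\n", sign_next(), sign_next());
            return 0;
    }
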
@@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
                return rc;
        }
 
-       /* convert ses->userName to unicode and uppercase */
-       len = strlen(ses->userName);
+       /* convert ses->user_name to unicode and uppercase */
+       len = strlen(ses->user_name);
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
        if (user == NULL) {
                cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
                rc = -ENOMEM;
                goto calc_exit_2;
        }
-       len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+       len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
        UniStrupr(user);
 
        crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
index f297013..5c412b3 100644 (file)
@@ -53,7 +53,6 @@ int cifsFYI = 0;
 int cifsERROR = 1;
 int traceSMB = 0;
 unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
 unsigned int linuxExtEnabled = 1;
 unsigned int lookupCacheEnabled = 1;
 unsigned int multiuser_mount = 0;
@@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data,
                kfree(cifs_sb);
                return rc;
        }
+       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
        /* copy mount params to sb for use in submounts */
@@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
                seq_printf(s, ",multiuser");
-       else if (tcon->ses->userName)
-               seq_printf(s, ",username=%s", tcon->ses->userName);
+       else if (tcon->ses->user_name)
+               seq_printf(s, ",username=%s", tcon->ses->user_name);
 
        if (tcon->ses->domainName)
                seq_printf(s, ",domain=%s", tcon->ses->domainName);
index 17afb0f..a5d1106 100644 (file)
 
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE  64     /* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32   /* 32 is to allow for 15 char names + null
-                                  termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
+#define MAX_SHARE_SIZE 80
+#define MAX_USERNAME_SIZE 256  /* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
 
 #define CIFS_MIN_RCV_POOL 4
 
@@ -92,7 +91,8 @@ enum statusEnum {
        CifsNew = 0,
        CifsGood,
        CifsExiting,
-       CifsNeedReconnect
+       CifsNeedReconnect,
+       CifsNeedNegotiate
 };
 
 enum securityEnum {
@@ -274,7 +274,7 @@ struct cifsSesInfo {
        int capabilities;
        char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
                                TCP names - will ipv6 and sctp addresses fit? */
-       char userName[MAX_USERNAME_SIZE + 1];
+       char *user_name;
        char *domainName;
        char *password;
        struct session_key auth_key;
@@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
                                have the uid/password or Kerberos credential
                                or equivalent for current user */
 GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
 GLOBAL_EXTERN unsigned int lookupCacheEnabled;
 GLOBAL_EXTERN unsigned int global_secflags;    /* if on, session setup sent
                                with more secure ntlmssp2 challenge/resp */
index 2644a5d..df959ba 100644 (file)
@@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
         */
        while (server->tcpStatus == CifsNeedReconnect) {
                wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus == CifsGood), 10 * HZ);
+                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
 
-               /* is TCP session is reestablished now ?*/
+               /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
                        break;
 
@@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
                return rc;
 
        /* set up echo request */
-       smb->hdr.Tid = cpu_to_le16(0xffff);
+       smb->hdr.Tid = 0xffff;
        smb->hdr.WordCount = 1;
        put_unaligned_le16(1, &smb->EchoCount);
        put_bcc_le(1, &smb->hdr);
@@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
                                        __constant_cpu_to_le16(CIFS_WRLCK))
                                pLockData->fl_type = F_WRLCK;
 
-                       pLockData->fl_start = parm_data->start;
-                       pLockData->fl_end = parm_data->start +
-                                               parm_data->length - 1;
-                       pLockData->fl_pid = parm_data->pid;
+                       pLockData->fl_start = le64_to_cpu(parm_data->start);
+                       pLockData->fl_end = pLockData->fl_start +
+                                       le64_to_cpu(parm_data->length) - 1;
+                       pLockData->fl_pid = le32_to_cpu(parm_data->pid);
                }
        }
 
index 6e2b2ad..db9d55b 100644 (file)
@@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
        }
        spin_unlock(&GlobalMid_Lock);
 
-       while ((server->tcpStatus != CifsExiting) &&
-              (server->tcpStatus != CifsGood)) {
+       while (server->tcpStatus == CifsNeedReconnect) {
                try_to_freeze();
 
                /* we should try only the port we connected to before */
@@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                        atomic_inc(&tcpSesReconnectCount);
                        spin_lock(&GlobalMid_Lock);
                        if (server->tcpStatus != CifsExiting)
-                               server->tcpStatus = CifsGood;
+                               server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&GlobalMid_Lock);
                }
        }
@@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
        total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
        data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
-       remaining = total_data_size - data_in_this_rsp;
-
-       if (remaining == 0)
+       if (total_data_size == data_in_this_rsp)
                return 0;
-       else if (remaining < 0) {
+       else if (total_data_size < data_in_this_rsp) {
                cFYI(1, "total data %d smaller than data in frame %d",
                        total_data_size, data_in_this_rsp);
                return -EINVAL;
-       } else {
-               cFYI(1, "missing %d bytes from transact2, check next response",
-                       remaining);
-               if (total_data_size > maxBufSize) {
-                       cERROR(1, "TotalDataSize %d is over maximum buffer %d",
-                               total_data_size, maxBufSize);
-                       return -EINVAL;
-               }
-               return remaining;
        }
+
+       remaining = total_data_size - data_in_this_rsp;
+
+       cFYI(1, "missing %d bytes from transact2, check next response",
+               remaining);
+       if (total_data_size > maxBufSize) {
+               cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+                       total_data_size, maxBufSize);
+               return -EINVAL;
+       }
+       return remaining;
 }
 
 static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
@@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
                pdu_length = 4; /* enough to get RFC1001 header */
 
 incomplete_rcv:
-               if (echo_retries > 0 &&
+               if (echo_retries > 0 && server->tcpStatus == CifsGood &&
                    time_after(jiffies, server->lstrp +
                                        (echo_retries * SMB_ECHO_INTERVAL))) {
                        cERROR(1, "Server %s has not responded in %d seconds. "
@@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname,
                                /* null user, ie anonymous, authentication */
                                vol->nullauth = 1;
                        }
-                       if (strnlen(value, 200) < 200) {
+                       if (strnlen(value, MAX_USERNAME_SIZE) <
+                                               MAX_USERNAME_SIZE) {
                                vol->username = value;
                        } else {
                                printk(KERN_WARNING "CIFS: username too long\n");
@@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
 static bool
 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
 {
-       unsigned short int port, *sport;
+       __be16 port, *sport;
 
        switch (addr->sa_family) {
        case AF_INET:
@@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
                module_put(THIS_MODULE);
                goto out_err_crypto_release;
        }
+       tcp_ses->tcpStatus = CifsNeedNegotiate;
 
        /* thread spawned, put it on the list */
        spin_lock(&cifs_tcp_ses_lock);
@@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
                        break;
                default:
                        /* anything else takes username/password */
-                       if (strncmp(ses->userName, vol->username,
+                       if (ses->user_name == NULL)
+                               continue;
+                       if (strncmp(ses->user_name, vol->username,
                                    MAX_USERNAME_SIZE))
                                continue;
                        if (strlen(vol->username) != 0 &&
@@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
        cifs_put_tcp_session(server);
 }
 
+static bool warned_on_ntlm;  /* globals init to false automatically */
+
 static struct cifsSesInfo *
 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 {
@@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        else
                sprintf(ses->serverName, "%pI4", &addr->sin_addr);
 
-       if (volume_info->username)
-               strncpy(ses->userName, volume_info->username,
-                       MAX_USERNAME_SIZE);
+       if (volume_info->username) {
+               ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+               if (!ses->user_name)
+                       goto get_ses_fail;
+       }
 
        /* volume_info->password freed at unmount */
        if (volume_info->password) {
@@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        }
        ses->cred_uid = volume_info->cred_uid;
        ses->linux_uid = volume_info->linux_uid;
+
+       /* ntlmv2 is much stronger than ntlm security, and has been broadly
+       supported for many years, time to update default security mechanism */
+       if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
+               warned_on_ntlm = true;
+               cERROR(1, "default security mechanism requested.  The default "
+                       "security mechanism will be upgraded from ntlm to "
+                       "ntlmv2 in kernel release 2.6.41");
+       }
        ses->overrideSecFlg = volume_info->secFlg;
 
        mutex_lock(&ses->session_mutex);
@@ -2276,7 +2292,7 @@ static int
 generic_ip_connect(struct TCP_Server_Info *server)
 {
        int rc = 0;
-       unsigned short int sport;
+       __be16 sport;
        int slen, sfamily;
        struct socket *socket = server->ssocket;
        struct sockaddr *saddr;
@@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
 static int
 ip_connect(struct TCP_Server_Info *server)
 {
-       unsigned short int *sport;
+       __be16 *sport;
        struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
        struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
 
@@ -2826,7 +2842,7 @@ try_mount_again:
 
 remote_path_check:
        /* check if a whole path (including prepath) is not remote */
-       if (!rc && cifs_sb->prepathlen && tcon) {
+       if (!rc && tcon) {
                /* build_path_to_root works only when we have a valid tcon */
                full_path = cifs_build_path_to_root(cifs_sb, tcon);
                if (full_path == NULL) {
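
The unsigned short -> __be16 changes in this file annotate the fact that sin_port/sin6_port hold network (big-endian) byte order, letting sparse flag missing conversions. A standalone userspace sketch of the underlying pitfall (assumes only the standard socket API):

        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                struct sockaddr_in sa;

                memset(&sa, 0, sizeof(sa));
                sa.sin_family = AF_INET;
                sa.sin_port = htons(445);       /* stored big-endian */

                /*
                 * Comparing sin_port to a host-order constant like 445 is
                 * wrong on little-endian machines; convert first.  __be16
                 * in the kernel marks exactly these values for sparse.
                 */
                if (ntohs(sa.sin_port) == 445)
                        printf("port is %u\n", ntohs(sa.sin_port));
                return 0;
        }
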
index c27d236..faf5952 100644 (file)
@@ -575,8 +575,10 @@ reopen_error_exit:
 
 int cifs_close(struct inode *inode, struct file *file)
 {
-       cifsFileInfo_put(file->private_data);
-       file->private_data = NULL;
+       if (file->private_data != NULL) {
+               cifsFileInfo_put(file->private_data);
+               file->private_data = NULL;
+       }
 
        /* return code from the ->release op is always ignored */
        return 0;
@@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
+                       struct kvec iov[2];
+                       unsigned int len;
+
                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
@@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
                                if (rc != 0)
                                        break;
                        }
-                       if (experimEnabled || (pTcon->ses->server &&
-                               ((pTcon->ses->server->secMode &
-                               (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-                               == 0))) {
-                               struct kvec iov[2];
-                               unsigned int len;
-
-                               len = min((size_t)cifs_sb->wsize,
-                                         write_size - total_written);
-                               /* iov[0] is reserved for smb header */
-                               iov[1].iov_base = (char *)write_data +
-                                                 total_written;
-                               iov[1].iov_len = len;
-                               rc = CIFSSMBWrite2(xid, pTcon,
-                                               open_file->netfid, len,
-                                               *poffset, &bytes_written,
-                                               iov, 1, 0);
-                       } else
-                               rc = CIFSSMBWrite(xid, pTcon,
-                                        open_file->netfid,
-                                        min_t(const int, cifs_sb->wsize,
-                                              write_size - total_written),
-                                        *poffset, &bytes_written,
-                                        write_data + total_written,
-                                        NULL, 0);
+
+                       len = min((size_t)cifs_sb->wsize,
+                                 write_size - total_written);
+                       /* iov[0] is reserved for smb header */
+                       iov[1].iov_base = (char *)write_data + total_written;
+                       iov[1].iov_len = len;
+                       rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
+                                          *poffset, &bytes_written, iov, 1, 0);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
@@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping,
        }
 
        tcon = tlink_tcon(open_file->tlink);
-       if (!experimEnabled && tcon->ses->server->secMode &
-                       (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-               cifsFileInfo_put(open_file);
-               kfree(iov);
-               return generic_writepages(mapping, wbc);
-       }
        cifsFileInfo_put(open_file);
 
        xid = GetXid();
@@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
        return total_read;
 }
 
+/*
+ * If the page is mmap'ed into a process' page tables, then we need to make
+ * sure that it doesn't change while being written back.
+ */
+static int
+cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct page *page = vmf->page;
+
+       lock_page(page);
+       return VM_FAULT_LOCKED;
+}
+
+static struct vm_operations_struct cifs_file_vm_ops = {
+       .fault = filemap_fault,
+       .page_mkwrite = cifs_page_mkwrite,
+};
+
 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
 {
        int rc, xid;
@@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
                cifs_invalidate_mapping(inode);
 
        rc = generic_file_mmap(file, vma);
+       if (rc == 0)
+               vma->vm_ops = &cifs_file_vm_ops;
        FreeXid(xid);
        return rc;
 }
@@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
                return rc;
        }
        rc = generic_file_mmap(file, vma);
+       if (rc == 0)
+               vma->vm_ops = &cifs_file_vm_ops;
        FreeXid(xid);
        return rc;
 }
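
The rewritten write loop above always goes through CIFSSMBWrite2 with a two-element kvec: iov[0] reserved for the SMB header, iov[1] pointing into the caller's data. A loose userspace analogue of that header-plus-payload pattern using writev (plain file, hypothetical path):

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/uio.h>
        #include <unistd.h>

        int main(void)
        {
                const char header[] = "HDR:";           /* stand-in for the SMB header */
                const char payload[] = "payload bytes";
                struct iovec iov[2];
                int fd = open("/tmp/iov-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);

                if (fd < 0)
                        return 1;

                /* iov[0] carries the protocol header, iov[1] the data;
                 * writev() submits both without an intermediate copy. */
                iov[0].iov_base = (void *)header;
                iov[0].iov_len = sizeof(header) - 1;
                iov[1].iov_base = (void *)payload;
                iov[1].iov_len = sizeof(payload) - 1;

                if (writev(fd, iov, 2) < 0)
                        perror("writev");
                close(fd);
                return 0;
        }
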
index e8804d3..ce417a9 100644 (file)
@@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
        if (rc != 0)
                return rc;
 
-       if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
                CIFSSMBClose(xid, tcon, netfid);
                /* it's not a symlink */
                return -EINVAL;
@@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
        if (rc != 0)
                goto out;
 
-       if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
                CIFSSMBClose(xid, pTcon, netfid);
                /* it's not a symlink */
                goto out;
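
Both hunks fix the same class of bug: EndOfFile is a little-endian wire field, so the constant must be converted with cpu_to_le64() before comparing. A standalone sketch of why the raw comparison only fails on big-endian hosts (the 8064 value is illustrative, and my_cpu_to_le64 is a stand-in built on GCC/Clang builtins):

        #include <stdint.h>
        #include <stdio.h>

        #define SYMLINK_FILE_SIZE 8064          /* illustrative constant */

        /* Stand-in for cpu_to_le64(): byte-swap only on big-endian hosts. */
        static uint64_t my_cpu_to_le64(uint64_t v)
        {
        #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                return __builtin_bswap64(v);
        #else
                return v;
        #endif
        }

        int main(void)
        {
                /* The field as it sits in the on-disk/wire buffer. */
                uint64_t end_of_file = my_cpu_to_le64(SYMLINK_FILE_SIZE);

                /* Correct: convert the constant to on-disk byte order. */
                if (end_of_file == my_cpu_to_le64(SYMLINK_FILE_SIZE))
                        puts("converted compare matches everywhere");

                /* Buggy: raw host-order compare fails on big-endian hosts. */
                if (end_of_file == SYMLINK_FILE_SIZE)
                        puts("raw compare happens to match (little-endian host)");
                return 0;
        }
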
index 2a930a7..0c684ae 100644 (file)
@@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
                memset(buf_to_free->password, 0, strlen(buf_to_free->password));
                kfree(buf_to_free->password);
        }
+       kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree(buf_to_free);
 }
@@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
-               if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+               if (get_bcc_le(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);
 
                        pnotify = (struct file_notify_information *)
index 1676570..f6728eb 100644 (file)
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
                bcc_ptr++;
        } */
        /* copy user */
-       if (ses->userName == NULL) {
+       if (ses->user_name == NULL) {
                /* null user mount */
                *bcc_ptr = 0;
                *(bcc_ptr+1) = 0;
        } else {
-               bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+               bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
                                          MAX_USERNAME_SIZE, nls_cp);
        }
        bcc_ptr += 2 * bytes_ret;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
        /* copy user */
        /* BB what about null user mounts - check that we do this BB */
        /* copy user */
-       if (ses->userName == NULL) {
-               /* BB what about null user mounts - check that we do this BB */
-       } else {
-               strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
-       }
-       bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+       if (ses->user_name != NULL) {
+               strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+               bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
+       }
+       /* else null user mount */
        *bcc_ptr = 0;
        bcc_ptr++; /* account for null termination */
 
@@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
        ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
-       tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
-       tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+       tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+       tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
        if (tilen) {
                ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                tmp += len;
        }
 
-       if (ses->userName == NULL) {
+       if (ses->user_name == NULL) {
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = 0;
                sec_blob->UserName.MaximumLength = 0;
                tmp += 2;
        } else {
                int len;
-               len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+               len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
index ad25c4c..129a357 100644 (file)
@@ -2131,7 +2131,7 @@ EXPORT_SYMBOL(d_rehash);
  */
 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
 {
-       BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+       BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
        BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
 
        spin_lock(&dentry->d_lock);
index e25e99b..d0f5353 100644 (file)
@@ -86,8 +86,8 @@
 
 #ifdef CONFIG_QUOTA
 /* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+ * allocated so we need to update only the data block */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
 /* Amount of blocks needed for quota insert/delete - we do some block writes
  * but inode, sb and group updates are done only once */
 #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
index 4673bc0..e9473cb 100644 (file)
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
  * the parent directory's parent as well, and so on recursively, if
  * they are also freshly created.
  */
-static void ext4_sync_parent(struct inode *inode)
+static int ext4_sync_parent(struct inode *inode)
 {
+       struct writeback_control wbc;
        struct dentry *dentry = NULL;
+       int ret = 0;
 
        while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
                ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
                if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
                        break;
                inode = dentry->d_parent->d_inode;
-               sync_mapping_buffers(inode->i_mapping);
+               ret = sync_mapping_buffers(inode->i_mapping);
+               if (ret)
+                       break;
+               memset(&wbc, 0, sizeof(wbc));
+               wbc.sync_mode = WB_SYNC_ALL;
+               wbc.nr_to_write = 0;         /* only write out the inode */
+               ret = sync_inode(inode, &wbc);
+               if (ret)
+                       break;
        }
+       return ret;
 }
 
 /*
@@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync)
        if (!journal) {
                ret = generic_file_fsync(file, datasync);
                if (!ret && !list_empty(&inode->i_dentry))
-                       ext4_sync_parent(inode);
+                       ret = ext4_sync_parent(inode);
                goto out;
        }
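
ext4_sync_parent above now syncs each freshly created parent inode itself and propagates errors instead of ignoring them. The userspace counterpart of this durability rule is fsyncing the parent directory after creating a file; a minimal sketch (hypothetical paths):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        /* Create a file durably: fsync the file, then fsync its parent
         * directory so the new directory entry itself reaches disk. */
        static int create_durable(const char *dir, const char *path)
        {
                int fd = open(path, O_CREAT | O_WRONLY, 0600);
                int dfd;

                if (fd < 0)
                        return -1;
                if (fsync(fd) < 0 || close(fd) < 0)
                        return -1;

                dfd = open(dir, O_RDONLY | O_DIRECTORY);
                if (dfd < 0)
                        return -1;
                if (fsync(dfd) < 0) {           /* error must be propagated */
                        close(dfd);
                        return -1;
                }
                return close(dfd);
        }

        int main(void)
        {
                if (create_durable("/tmp", "/tmp/fsync-demo") != 0)
                        perror("create_durable");
                return 0;
        }
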
 
index ad8e303..f2fa5e8 100644 (file)
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                 * for partial write.
                 */
                set_buffer_new(bh);
+               set_buffer_mapped(bh);
        }
        return 0;
 }
@@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode)
        Indirect chain[4];
        Indirect *partial;
        __le32 nr = 0;
-       int n;
-       ext4_lblk_t last_block;
+       int n = 0;
+       ext4_lblk_t last_block, max_block;
        unsigned blocksize = inode->i_sb->s_blocksize;
 
        trace_ext4_truncate_enter(inode);
@@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
 
        last_block = (inode->i_size + blocksize-1)
                                        >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+       max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+                                       >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
 
        if (inode->i_size & (blocksize - 1))
                if (ext4_block_truncate_page(handle, mapping, inode->i_size))
                        goto out_stop;
 
-       n = ext4_block_to_path(inode, last_block, offsets, NULL);
-       if (n == 0)
-               goto out_stop;  /* error */
+       if (last_block != max_block) {
+               n = ext4_block_to_path(inode, last_block, offsets, NULL);
+               if (n == 0)
+                       goto out_stop;  /* error */
+       }
 
        /*
         * OK.  This truncate is going to happen.  We add the inode to the
@@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
         */
        ei->i_disksize = inode->i_size;
 
-       if (n == 1) {           /* direct blocks */
+       if (last_block == max_block) {
+               /*
+                * It is unnecessary to free any data blocks if last_block is
+                * equal to the indirect block limit.
+                */
+               goto out_unlock;
+       } else if (n == 1) {            /* direct blocks */
                ext4_free_data(handle, inode, NULL, i_data+offsets[0],
                               i_data + EXT4_NDIR_BLOCKS);
                goto do_indirects;
@@ -4553,6 +4564,7 @@ do_indirects:
                ;
        }
 
+out_unlock:
        up_write(&ei->i_data_sem);
        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
@@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
        /* if nrblocks are contiguous */
        if (chunk) {
                /*
-                * With N contiguous data blocks, it need at most
-                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
-                * 2 dindirect blocks
-                * 1 tindirect block
+                * With N contiguous data blocks, we need at most
+                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+                * 2 dindirect blocks, and 1 tindirect block
                 */
-               indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
-               return indirects + 3;
+               return DIV_ROUND_UP(nrblocks,
+                                   EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
        }
        /*
         * if nrblocks are not contiguous, worst case, each block touch
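
The replacement arithmetic uses DIV_ROUND_UP and a +4 constant: N contiguous blocks can cross ceil(N/addrs-per-block) + 1 indirect blocks plus 2 dindirect and 1 tindirect. A quick standalone check of that bound (1024 addresses per block assumes a 4K block size):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

        /*
         * Worst-case metadata for N contiguous data blocks:
         * ceil(N / addrs) + 1 indirect + 2 dindirect + 1 tindirect,
         * i.e. DIV_ROUND_UP(N, addrs) + 4, as in the patched code.
         * The old N / addrs + 3 undercounts whenever N % addrs != 0.
         */
        static int indirect_blocks(int nrblocks, int addrs_per_block)
        {
                return DIV_ROUND_UP(nrblocks, addrs_per_block) + 4;
        }

        int main(void)
        {
                printf("1023 blocks -> %d\n", indirect_blocks(1023, 1024)); /* 5 */
                printf("1025 blocks -> %d\n", indirect_blocks(1025, 1024)); /* 6 */
                return 0;
        }
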
index 056474b..8553dfb 100644 (file)
@@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
  * journal_end calls result in the superblock being marked dirty, so
  * that sync() will call the filesystem's write_super callback if
  * appropriate.
+ *
+ * To avoid holding j_barrier across a userspace-initiated freeze(),
+ * ext4 prevents new handles from being started via s_frozen, which
+ * is managed in an upper layer.
  */
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
 {
        journal_t *journal;
+       handle_t  *handle;
 
        if (sb->s_flags & MS_RDONLY)
                return ERR_PTR(-EROFS);
 
-       vfs_check_frozen(sb, SB_FREEZE_TRANS);
-       /* Special case here: if the journal has aborted behind our
-        * backs (eg. EIO in the commit thread), then we still need to
-        * take the FS itself readonly cleanly. */
        journal = EXT4_SB(sb)->s_journal;
-       if (journal) {
-               if (is_journal_aborted(journal)) {
-                       ext4_abort(sb, "Detected aborted journal");
-                       return ERR_PTR(-EROFS);
-               }
-               return jbd2_journal_start(journal, nblocks);
+       handle = ext4_journal_current_handle();
+
+       /*
+        * If a handle has been started, it should be allowed to
+        * finish; otherwise a deadlock could occur between freeze
+        * and other operations (e.g. truncate), because the journal
+        * handle would restart while the filesystem is frozen and
+        * active handles are not stopped.
+        */
+       if (!handle)
+               vfs_check_frozen(sb, SB_FREEZE_TRANS);
+
+       if (!journal)
+               return ext4_get_nojournal();
+       /*
+        * Special case here: if the journal has aborted behind our
+        * backs (eg. EIO in the commit thread), then we still need to
+        * take the FS itself readonly cleanly.
+        */
+       if (is_journal_aborted(journal)) {
+               ext4_abort(sb, "Detected aborted journal");
+               return ERR_PTR(-EROFS);
        }
-       return ext4_get_nojournal();
+       return jbd2_journal_start(journal, nblocks);
 }
 
 /*
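
The restructured ext4_journal_start_sb only waits on the frozen state when no handle is already running, so an in-flight handle can finish and cannot deadlock against freeze. A hedged userspace analogue of that outermost-only blocking, using a per-thread nesting counter (the freeze wait is stubbed out):

        #include <stdbool.h>
        #include <stdio.h>

        static _Thread_local int handle_depth;  /* per-thread nesting level */
        static bool fs_frozen;                  /* toggled by a freeze() analogue */

        static void wait_until_thawed(void)
        {
                /* Stub: a real version would sleep on a condvar here. */
                while (fs_frozen)
                        /* spin */;
        }

        /*
         * Only the outermost start may block on the frozen state; a
         * nested start must proceed, or the outer handle could never
         * finish and freeze would deadlock against it.
         */
        static void start_handle(void)
        {
                if (handle_depth == 0)
                        wait_until_thawed();
                handle_depth++;
        }

        static void stop_handle(void)
        {
                handle_depth--;
        }

        int main(void)
        {
                start_handle();
                start_handle();         /* nested: does not block */
                stop_handle();
                stop_handle();
                printf("depth back to %d\n", handle_depth);
                return 0;
        }
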
@@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
        mutex_unlock(&ext4_li_info->li_list_mtx);
 
        sbi->s_li_request = elr;
+       /*
+        * Set elr to NULL here, since it has been inserted into
+        * the request_list; its removal and freeing are handled
+        * by ext4_clear_request_list from now on.
+        */
+       elr = NULL;
 
        if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
                ret = ext4_run_lazyinit_thread();
@@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
+       init_timer(&sbi->s_err_report);
+       sbi->s_err_report.function = print_daily_error_info;
+       sbi->s_err_report.data = (unsigned long) sb;
+
        err = percpu_counter_init(&sbi->s_freeblocks_counter,
                        ext4_count_free_blocks(sb));
        if (!err) {
@@ -3646,9 +3673,6 @@ no_journal:
                 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
                 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
-       init_timer(&sbi->s_err_report);
-       sbi->s_err_report.function = print_daily_error_info;
-       sbi->s_err_report.data = (unsigned long) sb;
        if (es->s_error_count)
                mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
@@ -3672,6 +3696,7 @@ failed_mount_wq:
                sbi->s_journal = NULL;
        }
 failed_mount3:
+       del_timer(&sbi->s_err_report);
        if (sbi->s_flex_groups) {
                if (is_vmalloc_addr(sbi->s_flex_groups))
                        vfree(sbi->s_flex_groups);
@@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 /*
  * LVM calls this function before a (read-only) snapshot is created.  This
  * gives us a chance to flush the journal completely and mark the fs clean.
+ *
+ * Note that this function cannot bring the filesystem into a clean state
+ * on its own, because ext4 relies on @sb->s_frozen, which is managed in an
+ * upper layer, to prevent new handles from being started.  It thus needs
+ * help from the upper layer.
  */
 static int ext4_freeze(struct super_block *sb)
 {
@@ -4614,11 +4644,24 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 
 static int ext4_quota_off(struct super_block *sb, int type)
 {
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       handle_t *handle;
+
        /* Force all delayed allocation blocks to be allocated.
         * Caller already holds s_umount sem */
        if (test_opt(sb, DELALLOC))
                sync_filesystem(sb);
 
+       /* Update modification times of quota files when userspace can
+        * start looking at them */
+       handle = ext4_journal_start(inode, 1);
+       if (IS_ERR(handle))
+               goto out;
+       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       ext4_mark_inode_dirty(handle, inode);
+       ext4_journal_stop(handle);
+
+out:
        return dquot_quota_off(sb, type);
 }
 
@@ -4714,9 +4757,8 @@ out:
        if (inode->i_size < off + len) {
                i_size_write(inode, off + len);
                EXT4_I(inode)->i_disksize = inode->i_size;
+               ext4_mark_inode_dirty(handle, inode);
        }
-       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-       ext4_mark_inode_dirty(handle, inode);
        mutex_unlock(&inode->i_mutex);
        return len;
 }
index bf93ad2..6b08864 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/exportfs.h>
 #include <linux/fs_struct.h>
 #include <linux/fsnotify.h>
+#include <linux/personality.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
index 751d6b2..0845f84 100644 (file)
@@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs)
                        *tmp = fs->next;
                        fs->next = NULL;
                        write_unlock(&file_systems_lock);
+                       synchronize_rcu();
                        return 0;
                }
                tmp = &(*tmp)->next;
        }
        write_unlock(&file_systems_lock);
 
-       synchronize_rcu();
-
        return -EINVAL;
 }
 
index c71995b..0f5c4f9 100644 (file)
@@ -884,8 +884,8 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
        }
 
        brelse(dibh);
-       gfs2_trans_end(sdp);
 failed:
+       gfs2_trans_end(sdp);
        if (al) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
index 5c356d0..f789c57 100644 (file)
@@ -1506,7 +1506,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
                inode = gfs2_inode_lookup(dir->i_sb, 
                                be16_to_cpu(dent->de_type),
                                be64_to_cpu(dent->de_inum.no_addr),
-                               be64_to_cpu(dent->de_inum.no_formal_ino));
+                               be64_to_cpu(dent->de_inum.no_formal_ino), 0);
                brelse(bh);
                return inode;
        }
index b2682e0..e483108 100644 (file)
@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        return generic_file_aio_write(iocb, iov, nr_segs, pos);
 }
 
-static void empty_write_end(struct page *page, unsigned from,
-                          unsigned to)
+static int empty_write_end(struct page *page, unsigned from,
+                          unsigned to, int mode)
 {
-       struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+       struct inode *inode = page->mapping->host;
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct buffer_head *bh;
+       unsigned offset, blksize = 1 << inode->i_blkbits;
+       pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
 
        zero_user(page, from, to-from);
        mark_page_accessed(page);
 
-       if (!gfs2_is_writeback(ip))
-               gfs2_page_add_databufs(ip, page, from, to);
+       if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
+               if (!gfs2_is_writeback(ip))
+                       gfs2_page_add_databufs(ip, page, from, to);
+
+               block_commit_write(page, from, to);
+               return 0;
+       }
+
+       offset = 0;
+       bh = page_buffers(page);
+       while (offset < to) {
+               if (offset >= from) {
+                       set_buffer_uptodate(bh);
+                       mark_buffer_dirty(bh);
+                       clear_buffer_new(bh);
+                       write_dirty_buffer(bh, WRITE);
+               }
+               offset += blksize;
+               bh = bh->b_this_page;
+       }
 
-       block_commit_write(page, from, to);
+       offset = 0;
+       bh = page_buffers(page);
+       while (offset < to) {
+               if (offset >= from) {
+                       wait_on_buffer(bh);
+                       if (!buffer_uptodate(bh))
+                               return -EIO;
+               }
+               offset += blksize;
+               bh = bh->b_this_page;
+       }
+       return 0;
 }
 
 static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
        return !buffer_mapped(&bh_map);
 }
 
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
+                             int mode)
 {
        struct inode *inode = page->mapping->host;
        unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
                                                          gfs2_block_map);
                                if (unlikely(ret))
                                        return ret;
-                               empty_write_end(page, start, end);
+                               ret = empty_write_end(page, start, end, mode);
+                               if (unlikely(ret))
+                                       return ret;
                                end = 0;
                        }
                        start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
                ret = __block_write_begin(page, start, end - start, gfs2_block_map);
                if (unlikely(ret))
                        return ret;
-               empty_write_end(page, start, end);
+               ret = empty_write_end(page, start, end, mode);
+               if (unlikely(ret))
+                       return ret;
        }
 
        return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 
                if (curr == end)
                        to = end_offset;
-               error = write_empty_blocks(page, from, to);
+               error = write_empty_blocks(page, from, to, mode);
                if (!error && offset + to > inode->i_size &&
                    !(mode & FALLOC_FL_KEEP_SIZE)) {
                        i_size_write(inode, offset + to);
index 3754e3c..25eeb2b 100644 (file)
@@ -385,6 +385,10 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
 static void iopen_go_callback(struct gfs2_glock *gl)
 {
        struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+       struct gfs2_sbd *sdp = gl->gl_sbd;
+
+       if (sdp->sd_vfs->s_flags & MS_RDONLY)
+               return;
 
        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
index 97d54a2..9134dcb 100644 (file)
@@ -40,37 +40,61 @@ struct gfs2_inum_range_host {
        u64 ir_length;
 };
 
+struct gfs2_skip_data {
+       u64 no_addr;
+       int skipped;
+       int non_block;
+};
+
 static int iget_test(struct inode *inode, void *opaque)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
-       u64 *no_addr = opaque;
+       struct gfs2_skip_data *data = opaque;
 
-       if (ip->i_no_addr == *no_addr)
+       if (ip->i_no_addr == data->no_addr) {
+               if (data->non_block &&
+                   inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+                       data->skipped = 1;
+                       return 0;
+               }
                return 1;
-
+       }
        return 0;
 }
 
 static int iget_set(struct inode *inode, void *opaque)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
-       u64 *no_addr = opaque;
+       struct gfs2_skip_data *data = opaque;
 
-       inode->i_ino = (unsigned long)*no_addr;
-       ip->i_no_addr = *no_addr;
+       if (data->skipped)
+               return -ENOENT;
+       inode->i_ino = (unsigned long)(data->no_addr);
+       ip->i_no_addr = data->no_addr;
        return 0;
 }
 
 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
 {
        unsigned long hash = (unsigned long)no_addr;
-       return ilookup5(sb, hash, iget_test, &no_addr);
+       struct gfs2_skip_data data;
+
+       data.no_addr = no_addr;
+       data.skipped = 0;
+       data.non_block = 0;
+       return ilookup5(sb, hash, iget_test, &data);
 }
 
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
+                              int non_block)
 {
+       struct gfs2_skip_data data;
        unsigned long hash = (unsigned long)no_addr;
-       return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
+
+       data.no_addr = no_addr;
+       data.skipped = 0;
+       data.non_block = non_block;
+       return iget5_locked(sb, hash, iget_test, iget_set, &data);
 }
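
The gfs2_skip_data wrapper lets iget_test decline a match, and record that it did, when the target inode is being freed, rather than blocking on it. A simplified standalone sketch of a lookup predicate with such a skip flag (flat array standing in for the inode hash, names hypothetical):

        #include <stdio.h>

        struct entry {
                unsigned long addr;
                int freeing;            /* analogous to I_FREEING */
        };

        struct skip_data {
                unsigned long addr;
                int skipped;
                int non_block;
        };

        /* Match by address, but optionally skip entries being freed
         * rather than waiting on them, noting that we did so. */
        static int test_entry(const struct entry *e, struct skip_data *d)
        {
                if (e->addr != d->addr)
                        return 0;
                if (d->non_block && e->freeing) {
                        d->skipped = 1;
                        return 0;
                }
                return 1;
        }

        int main(void)
        {
                struct entry table[] = { { 42, 1 }, { 43, 0 } };
                struct skip_data d = { .addr = 42, .non_block = 1 };
                int i, found = -1;

                for (i = 0; i < 2; i++)
                        if (test_entry(&table[i], &d))
                                found = i;

                printf("found=%d skipped=%d\n", found, d.skipped);
                return 0;
        }
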
 
 /**
@@ -111,19 +135,20 @@ static void gfs2_set_iop(struct inode *inode)
  * @sb: The super block
  * @no_addr: The inode number
  * @type: The type of the inode
+ * @non_block: Can we block on inodes that are being freed?
  *
  * Returns: A VFS inode, or an error
  */
 
 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
-                               u64 no_addr, u64 no_formal_ino)
+                               u64 no_addr, u64 no_formal_ino, int non_block)
 {
        struct inode *inode;
        struct gfs2_inode *ip;
        struct gfs2_glock *io_gl = NULL;
        int error;
 
-       inode = gfs2_iget(sb, no_addr);
+       inode = gfs2_iget(sb, no_addr, non_block);
        ip = GFS2_I(inode);
 
        if (!inode)
@@ -185,11 +210,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
 {
        struct super_block *sb = sdp->sd_vfs;
        struct gfs2_holder i_gh;
-       struct inode *inode;
+       struct inode *inode = NULL;
        int error;
 
+       /* Must not read in block until block type is verified */
        error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
-                                 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+                                 LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
        if (error)
                return ERR_PTR(error);
 
@@ -197,7 +223,7 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
        if (error)
                goto fail;
 
-       inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+       inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
        if (IS_ERR(inode))
                goto fail;
 
@@ -843,7 +869,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
                goto fail_gunlock2;
 
        inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
-                                 inum.no_formal_ino);
+                                 inum.no_formal_ino, 0);
        if (IS_ERR(inode))
                goto fail_gunlock2;
 
index 3e00a66..099ca30 100644 (file)
@@ -97,7 +97,8 @@ err:
 }
 
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
-                                      u64 no_addr, u64 no_formal_ino);
+                                      u64 no_addr, u64 no_formal_ino,
+                                      int non_block);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                                         u64 *no_formal_ino,
                                         unsigned int blktype);
index 42ef243..d3c69eb 100644 (file)
@@ -430,7 +430,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
        struct dentry *dentry;
        struct inode *inode;
 
-       inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+       inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
        if (IS_ERR(inode)) {
                fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
                return PTR_ERR(inode);
index cf930cd..6fcae84 100644 (file)
@@ -945,7 +945,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
                /* rgblk_search can return a block < goal, so we need to
                   keep it marching forward. */
                no_addr = block + rgd->rd_data0;
-               goal++;
+               goal = max(block + 1, goal + 1);
                if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
                        continue;
                if (no_addr == skip)
@@ -971,7 +971,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
                        found++;
 
                /* Limit reclaim to sensible number of tasks */
-               if (found > 2*NR_CPUS)
+               if (found > NR_CPUS)
                        return;
        }
 
index a4e23d6..b9f28e6 100644 (file)
@@ -1318,15 +1318,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
 
 static void gfs2_evict_inode(struct inode *inode)
 {
-       struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+       struct super_block *sb = inode->i_sb;
+       struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
 
-       if (inode->i_nlink)
+       if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
                goto out;
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+       /* Must not read inode block until block type has been verified */
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
        if (unlikely(error)) {
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
@@ -1336,6 +1338,12 @@ static void gfs2_evict_inode(struct inode *inode)
        if (error)
                goto out_truncate;
 
+       if (test_bit(GIF_INVALID, &ip->i_flags)) {
+               error = gfs2_inode_refresh(ip);
+               if (error)
+                       goto out_truncate;
+       }
+
        ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_wait(&ip->i_iopen_gh);
        gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
index 20af62f..6e28000 100644 (file)
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
        int ret;
        struct timespec now = current_kernel_time();
 
+       *cbh = NULL;
+
        if (is_journal_aborted(journal))
                return 0;
 
@@ -806,7 +808,7 @@ wait_for_iobuf:
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }
-       if (!err && !is_journal_aborted(journal))
+       if (cbh)
                err = journal_wait_on_commit_record(journal, cbh);
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
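
Setting *cbh = NULL on entry, and keying the final wait on cbh itself, makes the out-parameter safe to test on every early-return path, including the aborted-journal one. The pattern in isolation (standalone sketch):

        #include <stdio.h>
        #include <stdlib.h>

        struct buf { int id; };

        /* Always initialize the out-parameter first, so callers can test
         * it even when we return early without producing a buffer. */
        static int submit_record(int aborted, struct buf **out)
        {
                *out = NULL;
                if (aborted)
                        return 0;       /* early return: *out stays NULL */
                *out = malloc(sizeof(**out));
                if (!*out)
                        return -1;
                (*out)->id = 1;
                return 0;
        }

        int main(void)
        {
                struct buf *b;          /* deliberately uninitialized */

                submit_record(1, &b);
                if (b)                  /* safe: callee set it to NULL */
                        puts("wait on buffer");
                else
                        puts("nothing to wait for");
                free(b);
                return 0;
        }
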
index aba8eba..e0ec3db 100644 (file)
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
        new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
        if (!new_dev)
                return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+       bd = bdget(device);
        spin_lock(&devname_cache_lock);
        if (devcache[i]) {
                if (devcache[i]->device == device) {
                        kfree(new_dev);
+                       bdput(bd);
                        ret = devcache[i]->devname;
                        spin_unlock(&devname_cache_lock);
                        return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
        }
        devcache[i] = new_dev;
        devcache[i]->device = device;
-       bd = bdget(device);
        if (bd) {
                bdevname(bd, devcache[i]->devname);
                bdput(bd);
index e6cd611..54fc993 100644 (file)
@@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
                do {
                        seq = read_seqcount_begin(&fs->seq);
                        nd->root = fs->root;
+                       nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
                } while (read_seqcount_retry(&fs->seq, seq));
        }
 }
index 7dba2ed..d99bcf5 100644 (file)
@@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = {
        .show   = show_vfsmnt
 };
 
-static int uuid_is_nil(u8 *uuid)
-{
-       int i;
-       u8  *cp = (u8 *)uuid;
-
-       for (i = 0; i < 16; i++) {
-               if (*cp++)
-                       return 0;
-       }
-       return 1;
-}
-
 static int show_mountinfo(struct seq_file *m, void *v)
 {
        struct proc_mounts *p = m->private;
@@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (IS_MNT_UNBINDABLE(mnt))
                seq_puts(m, " unbindable");
 
-       if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
-               /* print the uuid */
-               seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
-
        /* Filesystem specific data */
        seq_puts(m, " - ");
        show_type(m, sb);
index af0c627..e4cbc11 100644 (file)
@@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
        if (!nfs_need_commit(nfsi))
                return 0;
 
+       spin_lock(&inode->i_lock);
        ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
        if (ret > 0)
                nfsi->ncommit -= ret;
+       spin_unlock(&inode->i_lock);
+
        if (nfs_need_commit(NFS_I(inode)))
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
        return ret;
 }
 #else
@@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how)
        res = nfs_commit_set_lock(NFS_I(inode), may_wait);
        if (res <= 0)
                goto out_mark_dirty;
-       spin_lock(&inode->i_lock);
        res = nfs_scan_commit(inode, &head, 0, 0);
-       spin_unlock(&inode->i_lock);
        if (res) {
                int error;
 
index 0c6d816..7c831a2 100644 (file)
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
        exp_readlock();
        nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
        fh_put(&fh);
-       rqstp->rq_client = NULL;
        exp_readunlock();
        /* We return nlm error codes as nlm doesn't know
         * about nfsd, but nfsd does know about nlm..
index 4b36ec3..aa309aa 100644 (file)
@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp)
 
 static void free_generic_stateid(struct nfs4_stateid *stp)
 {
-       int oflag = nfs4_access_bmap_to_omode(stp);
+       int oflag;
 
-       nfs4_file_put_access(stp->st_file, oflag);
-       put_nfs4_file(stp->st_file);
+       if (stp->st_access_bmap) {
+               oflag = nfs4_access_bmap_to_omode(stp);
+               nfs4_file_put_access(stp->st_file, oflag);
+               put_nfs4_file(stp->st_file);
+       }
        kmem_cache_free(stateid_slab, stp);
 }
 
index b10e354..ce4f624 100644 (file)
@@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
 
        BUG_ON (!data || !frags);
 
+       if (size < 2 * VBLK_SIZE_HEAD) {
+               ldm_error("Value of size is to small.");
+               return false;
+       }
+
        group = get_unaligned_be32(data + 0x08);
        rec   = get_unaligned_be16(data + 0x0C);
        num   = get_unaligned_be16(data + 0x0E);
@@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
                ldm_error ("A VBLK claims to have %d parts.", num);
                return false;
        }
+       if (rec >= num) {
+               ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
+               return false;
+       }
 
        list_for_each (item, frags) {
                f = list_entry (item, struct frag, list);
@@ -1334,10 +1343,9 @@ found:
 
        f->map |= (1 << rec);
 
-       if (num > 0) {
-               data += VBLK_SIZE_HEAD;
-               size -= VBLK_SIZE_HEAD;
-       }
+       data += VBLK_SIZE_HEAD;
+       size -= VBLK_SIZE_HEAD;
+
        memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
 
        return true;
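
The two added checks harden ldm_frag_add against crafted LDM metadata: the fragment must be big enough to carry its header, and the record index must stay below the advertised part count. A standalone sketch of the same validation (HDR_SIZE and the field offsets are illustrative):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define HDR_SIZE 16             /* illustrative header size */

        /* Validate an untrusted fragment before using its header fields. */
        static int frag_ok(const uint8_t *data, int size)
        {
                uint16_t rec, num;

                if (size < 2 * HDR_SIZE)        /* too small for a header */
                        return 0;
                memcpy(&rec, data + 0x0C, sizeof(rec));
                memcpy(&num, data + 0x0E, sizeof(num));
                if (num == 0)                   /* claims zero parts */
                        return 0;
                if (rec >= num)                 /* index out of range */
                        return 0;
                return 1;
        }

        int main(void)
        {
                uint8_t buf[64] = { 0 };

                buf[0x0C] = 5;                  /* rec */
                buf[0x0E] = 2;                  /* num: rec >= num, rejected */
                printf("valid: %d\n", frag_ok(buf, sizeof(buf)));
                return 0;
        }
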
index dd6628d..dfa5327 100644 (file)
@@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
 /* for the /proc/ directory itself, after non-process stuff has been done */
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
-       unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
-       struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+       unsigned int nr;
+       struct task_struct *reaper;
        struct tgid_iter iter;
        struct pid_namespace *ns;
 
+       if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
+               goto out_no_task;
+       nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+       reaper = get_proc_task(filp->f_path.dentry->d_inode);
        if (!reaper)
                goto out_no_task;
 
index 9eead2c..fbb0b47 100644 (file)
@@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
                SetPageDirty(page);
 
                unlock_page(page);
+               put_page(page);
        }
 
        return 0;
index 919f0de..e6493ca 100644 (file)
 #ifndef __UBIFS_DEBUG_H__
 #define __UBIFS_DEBUG_H__
 
+/* Checking helper functions */
+typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
+                                struct ubifs_zbranch *zbr, void *priv);
+typedef int (*dbg_znode_callback)(struct ubifs_info *c,
+                                 struct ubifs_znode *znode, void *priv);
+
 #ifdef CONFIG_UBIFS_FS_DEBUG
 
 /**
@@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c);
 void dbg_dump_index(struct ubifs_info *c);
 void dbg_dump_lpt_lebs(const struct ubifs_info *c);
 
-/* Checking helper functions */
-typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
-                                struct ubifs_zbranch *zbr, void *priv);
-typedef int (*dbg_znode_callback)(struct ubifs_info *c,
-                                 struct ubifs_znode *znode, void *priv);
 int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                   dbg_znode_callback znode_cb, void *priv);
 
@@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
 int dbg_check_filesystem(struct ubifs_info *c);
 void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
                    int add_pos);
-int dbg_check_lprops(struct ubifs_info *c);
 int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
                        int row, int col);
 int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
@@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define DBGKEY(key)  ((char *)(key))
 #define DBGKEY1(key) ((char *)(key))
 
-#define ubifs_debugging_init(c)                0
-#define ubifs_debugging_exit(c)                ({})
-
-#define dbg_ntype(type)                        ""
-#define dbg_cstate(cmt_state)                  ""
-#define dbg_jhead(jhead)                       ""
-#define dbg_get_key_dump(c, key)               ({})
-#define dbg_dump_inode(c, inode)               ({})
-#define dbg_dump_node(c, node)                 ({})
-#define dbg_dump_lpt_node(c, node, lnum, offs) ({})
-#define dbg_dump_budget_req(req)               ({})
-#define dbg_dump_lstats(lst)                   ({})
-#define dbg_dump_budg(c)                       ({})
-#define dbg_dump_lprop(c, lp)                  ({})
-#define dbg_dump_lprops(c)                     ({})
-#define dbg_dump_lpt_info(c)                   ({})
-#define dbg_dump_leb(c, lnum)                  ({})
-#define dbg_dump_znode(c, znode)               ({})
-#define dbg_dump_heap(c, heap, cat)            ({})
-#define dbg_dump_pnode(c, pnode, parent, iip)  ({})
-#define dbg_dump_tnc(c)                        ({})
-#define dbg_dump_index(c)                      ({})
-#define dbg_dump_lpt_lebs(c)                   ({})
-
-#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
-#define dbg_old_index_check_init(c, zroot)         0
-#define dbg_save_space_info(c)                     ({})
-#define dbg_check_space_info(c)                    0
-#define dbg_check_old_index(c, zroot)              0
-#define dbg_check_cats(c)                          0
-#define dbg_check_ltab(c)                          0
-#define dbg_chk_lpt_free_spc(c)                    0
-#define dbg_chk_lpt_sz(c, action, len)             0
-#define dbg_check_synced_i_size(inode)             0
-#define dbg_check_dir_size(c, dir)                 0
-#define dbg_check_tnc(c, x)                        0
-#define dbg_check_idx_size(c, idx_size)            0
-#define dbg_check_filesystem(c)                    0
-#define dbg_check_heap(c, heap, cat, add_pos)      ({})
-#define dbg_check_lprops(c)                        0
-#define dbg_check_lpt_nodes(c, cnode, row, col)    0
-#define dbg_check_inode_size(c, inode, size)       0
-#define dbg_check_data_nodes_order(c, head)        0
-#define dbg_check_nondata_nodes_order(c, head)     0
-#define dbg_force_in_the_gaps_enabled              0
-#define dbg_force_in_the_gaps()                    0
-#define dbg_failure_mode                           0
-
-#define dbg_debugfs_init()                         0
-#define dbg_debugfs_exit()
-#define dbg_debugfs_init_fs(c)                     0
-#define dbg_debugfs_exit_fs(c)                     0
+static inline int ubifs_debugging_init(struct ubifs_info *c)      { return 0; }
+static inline void ubifs_debugging_exit(struct ubifs_info *c)     { return; }
+static inline const char *dbg_ntype(int type)                     { return ""; }
+static inline const char *dbg_cstate(int cmt_state)               { return ""; }
+static inline const char *dbg_jhead(int jhead)                    { return ""; }
+static inline const char *
+dbg_get_key_dump(const struct ubifs_info *c,
+                const union ubifs_key *key)                      { return ""; }
+static inline void dbg_dump_inode(const struct ubifs_info *c,
+                                 const struct inode *inode)      { return; }
+static inline void dbg_dump_node(const struct ubifs_info *c,
+                                const void *node)                { return; }
+static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
+                                    void *node, int lnum,
+                                    int offs)                    { return; }
+static inline void
+dbg_dump_budget_req(const struct ubifs_budget_req *req)           { return; }
+static inline void
+dbg_dump_lstats(const struct ubifs_lp_stats *lst)                 { return; }
+static inline void dbg_dump_budg(struct ubifs_info *c)            { return; }
+static inline void dbg_dump_lprop(const struct ubifs_info *c,
+                                 const struct ubifs_lprops *lp)  { return; }
+static inline void dbg_dump_lprops(struct ubifs_info *c)          { return; }
+static inline void dbg_dump_lpt_info(struct ubifs_info *c)        { return; }
+static inline void dbg_dump_leb(const struct ubifs_info *c,
+                               int lnum)                         { return; }
+static inline void
+dbg_dump_znode(const struct ubifs_info *c,
+              const struct ubifs_znode *znode)                   { return; }
+static inline void dbg_dump_heap(struct ubifs_info *c,
+                                struct ubifs_lpt_heap *heap,
+                                int cat)                         { return; }
+static inline void dbg_dump_pnode(struct ubifs_info *c,
+                                 struct ubifs_pnode *pnode,
+                                 struct ubifs_nnode *parent,
+                                 int iip)                        { return; }
+static inline void dbg_dump_tnc(struct ubifs_info *c)             { return; }
+static inline void dbg_dump_index(struct ubifs_info *c)           { return; }
+static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c)  { return; }
+
+static inline int dbg_walk_index(struct ubifs_info *c,
+                                dbg_leaf_callback leaf_cb,
+                                dbg_znode_callback znode_cb,
+                                void *priv)                      { return 0; }
+static inline void dbg_save_space_info(struct ubifs_info *c)      { return; }
+static inline int dbg_check_space_info(struct ubifs_info *c)      { return 0; }
+static inline int dbg_check_lprops(struct ubifs_info *c)          { return 0; }
+static inline int
+dbg_old_index_check_init(struct ubifs_info *c,
+                        struct ubifs_zbranch *zroot)             { return 0; }
+static inline int
+dbg_check_old_index(struct ubifs_info *c,
+                   struct ubifs_zbranch *zroot)                  { return 0; }
+static inline int dbg_check_cats(struct ubifs_info *c)            { return 0; }
+static inline int dbg_check_ltab(struct ubifs_info *c)            { return 0; }
+static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c)      { return 0; }
+static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
+                                int action, int len)             { return 0; }
+static inline int dbg_check_synced_i_size(struct inode *inode)    { return 0; }
+static inline int dbg_check_dir_size(struct ubifs_info *c,
+                                    const struct inode *dir)     { return 0; }
+static inline int dbg_check_tnc(struct ubifs_info *c, int extra)  { return 0; }
+static inline int dbg_check_idx_size(struct ubifs_info *c,
+                                    long long idx_size)          { return 0; }
+static inline int dbg_check_filesystem(struct ubifs_info *c)      { return 0; }
+static inline void dbg_check_heap(struct ubifs_info *c,
+                                 struct ubifs_lpt_heap *heap,
+                                 int cat, int add_pos)           { return; }
+static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
+       struct ubifs_cnode *cnode, int row, int col)              { return 0; }
+static inline int dbg_check_inode_size(struct ubifs_info *c,
+                                      const struct inode *inode,
+                                      loff_t size)               { return 0; }
+static inline int
+dbg_check_data_nodes_order(struct ubifs_info *c,
+                          struct list_head *head)                { return 0; }
+static inline int
+dbg_check_nondata_nodes_order(struct ubifs_info *c,
+                             struct list_head *head)             { return 0; }
+
+static inline int dbg_force_in_the_gaps(void)                     { return 0; }
+#define dbg_force_in_the_gaps_enabled 0
+#define dbg_failure_mode              0
+
+static inline int dbg_debugfs_init(void)                          { return 0; }
+static inline void dbg_debugfs_exit(void)                         { return; }
+static inline int dbg_debugfs_init_fs(struct ubifs_info *c)       { return 0; }
+static inline int dbg_debugfs_exit_fs(struct ubifs_info *c)       { return 0; }
 
 #endif /* !CONFIG_UBIFS_FS_DEBUG */
 #endif /* !__UBIFS_DEBUG_H__ */
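
Replacing the no-debug macro stubs with static inlines keeps full prototype checking in non-debug builds while still compiling to nothing. A compact illustration of the difference (hypothetical names):

        #include <stdio.h>

        struct ctx { int id; };

        /* Macro stub: the argument is discarded without any type check,
         * so a wrong-type caller compiles silently. */
        #define dbg_check_macro(c) 0

        /* Inline stub: the prototype is enforced, so passing anything
         * but a struct ctx * is a compile-time error. */
        static inline int dbg_check_inline(struct ctx *c) { return 0; }

        int main(void)
        {
                struct ctx c = { 1 };

                printf("%d %d\n", dbg_check_macro(&c), dbg_check_inline(&c));
                /* dbg_check_macro("wrong") would still compile; the
                 * inline version would reject the same mistake. */
                return 0;
        }
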
index 28be1e6..b286db7 100644 (file)
@@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync)
 
        dbg_gen("syncing inode %lu", inode->i_ino);
 
+       if (inode->i_sb->s_flags & MS_RDONLY)
+               return 0;
+
        /*
         * VFS has already synchronized dirty pages for this inode. Synchronize
         * the inode unless this is a 'datasync()' call.
index 5ea4020..9ef9ed2 100644 (file)
@@ -293,7 +293,6 @@ xfs_buf_allocate_memory(
        size_t                  nbytes, offset;
        gfp_t                   gfp_mask = xb_to_gfp(flags);
        unsigned short          page_count, i;
-       pgoff_t                 first;
        xfs_off_t               end;
        int                     error;
 
@@ -333,7 +332,6 @@ use_alloc_page:
                return error;
 
        offset = bp->b_offset;
-       first = bp->b_file_offset >> PAGE_SHIFT;
        bp->b_flags |= _XBF_PAGES;
 
        for (i = 0; i < bp->b_page_count; i++) {
@@ -657,8 +655,6 @@ xfs_buf_readahead(
        xfs_off_t               ioff,
        size_t                  isize)
 {
-       struct backing_dev_info *bdi;
-
        if (bdi_read_congested(target->bt_bdi))
                return;
 
@@ -919,8 +915,6 @@ xfs_buf_lock(
 
        if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
                xfs_log_force(bp->b_target->bt_mount, 0);
-       if (atomic_read(&bp->b_io_remaining))
-               blk_flush_plug(current);
        down(&bp->b_sema);
        XB_SET_OWNER(bp);
 
@@ -1309,8 +1303,6 @@ xfs_buf_iowait(
 {
        trace_xfs_buf_iowait(bp, _RET_IP_);
 
-       if (atomic_read(&bp->b_io_remaining))
-               blk_flush_plug(current);
        wait_for_completion(&bp->b_iowait);
 
        trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1747,8 +1739,8 @@ xfsbufd(
        do {
                long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
                long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
-               int     count = 0;
                struct list_head tmp;
+               struct blk_plug plug;
 
                if (unlikely(freezing(current))) {
                        set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1764,16 +1756,15 @@ xfsbufd(
 
                xfs_buf_delwri_split(target, &tmp, age);
                list_sort(NULL, &tmp, xfs_buf_cmp);
+
+               blk_start_plug(&plug);
                while (!list_empty(&tmp)) {
                        struct xfs_buf *bp;
                        bp = list_first_entry(&tmp, struct xfs_buf, b_list);
                        list_del_init(&bp->b_list);
                        xfs_bdstrat_cb(bp);
-                       count++;
                }
-               if (count)
-                       blk_flush_plug(current);
-
+               blk_finish_plug(&plug);
        } while (!kthread_should_stop());
 
        return 0;
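
The blk_start_plug()/blk_finish_plug() pair brackets the submission loop so queued requests are dispatched as one batch when the plug is released, replacing the explicit blk_flush_plug() calls. A loose userspace analogue is stdio buffering, where writes accumulate and flush once:

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/tmp/plug-demo", "w");
                int i;

                if (!f)
                        return 1;

                /* "Plugged" phase: writes accumulate in the stdio buffer
                 * instead of being issued one syscall at a time. */
                for (i = 0; i < 16; i++)
                        fprintf(f, "record %d\n", i);

                /* "Unplug": flush the whole batch in one go. */
                fflush(f);
                fclose(f);
                return 0;
        }
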
@@ -1793,6 +1784,7 @@ xfs_flush_buftarg(
        int             pincount = 0;
        LIST_HEAD(tmp_list);
        LIST_HEAD(wait_list);
+       struct blk_plug plug;
 
        xfs_buf_runall_queues(xfsconvertd_workqueue);
        xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1807,6 +1799,8 @@ xfs_flush_buftarg(
         * we do that after issuing all the IO.
         */
        list_sort(NULL, &tmp_list, xfs_buf_cmp);
+
+       blk_start_plug(&plug);
        while (!list_empty(&tmp_list)) {
                bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
                ASSERT(target == bp->b_target);
@@ -1817,10 +1811,10 @@ xfs_flush_buftarg(
                }
                xfs_bdstrat_cb(bp);
        }
+       blk_finish_plug(&plug);
 
        if (wait) {
-               /* Expedite and wait for IO to complete. */
-               blk_flush_plug(current);
+               /* Wait for IO to complete. */
                while (!list_empty(&wait_list)) {
                        bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 
index 508e06f..3ca7956 100644 (file)
 /*
  * XFS logging functions
  */
-static int
+static void
 __xfs_printk(
        const char              *level,
        const struct xfs_mount  *mp,
        struct va_format        *vaf)
 {
        if (mp && mp->m_fsname)
-               return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
-       return printk("%sXFS: %pV\n", level, vaf);
+               printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+       printk("%sXFS: %pV\n", level, vaf);
 }
 
-int xfs_printk(
+void xfs_printk(
        const char              *level,
        const struct xfs_mount  *mp,
        const char              *fmt, ...)
 {
        struct va_format        vaf;
        va_list                 args;
-       int                      r;
 
        va_start(args, fmt);
 
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       r = __xfs_printk(level, mp, &vaf);
+       __xfs_printk(level, mp, &vaf);
        va_end(args);
-
-       return r;
 }
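
The %pV used here is printk's recursive-format extension: it expands a struct va_format
(a format string plus a va_list) in place, which is what lets xfs_printk() forward its
varargs to __xfs_printk() without an intermediate buffer. A self-contained sketch of the
same pattern (my_log is a hypothetical wrapper, not part of this patch):

#include <linux/kernel.h>

static void my_log(const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* %pV expands the wrapped format/va_list pair */
        printk(KERN_INFO "mydrv: %pV\n", &vaf);
        va_end(args);
}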
 
 #define define_xfs_printk_level(func, kern_level)              \
-int func(const struct xfs_mount *mp, const char *fmt, ...)     \
+void func(const struct xfs_mount *mp, const char *fmt, ...)    \
 {                                                              \
        struct va_format        vaf;                            \
        va_list                 args;                           \
-       int                     r;                              \
                                                                \
        va_start(args, fmt);                                    \
                                                                \
        vaf.fmt = fmt;                                          \
        vaf.va = &args;                                         \
                                                                \
-       r = __xfs_printk(kern_level, mp, &vaf);                 \
+       __xfs_printk(kern_level, mp, &vaf);                     \
        va_end(args);                                           \
-                                                               \
-       return r;                                               \
 }                                                              \
 
 define_xfs_printk_level(xfs_emerg, KERN_EMERG);
@@ -88,7 +82,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO);
 define_xfs_printk_level(xfs_debug, KERN_DEBUG);
 #endif
 
-int
+void
 xfs_alert_tag(
        const struct xfs_mount  *mp,
        int                     panic_tag,
@@ -97,7 +91,6 @@ xfs_alert_tag(
        struct va_format        vaf;
        va_list                 args;
        int                     do_panic = 0;
-       int                     r;
 
        if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
                xfs_printk(KERN_ALERT, mp,
@@ -110,12 +103,10 @@ xfs_alert_tag(
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       r = __xfs_printk(KERN_ALERT, mp, &vaf);
+       __xfs_printk(KERN_ALERT, mp, &vaf);
        va_end(args);
 
        BUG_ON(do_panic);
-
-       return r;
 }
 
 void
index e77ffa1..f1b3fc1 100644 (file)
@@ -3,32 +3,34 @@
 
 struct xfs_mount;
 
-extern int xfs_printk(const char *level, const struct xfs_mount *mp,
+extern void xfs_printk(const char *level, const struct xfs_mount *mp,
                       const char *fmt, ...)
         __attribute__ ((format (printf, 3, 4)));
-extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert_tag(const struct xfs_mount *mp, int tag,
+extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
                         const char *fmt, ...)
         __attribute__ ((format (printf, 3, 4)));
-extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
 
 #ifdef DEBUG
-extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
 #else
-#define xfs_debug(mp, fmt, ...)        (0)
+static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+{
+}
 #endif
 
 extern void assfail(char *expr, char *f, int l);
index 1ba5c45..b38e58d 100644 (file)
@@ -816,75 +816,6 @@ xfs_setup_devices(
        return 0;
 }
 
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
-       struct xfs_ail          *ailp,
-       xfs_lsn_t               threshold_lsn)
-{
-       /* only ever move the target forwards */
-       if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
-               ailp->xa_target = threshold_lsn;
-               wake_up_process(ailp->xa_task);
-       }
-}
-
-STATIC int
-xfsaild(
-       void    *data)
-{
-       struct xfs_ail  *ailp = data;
-       xfs_lsn_t       last_pushed_lsn = 0;
-       long            tout = 0; /* milliseconds */
-
-       while (!kthread_should_stop()) {
-               /*
-                * for short sleeps indicating congestion, don't allow us to
-                * get woken early. Otherwise all we do is bang on the AIL lock
-                * without making progress.
-                */
-               if (tout && tout <= 20)
-                       __set_current_state(TASK_KILLABLE);
-               else
-                       __set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(tout ?
-                                msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
-
-               /* swsusp */
-               try_to_freeze();
-
-               ASSERT(ailp->xa_mount->m_log);
-               if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
-                       continue;
-
-               tout = xfsaild_push(ailp, &last_pushed_lsn);
-       }
-
-       return 0;
-}      /* xfsaild */
-
-int
-xfsaild_start(
-       struct xfs_ail  *ailp)
-{
-       ailp->xa_target = 0;
-       ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
-                                   ailp->xa_mount->m_fsname);
-       if (IS_ERR(ailp->xa_task))
-               return -PTR_ERR(ailp->xa_task);
-       return 0;
-}
-
-void
-xfsaild_stop(
-       struct xfs_ail  *ailp)
-{
-       kthread_stop(ailp->xa_task);
-}
-
-
 /* Catch misguided souls that try to use this interface on XFS */
 STATIC struct inode *
 xfs_fs_alloc_inode(
@@ -1191,22 +1122,12 @@ xfs_fs_sync_fs(
                return -error;
 
        if (laptop_mode) {
-               int     prev_sync_seq = mp->m_sync_seq;
-
                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
-               wake_up_process(mp->m_sync_task);
-               /*
-                * We have to wait for the sync iteration to complete.
-                * If we don't, the disk activity caused by the sync
-                * will come after the sync is completed, and that
-                * triggers another sync from laptop mode.
-                */
-               wait_event(mp->m_wait_single_sync_task,
-                               mp->m_sync_seq != prev_sync_seq);
+               flush_delayed_work_sync(&mp->m_sync_work);
        }
 
        return 0;
@@ -1490,9 +1411,6 @@ xfs_fs_fill_super(
        spin_lock_init(&mp->m_sb_lock);
        mutex_init(&mp->m_growlock);
        atomic_set(&mp->m_active_trans, 0);
-       INIT_LIST_HEAD(&mp->m_sync_list);
-       spin_lock_init(&mp->m_sync_lock);
-       init_waitqueue_head(&mp->m_wait_single_sync_task);
 
        mp->m_super = sb;
        sb->s_fs_info = mp;
@@ -1798,6 +1716,38 @@ xfs_destroy_zones(void)
 
 }
 
+STATIC int __init
+xfs_init_workqueues(void)
+{
+       /*
+        * max_active is set to 8 to give enough concurrency to allow
+        * multiple work operations on each CPU to run. This allows multiple
+        * filesystems to be running sync work concurrently, and scales with
+        * the number of CPUs in the system.
+        */
+       xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+       if (!xfs_syncd_wq)
+               goto out;
+
+       xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+       if (!xfs_ail_wq)
+               goto out_destroy_syncd;
+
+       return 0;
+
+out_destroy_syncd:
+       destroy_workqueue(xfs_syncd_wq);
+out:
+       return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_workqueues(void)
+{
+       destroy_workqueue(xfs_ail_wq);
+       destroy_workqueue(xfs_syncd_wq);
+}
+
 STATIC int __init
 init_xfs_fs(void)
 {
@@ -1813,10 +1763,14 @@ init_xfs_fs(void)
        if (error)
                goto out;
 
-       error = xfs_mru_cache_init();
+       error = xfs_init_workqueues();
        if (error)
                goto out_destroy_zones;
 
+       error = xfs_mru_cache_init();
+       if (error)
+               goto out_destroy_wq;
+
        error = xfs_filestream_init();
        if (error)
                goto out_mru_cache_uninit;
@@ -1833,6 +1787,10 @@ init_xfs_fs(void)
        if (error)
                goto out_cleanup_procfs;
 
+       error = xfs_init_workqueues();
+       if (error)
+               goto out_sysctl_unregister;
+
        vfs_initquota();
 
        error = register_filesystem(&xfs_fs_type);
@@ -1850,6 +1808,8 @@ init_xfs_fs(void)
        xfs_filestream_uninit();
  out_mru_cache_uninit:
        xfs_mru_cache_uninit();
+ out_destroy_wq:
+       xfs_destroy_workqueues();
  out_destroy_zones:
        xfs_destroy_zones();
  out:
@@ -1866,6 +1826,7 @@ exit_xfs_fs(void)
        xfs_buf_terminate();
        xfs_filestream_uninit();
        xfs_mru_cache_uninit();
+       xfs_destroy_workqueues();
        xfs_destroy_zones();
 }
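
At this point in the tree alloc_workqueue() takes (name, flags, max_active); passing 0
for max_active picks the default, while xfs_init_workqueues() above deliberately caps it
at 8 per CPU. A minimal sketch of the same allocate/teardown shape (my_wq and the module
hooks are hypothetical):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
        my_wq = alloc_workqueue("mywq", WQ_CPU_INTENSIVE, 8);
        if (!my_wq)
                return -ENOMEM;
        return 0;
}

static void __exit my_exit(void)
{
        destroy_workqueue(my_wq);       /* drains pending work first */
}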
 
index 9cf35a6..e4f9c1b 100644 (file)
@@ -22,6 +22,7 @@
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -39,6 +40,8 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
+struct workqueue_struct        *xfs_syncd_wq;  /* sync workqueue */
+
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -431,62 +434,12 @@ xfs_quiesce_attr(
        xfs_unmountfs_writesb(mp);
 }
 
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
-       struct xfs_mount *mp,
-       void            *data,
-       void            (*syncer)(struct xfs_mount *, void *),
-       struct completion *completion)
-{
-       struct xfs_sync_work *work;
-
-       work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
-       INIT_LIST_HEAD(&work->w_list);
-       work->w_syncer = syncer;
-       work->w_data = data;
-       work->w_mount = mp;
-       work->w_completion = completion;
-       spin_lock(&mp->m_sync_lock);
-       list_add_tail(&work->w_list, &mp->m_sync_list);
-       spin_unlock(&mp->m_sync_lock);
-       wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations.  At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inodes_work(
-       struct xfs_mount *mp,
-       void            *arg)
-{
-       struct inode    *inode = arg;
-       xfs_sync_data(mp, SYNC_TRYLOCK);
-       xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
-       iput(inode);
-}
-
-void
-xfs_flush_inodes(
-       xfs_inode_t     *ip)
+static void
+xfs_syncd_queue_sync(
+       struct xfs_mount        *mp)
 {
-       struct inode    *inode = VFS_I(ip);
-       DECLARE_COMPLETION_ONSTACK(completion);
-
-       igrab(inode);
-       xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
-       wait_for_completion(&completion);
-       xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
+       queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+                               msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
 /*
@@ -496,9 +449,10 @@ xfs_flush_inodes(
  */
 STATIC void
 xfs_sync_worker(
-       struct xfs_mount *mp,
-       void            *unused)
+       struct work_struct *work)
 {
+       struct xfs_mount *mp = container_of(to_delayed_work(work),
+                                       struct xfs_mount, m_sync_work);
        int             error;
 
        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -508,73 +462,106 @@ xfs_sync_worker(
                        error = xfs_fs_log_dummy(mp);
                else
                        xfs_log_force(mp, 0);
-               xfs_reclaim_inodes(mp, 0);
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+               /* start pushing all the metadata that is currently dirty */
+               xfs_ail_push_all(mp->m_ail);
        }
-       mp->m_sync_seq++;
-       wake_up(&mp->m_wait_single_sync_task);
+
+       /* queue us up again */
+       xfs_syncd_queue_sync(mp);
 }
 
-STATIC int
-xfssyncd(
-       void                    *arg)
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+       struct xfs_mount        *mp)
 {
-       struct xfs_mount        *mp = arg;
-       long                    timeleft;
-       xfs_sync_work_t         *work, *n;
-       LIST_HEAD               (tmp);
-
-       set_freezable();
-       timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
-       for (;;) {
-               if (list_empty(&mp->m_sync_list))
-                       timeleft = schedule_timeout_interruptible(timeleft);
-               /* swsusp */
-               try_to_freeze();
-               if (kthread_should_stop() && list_empty(&mp->m_sync_list))
-                       break;
 
-               spin_lock(&mp->m_sync_lock);
-               /*
-                * We can get woken by laptop mode, to do a sync -
-                * that's the (only!) case where the list would be
-                * empty with time remaining.
-                */
-               if (!timeleft || list_empty(&mp->m_sync_list)) {
-                       if (!timeleft)
-                               timeleft = xfs_syncd_centisecs *
-                                                       msecs_to_jiffies(10);
-                       INIT_LIST_HEAD(&mp->m_sync_work.w_list);
-                       list_add_tail(&mp->m_sync_work.w_list,
-                                       &mp->m_sync_list);
-               }
-               list_splice_init(&mp->m_sync_list, &tmp);
-               spin_unlock(&mp->m_sync_lock);
+       /*
+        * We can have inodes enter reclaim after we've shut down the syncd
+        * workqueue during unmount, so don't allow reclaim work to be queued
+        * during unmount.
+        */
+       if (!(mp->m_super->s_flags & MS_ACTIVE))
+               return;
 
-               list_for_each_entry_safe(work, n, &tmp, w_list) {
-                       (*work->w_syncer)(mp, work->w_data);
-                       list_del(&work->w_list);
-                       if (work == &mp->m_sync_work)
-                               continue;
-                       if (work->w_completion)
-                               complete(work->w_completion);
-                       kmem_free(work);
-               }
+       rcu_read_lock();
+       if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+               queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+                       msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        }
+       rcu_read_unlock();
+}
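
To make the "every 5s" above concrete: xfs_syncd_centisecs defaults to 3000 (30 seconds),
so the delay evaluates to msecs_to_jiffies(3000 / 6 * 10) = msecs_to_jiffies(5000), i.e.
one reclaim pass every 5 seconds, assuming the default tunable value.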
 
-       return 0;
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+       struct work_struct *work)
+{
+       struct xfs_mount *mp = container_of(to_delayed_work(work),
+                                       struct xfs_mount, m_reclaim_work);
+
+       xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+       xfs_syncd_queue_reclaim(mp);
+}
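
Both workers follow the same self-rearming delayed-work shape that replaces the old
kthread loop: recover the owning structure via container_of(to_delayed_work(work), ...),
do one pass, then requeue. Stripped down (my_ctx, my_wq and do_one_pass() are
hypothetical):

#include <linux/workqueue.h>

struct my_ctx {
        struct delayed_work     work;
        /* ... per-instance state ... */
};

extern struct workqueue_struct *my_wq;          /* hypothetical */
extern void do_one_pass(struct my_ctx *ctx);    /* hypothetical */

static void my_worker(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(to_delayed_work(work),
                                          struct my_ctx, work);

        do_one_pass(ctx);
        /* re-arm; this replaces the kthread's schedule_timeout() loop */
        queue_delayed_work(my_wq, &ctx->work, msecs_to_jiffies(5000));
}

/* setup:    INIT_DELAYED_WORK(&ctx->work, my_worker);
 * teardown: cancel_delayed_work_sync(&ctx->work), as in xfs_syncd_stop()
 */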
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations.  At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
+ */
+void
+xfs_flush_inodes(
+       struct xfs_inode        *ip)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       queue_work(xfs_syncd_wq, &mp->m_flush_work);
+       flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+       struct work_struct *work)
+{
+       struct xfs_mount *mp = container_of(work,
+                                       struct xfs_mount, m_flush_work);
+
+       xfs_sync_data(mp, SYNC_TRYLOCK);
+       xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
 }
 
 int
 xfs_syncd_init(
        struct xfs_mount        *mp)
 {
-       mp->m_sync_work.w_syncer = xfs_sync_worker;
-       mp->m_sync_work.w_mount = mp;
-       mp->m_sync_work.w_completion = NULL;
-       mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
-       if (IS_ERR(mp->m_sync_task))
-               return -PTR_ERR(mp->m_sync_task);
+       INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+       INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+       INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+       xfs_syncd_queue_sync(mp);
+       xfs_syncd_queue_reclaim(mp);
+
        return 0;
 }
 
@@ -582,7 +569,9 @@ void
 xfs_syncd_stop(
        struct xfs_mount        *mp)
 {
-       kthread_stop(mp->m_sync_task);
+       cancel_delayed_work_sync(&mp->m_sync_work);
+       cancel_delayed_work_sync(&mp->m_reclaim_work);
+       cancel_work_sync(&mp->m_flush_work);
 }
 
 void
@@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
+
+               /* schedule periodic background inode reclaim */
+               xfs_syncd_queue_reclaim(ip->i_mount);
+
                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
@@ -1017,7 +1010,13 @@ xfs_reclaim_inodes(
 }
 
 /*
- * Shrinker infrastructure.
+ * Inode cache shrinker.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we throttle the speed of reclaim by doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
  */
 static int
 xfs_reclaim_inode_shrink(
@@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink(
 
        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
+               /* kick background reclaimer and push the AIL */
+               xfs_syncd_queue_reclaim(mp);
+               xfs_ail_push_all(mp->m_ail);
+
                if (!(gfp_mask & __GFP_FS))
                        return -1;
 
-               xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+               xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
+                                       &nr_to_scan);
                /* terminate if we don't exhaust the scan */
                if (nr_to_scan > 0)
                        return -1;
index 32ba662..e3a6ad2 100644 (file)
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
 #define SYNC_WAIT              0x0001  /* wait for i/o to complete */
 #define SYNC_TRYLOCK           0x0002  /* only try to lock inodes */
 
+extern struct workqueue_struct *xfs_syncd_wq;  /* sync workqueue */
+
 int xfs_syncd_init(struct xfs_mount *mp);
 void xfs_syncd_stop(struct xfs_mount *mp);
 
index 254ee06..69228aa 100644 (file)
@@ -461,12 +461,10 @@ xfs_qm_dqflush_all(
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        int                     recl;
        struct xfs_dquot        *dqp;
-       int                     niters;
        int                     error;
 
        if (!q)
                return 0;
-       niters = 0;
 again:
        mutex_lock(&q->qi_dqlist_lock);
        list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs(
 {
        xfs_buf_t       *bp;
        int             error;
-       int             notcommitted;
-       int             incr;
        int             type;
 
        ASSERT(blkcnt > 0);
-       notcommitted = 0;
-       incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
-               XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
        type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
                (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
        error = 0;
index c9446f1..567b29b 100644 (file)
@@ -65,11 +65,6 @@ extern kmem_zone_t   *qm_dqtrxzone;
  * block in the dquot/xqm code.
  */
 #define XFS_DQUOT_CLUSTER_SIZE_FSB     (xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ     3
 
 typedef xfs_dqhash_t   xfs_dqlist_t;
 
index 0d62a07..2dadb15 100644 (file)
@@ -313,14 +313,12 @@ xfs_qm_scall_quotaon(
 {
        int             error;
        uint            qf;
-       uint            accflags;
        __int64_t       sbflags;
 
        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
        /*
         * Switching on quota accounting must be done at mount time.
         */
-       accflags = flags & XFS_ALL_QUOTA_ACCT;
        flags &= ~(XFS_ALL_QUOTA_ACCT);
 
        sbflags = 0;
index 4bc3c64..27d64d7 100644 (file)
@@ -2395,17 +2395,33 @@ xfs_free_extent(
        memset(&args, 0, sizeof(xfs_alloc_arg_t));
        args.tp = tp;
        args.mp = tp->t_mountp;
+
+       /*
+        * validate that the block number is legal - this enables us to detect
+        * and handle silent filesystem corruption rather than crashing.
+        */
        args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
-       ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+       if (args.agno >= args.mp->m_sb.sb_agcount)
+               return EFSCORRUPTED;
+
        args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+       if (args.agbno >= args.mp->m_sb.sb_agblocks)
+               return EFSCORRUPTED;
+
        args.pag = xfs_perag_get(args.mp, args.agno);
-       if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
+       ASSERT(args.pag);
+
+       error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+       if (error)
                goto error0;
-#ifdef DEBUG
-       ASSERT(args.agbp != NULL);
-       ASSERT((args.agbno + len) <=
-               be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
+
+       /* validate the extent size is legal now that we have the agf locked */
+       if (args.agbno + len >
+                       be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+               error = EFSCORRUPTED;
+               goto error0;
+       }
+
        error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
 error0:
        xfs_perag_put(args.pag);
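
A note on the error convention in this hunk: the XFS core historically returns positive
errnos, hence "return EFSCORRUPTED;" rather than -EFSCORRUPTED; the sign is flipped once
at the VFS boundary, as in the "return -error;" visible in the xfs_fs_sync_fs() hunk
above.
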
index 46cc401..576fdfe 100644 (file)
@@ -197,6 +197,41 @@ xfs_inode_item_size(
        return nvecs;
 }
 
+/*
+ * xfs_inode_item_format_extents - convert in-core extents to on-disk form
+ *
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extent as we place them into the on-disk inode. In this case, we
+ * need to do this conversion before we write the extents into the log. Because
+ * we don't have the disk inode to write into here, we allocate a buffer and
+ * format the extents into it via xfs_iextents_copy(). We free the buffer in
+ * the unlock routine after the copy for the log has been made.
+ *
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only log on-disk extents
+ * here, so always use the physical fork size to determine the size of the
+ * buffer we need to allocate.
+ */
+STATIC void
+xfs_inode_item_format_extents(
+       struct xfs_inode        *ip,
+       struct xfs_log_iovec    *vecp,
+       int                     whichfork,
+       int                     type)
+{
+       xfs_bmbt_rec_t          *ext_buffer;
+
+       ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
+       if (whichfork == XFS_DATA_FORK)
+               ip->i_itemp->ili_extents_buf = ext_buffer;
+       else
+               ip->i_itemp->ili_aextents_buf = ext_buffer;
+
+       vecp->i_addr = ext_buffer;
+       vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
+       vecp->i_type = type;
+}
+
 /*
  * This is called to fill in the vector of log iovecs for the
  * given inode log item.  It fills the first item with an inode
@@ -213,7 +248,6 @@ xfs_inode_item_format(
        struct xfs_inode        *ip = iip->ili_inode;
        uint                    nvecs;
        size_t                  data_bytes;
-       xfs_bmbt_rec_t          *ext_buffer;
        xfs_mount_t             *mp;
 
        vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
                        } else
 #endif
                        {
-                               /*
-                                * There are delayed allocation extents
-                                * in the inode, or we need to convert
-                                * the extents to on disk format.
-                                * Use xfs_iextents_copy()
-                                * to copy only the real extents into
-                                * a separate buffer.  We'll free the
-                                * buffer in the unlock routine.
-                                */
-                               ext_buffer = kmem_alloc(ip->i_df.if_bytes,
-                                       KM_SLEEP);
-                               iip->ili_extents_buf = ext_buffer;
-                               vecp->i_addr = ext_buffer;
-                               vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
-                                               XFS_DATA_FORK);
-                               vecp->i_type = XLOG_REG_TYPE_IEXT;
+                               xfs_inode_item_format_extents(ip, vecp,
+                                       XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
                        }
                        ASSERT(vecp->i_len <= ip->i_df.if_bytes);
                        iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
                         */
                        vecp->i_addr = ip->i_afp->if_u1.if_extents;
                        vecp->i_len = ip->i_afp->if_bytes;
+                       vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
 #else
                        ASSERT(iip->ili_aextents_buf == NULL);
-                       /*
-                        * Need to endian flip before logging
-                        */
-                       ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
-                               KM_SLEEP);
-                       iip->ili_aextents_buf = ext_buffer;
-                       vecp->i_addr = ext_buffer;
-                       vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
-                                       XFS_ATTR_FORK);
+                       xfs_inode_item_format_extents(ip, vecp,
+                                       XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
 #endif
-                       vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
                        iip->ili_format.ilf_asize = vecp->i_len;
                        vecp++;
                        nvecs++;
index dc1882a..751e94f 100644 (file)
@@ -204,7 +204,6 @@ xfs_bulkstat(
        xfs_agi_t               *agi;   /* agi header data */
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
-       xfs_daddr_t             bno;    /* inode cluster start daddr */
        int                     chunkidx; /* current index into inode chunk */
        int                     clustidx; /* current index into inode cluster */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
                                                 mp->m_sb.sb_inopblog);
                                }
                                ino = XFS_AGINO_TO_INO(mp, agno, agino);
-                               bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
                                /*
                                 * Skip if this inode is free.
                                 */
index 25efa9b..b612ce4 100644 (file)
@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
                break;
        case XLOG_STATE_COVER_NEED:
        case XLOG_STATE_COVER_NEED2:
-               if (!xfs_trans_ail_tail(log->l_ailp) &&
+               if (!xfs_ail_min_lsn(log->l_ailp) &&
                    xlog_iclogs_empty(log)) {
                        if (log->l_covered_state == XLOG_STATE_COVER_NEED)
                                log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
        xfs_lsn_t               tail_lsn;
        struct log              *log = mp->m_log;
 
-       tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+       tail_lsn = xfs_ail_min_lsn(mp->m_ail);
        if (!tail_lsn)
                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
         * the filesystem is shutting down.
         */
        if (!XLOG_FORCED_SHUTDOWN(log))
-               xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+               xfs_ail_push(log->l_ailp, threshold_lsn);
 }
 
 /*
@@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr(
                xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
 }
 
+/*
+ * Check to make sure the grant write head didn't just overlap the tail.  If
+ * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
+ * the cycles differ by exactly one and check the byte count.
+ *
+ * This check is run unlocked, so it can give false positives. Rather than
+ * assert on failures, use a warn-once flag and a panic tag to allow the admin
+ * to determine if they want to panic the machine when such an error occurs.
+ * For debug kernels this will have the same effect as using an assert but,
+ * unlike an assert, it can be turned off at runtime.
+ */
 STATIC void
 xlog_verify_grant_tail(
        struct log      *log)
@@ -3414,17 +3425,22 @@ xlog_verify_grant_tail(
        int             tail_cycle, tail_blocks;
        int             cycle, space;
 
-       /*
-        * Check to make sure the grant write head didn't just over lap the
-        * tail.  If the cycles are the same, we can't be overlapping.
-        * Otherwise, make sure that the cycles differ by exactly one and
-        * check the byte count.
-        */
        xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
        xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
        if (tail_cycle != cycle) {
-               ASSERT(cycle - 1 == tail_cycle);
-               ASSERT(space <= BBTOB(tail_blocks));
+               if (cycle - 1 != tail_cycle &&
+                   !(log->l_flags & XLOG_TAIL_WARN)) {
+                       xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+                               "%s: cycle - 1 != tail_cycle", __func__);
+                       log->l_flags |= XLOG_TAIL_WARN;
+               }
+
+               if (space > BBTOB(tail_blocks) &&
+                   !(log->l_flags & XLOG_TAIL_WARN)) {
+                       xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+                               "%s: space > BBTOB(tail_blocks)", __func__);
+                       log->l_flags |= XLOG_TAIL_WARN;
+               }
        }
 }
 
index ffae692..5864850 100644 (file)
@@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
 #define        XLOG_RECOVERY_NEEDED    0x4     /* log was recovered */
 #define XLOG_IO_ERROR          0x8     /* log hit an I/O error, and being
                                           shutdown */
+#define XLOG_TAIL_WARN         0x10    /* log tail verify warning issued */
 
 #ifdef __KERNEL__
 /*
index a62e897..19af0ab 100644 (file)
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
        struct mutex            m_icsb_mutex;   /* balancer sync lock */
 #endif
        struct xfs_mru_cache    *m_filestream;  /* per-mount filestream data */
-       struct task_struct      *m_sync_task;   /* generalised sync thread */
-       xfs_sync_work_t         m_sync_work;    /* work item for VFS_SYNC */
-       struct list_head        m_sync_list;    /* sync thread work item list */
-       spinlock_t              m_sync_lock;    /* work item list lock */
-       int                     m_sync_seq;     /* sync thread generation no. */
-       wait_queue_head_t       m_wait_single_sync_task;
+       struct delayed_work     m_sync_work;    /* background sync work */
+       struct delayed_work     m_reclaim_work; /* background inode reclaim */
+       struct work_struct      m_flush_work;   /* background inode flush */
        __int64_t               m_update_flags; /* sb flags we need to update
                                                   on the next remount,rw */
        struct shrinker         m_inode_shrink; /* inode reclaim shrinker */
index 12aff95..acdb92f 100644 (file)
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
-STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
-STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
+struct workqueue_struct        *xfs_ail_wq;    /* AIL workqueue */
 
 #ifdef DEBUG
-STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
-#else
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       xfs_log_item_t  *prev_lip;
+
+       if (list_empty(&ailp->xa_ail))
+               return;
+
+       /*
+        * Check the next and previous entries are valid.
+        */
+       ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+       prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+       if (&prev_lip->li_ail != &ailp->xa_ail)
+               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+
+       prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+       if (&prev_lip->li_ail != &ailp->xa_ail)
+               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+
+#ifdef XFS_TRANS_DEBUG
+       /*
+        * Walk the list checking lsn ordering, and that every entry has the
+        * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+        * when specifically debugging the transaction subsystem.
+        */
+       prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+       list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+               if (&prev_lip->li_ail != &ailp->xa_ail)
+                       ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+               ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+               prev_lip = lip;
+       }
+#endif /* XFS_TRANS_DEBUG */
+}
+#else /* !DEBUG */
 #define        xfs_ail_check(a,l)
 #endif /* DEBUG */
 
+/*
+ * Return a pointer to the first item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_min(
+       struct xfs_ail  *ailp)
+{
+       if (list_empty(&ailp->xa_ail))
+               return NULL;
+
+       return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the last item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+       struct xfs_ail  *ailp)
+{
+       if (list_empty(&ailp->xa_ail))
+               return NULL;
+
+       return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the item which follows the given item in the AIL.  If
+ * the given item is the last item in the list, then return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_next(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       if (lip->li_ail.next == &ailp->xa_ail)
+               return NULL;
+
+       return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
+}
 
 /*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log.  This is exactly the LSN of the first
- * item in the AIL.  If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
+ * is empty, then this function returns 0.
  *
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
  */
 xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
 {
-       xfs_lsn_t       lsn;
+       xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;
 
        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
-       if (lip == NULL) {
-               lsn = (xfs_lsn_t)0;
-       } else {
+       if (lip)
                lsn = lip->li_lsn;
-       }
        spin_unlock(&ailp->xa_lock);
 
        return lsn;
 }
 
 /*
- * xfs_trans_push_ail
- *
- * This routine is called to move the tail of the AIL forward.  It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * the push is run asynchronously in a separate thread, so we return the tail
- * of the log right now instead of the tail after the push. This means we will
- * either continue right away, or we will sleep waiting on the async thread to
- * do its work.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
  */
-void
-xfs_trans_ail_push(
-       struct xfs_ail  *ailp,
-       xfs_lsn_t       threshold_lsn)
+static xfs_lsn_t
+xfs_ail_max_lsn(
+       struct xfs_ail  *ailp)
 {
-       xfs_log_item_t  *lip;
+       xfs_lsn_t       lsn = 0;
+       xfs_log_item_t  *lip;
 
-       lip = xfs_ail_min(ailp);
-       if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-               if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
-                       xfsaild_wakeup(ailp, threshold_lsn);
-       }
+       spin_lock(&ailp->xa_lock);
+       lip = xfs_ail_max(ailp);
+       if (lip)
+               lsn = lip->li_lsn;
+       spin_unlock(&ailp->xa_lock);
+
+       return lsn;
 }
 
 /*
@@ -236,16 +300,57 @@ out:
 }
 
 /*
- * xfsaild_push does the work of pushing on the AIL.  Returning a timeout of
- * zero indicates that the caller should sleep until woken.
+ * Splice the log item list into the AIL at the given LSN.
  */
-long
-xfsaild_push(
-       struct xfs_ail  *ailp,
-       xfs_lsn_t       *last_lsn)
+static void
+xfs_ail_splice(
+       struct xfs_ail  *ailp,
+       struct list_head *list,
+       xfs_lsn_t       lsn)
 {
-       long            tout = 0;
-       xfs_lsn_t       last_pushed_lsn = *last_lsn;
+       xfs_log_item_t  *next_lip;
+
+       /* If the list is empty, just insert the item.  */
+       if (list_empty(&ailp->xa_ail)) {
+               list_splice(list, &ailp->xa_ail);
+               return;
+       }
+
+       list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+               if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+                       break;
+       }
+
+       ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+              XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+       list_splice_init(list, &next_lip->li_ail);
+}
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       xfs_ail_check(ailp, lip);
+       list_del(&lip->li_ail);
+       xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
+/*
+ * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+ * to run at a later time if there is more work to do to complete the push.
+ */
+STATIC void
+xfs_ail_worker(
+       struct work_struct *work)
+{
+       struct xfs_ail  *ailp = container_of(to_delayed_work(work),
+                                       struct xfs_ail, xa_work);
+       long            tout;
        xfs_lsn_t       target =  ailp->xa_target;
        xfs_lsn_t       lsn;
        xfs_log_item_t  *lip;
@@ -256,15 +361,15 @@ xfsaild_push(
 
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_cursor_init(ailp, cur);
-       lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
+       lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
        if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
                /*
                 * AIL is empty or our push has reached the end.
                 */
                xfs_trans_ail_cursor_done(ailp, cur);
                spin_unlock(&ailp->xa_lock);
-               *last_lsn = 0;
-               return tout;
+               ailp->xa_last_pushed_lsn = 0;
+               return;
        }
 
        XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        IOP_PUSH(lip);
-                       last_pushed_lsn = lsn;
+                       ailp->xa_last_pushed_lsn = lsn;
                        break;
 
                case XFS_ITEM_PUSHBUF:
                        XFS_STATS_INC(xs_push_ail_pushbuf);
                        IOP_PUSHBUF(lip);
-                       last_pushed_lsn = lsn;
+                       ailp->xa_last_pushed_lsn = lsn;
                        push_xfsbufd = 1;
                        break;
 
@@ -319,7 +424,7 @@ xfsaild_push(
 
                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
-                       last_pushed_lsn = lsn;
+                       ailp->xa_last_pushed_lsn = lsn;
                        stuck++;
                        break;
 
@@ -374,9 +479,23 @@ xfsaild_push(
                wake_up_process(mp->m_ddev_targp->bt_task);
        }
 
+       /* assume we have more work to do in a short while */
+       tout = 10;
        if (!count) {
                /* We're past our target or empty, so idle */
-               last_pushed_lsn = 0;
+               ailp->xa_last_pushed_lsn = 0;
+
+               /*
+                * Check for an updated push target before clearing the
+                * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
+                * work to do. Wait a bit longer before starting that work.
+                */
+               smp_rmb();
+               if (ailp->xa_target == target) {
+                       clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+                       return;
+               }
+               tout = 50;
        } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                /*
                 * We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
                 * start the next scan from the start of the AIL.
                 */
                tout = 50;
-               last_pushed_lsn = 0;
+               ailp->xa_last_pushed_lsn = 0;
        } else if ((stuck * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
                 * continuing from where we were.
                 */
                tout = 20;
-       } else {
-               /* more to do, but wait a short while before continuing */
-               tout = 10;
        }
-       *last_lsn = last_pushed_lsn;
-       return tout;
+
+       /* There is more to do, requeue us.  */
+       queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+                                       msecs_to_jiffies(tout));
+}
+
+/*
+ * This routine is called to move the tail of the AIL forward.  It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
+ *
+ * The push is run asynchronously in a workqueue, which means the caller needs
+ * to handle waiting on the async flush for space to become available.
+ * We don't want to interrupt any push that is in progress, hence we only queue
+ * work if we set the pushing bit appropriately.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called. We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
+ */
+void
+xfs_ail_push(
+       struct xfs_ail  *ailp,
+       xfs_lsn_t       threshold_lsn)
+{
+       xfs_log_item_t  *lip;
+
+       lip = xfs_ail_min(ailp);
+       if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
+           XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
+               return;
+
+       /*
+        * Ensure that the new target is noticed by the push code before it clears
+        * the XFS_AIL_PUSHING_BIT.
+        */
+       smp_wmb();
+       ailp->xa_target = threshold_lsn;
+       if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+               queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
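
To see the handshake in one place, here are the two sides condensed from the hunks above
(producer from xfs_ail_push(), consumer from xfs_ail_worker()); the barrier pair prevents
a target update from being lost while the worker decides to go idle:

/* producer (xfs_ail_push): publish the new target, then claim the bit */
smp_wmb();
ailp->xa_target = threshold_lsn;
if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
        queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);

/* consumer (xfs_ail_worker), after an empty pass; "target" is the value
 * sampled when the pass started */
smp_rmb();
if (ailp->xa_target == target) {
        clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
        return;                 /* idle; the next push requeues us */
}
tout = 50;                      /* target moved, keep pushing */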
 
+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+       struct xfs_ail  *ailp)
+{
+       xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);
+
+       if (threshold_lsn)
+               xfs_ail_push(ailp, threshold_lsn);
+}
 
 /*
  * This is to be called when an item is unlocked that may have
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
        xfs_mount_t     *mp)
 {
        struct xfs_ail  *ailp;
-       int             error;
 
        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
        ailp->xa_mount = mp;
        INIT_LIST_HEAD(&ailp->xa_ail);
        spin_lock_init(&ailp->xa_lock);
-       error = xfsaild_start(ailp);
-       if (error)
-               goto out_free_ailp;
+       INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
        mp->m_ail = ailp;
        return 0;
-
-out_free_ailp:
-       kmem_free(ailp);
-       return error;
 }
 
 void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
 {
        struct xfs_ail  *ailp = mp->m_ail;
 
-       xfsaild_stop(ailp);
+       cancel_delayed_work_sync(&ailp->xa_work);
        kmem_free(ailp);
 }
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
-       struct xfs_ail  *ailp,
-       struct list_head *list,
-       xfs_lsn_t       lsn)
-{
-       xfs_log_item_t  *next_lip;
-
-       /*
-        * If the list is empty, just insert the item.
-        */
-       if (list_empty(&ailp->xa_ail)) {
-               list_splice(list, &ailp->xa_ail);
-               return;
-       }
-
-       list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-               if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
-                       break;
-       }
-
-       ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-              (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
-       list_splice_init(list, &next_lip->li_ail);
-       return;
-}
-
-/*
- * Delete the given item from the AIL.  Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       xfs_ail_check(ailp, lip);
-       list_del(&lip->li_ail);
-       xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
-       struct xfs_ail  *ailp)
-{
-       if (list_empty(&ailp->xa_ail))
-               return NULL;
-
-       return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL.  If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       if (lip->li_ail.next == &ailp->xa_ail)
-               return NULL;
-
-       return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       xfs_log_item_t  *prev_lip;
-
-       if (list_empty(&ailp->xa_ail))
-               return;
-
-       /*
-        * Check the next and previous entries are valid.
-        */
-       ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-       prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
-       if (&prev_lip->li_ail != &ailp->xa_ail)
-               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
-       prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
-       if (&prev_lip->li_ail != &ailp->xa_ail)
-               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
-       /*
-        * Walk the list checking lsn ordering, and that every entry has the
-        * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
-        * when specifically debugging the transaction subsystem.
-        */
-       prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-       list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
-               if (&prev_lip->li_ail != &ailp->xa_ail)
-                       ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-               ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-               prev_lip = lip;
-       }
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */
index 35162c2..6b164e9 100644 (file)
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
 struct xfs_ail {
        struct xfs_mount        *xa_mount;
        struct list_head        xa_ail;
-       uint                    xa_gen;
-       struct task_struct      *xa_task;
        xfs_lsn_t               xa_target;
        struct xfs_ail_cursor   xa_cursors;
        spinlock_t              xa_lock;
+       struct delayed_work     xa_work;
+       xfs_lsn_t               xa_last_pushed_lsn;
+       unsigned long           xa_flags;
 };
 
+#define XFS_AIL_PUSHING_BIT    0
+
 /*
  * From xfs_trans_ail.c
  */
+
+extern struct workqueue_struct *xfs_ail_wq;    /* AIL workqueue */
+
 void   xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
                                struct xfs_log_item **log_items, int nr_items,
                                xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
        xfs_trans_ail_delete_bulk(ailp, &lip, 1);
 }
 
-void                   xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void                   xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void                   xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t              xfs_ail_min_lsn(struct xfs_ail *ailp);
+
 void                   xfs_trans_unlocked_item(struct xfs_ail *,
                                        xfs_log_item_t *);
 
-xfs_lsn_t              xfs_trans_ail_tail(struct xfs_ail *ailp);
-
 struct xfs_log_item    *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
                                        struct xfs_ail_cursor *cur,
                                        xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item        *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
 void                   xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
                                        struct xfs_ail_cursor *cur);
 
-long   xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
-void   xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
-int    xfsaild_start(struct xfs_ail *);
-void   xfsaild_stop(struct xfs_ail *);
-
 #if BITS_PER_LONG != 64
 static inline void
 xfs_trans_ail_copy_lsn(
index 32176cc..cbbfd98 100644 (file)
@@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
@@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *);
 struct blk_plug {
        unsigned long magic;
        struct list_head list;
+       struct list_head cb_list;
        unsigned int should_sort;
 };
+struct blk_plug_cb {
+       struct list_head list;
+       void (*callback)(struct blk_plug_cb *);
+};
 
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
-extern void __blk_flush_plug(struct task_struct *, struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
 
 static inline void blk_flush_plug(struct task_struct *tsk)
 {
        struct blk_plug *plug = tsk->plug;
 
-       if (unlikely(plug))
-               __blk_flush_plug(tsk, plug);
+       if (plug)
+               blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+       struct blk_plug *plug = tsk->plug;
+
+       if (plug)
+               blk_flush_plug_list(plug, true);
 }
 
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
        struct blk_plug *plug = tsk->plug;
 
-       return plug && !list_empty(&plug->list);
+       return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
 }
 
 /*
@@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task)
 {
 }
 
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
        return false;
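
The new cb_list/blk_plug_cb pair lets other subsystems piggy-back on the unplug event:
embed a blk_plug_cb, point .callback at a handler, and hang it off the current task's
plug; blk_flush_plug_list() invokes the callbacks when the plug is flushed. A hedged
sketch (my_plug_cb, my_dev and kick_pending_io() are hypothetical, not part of this
patch):

#include <linux/blkdev.h>
#include <linux/sched.h>

struct my_dev;
extern void kick_pending_io(struct my_dev *dev);        /* hypothetical */

struct my_plug_cb {
        struct blk_plug_cb      cb;
        struct my_dev           *dev;
};

static void my_unplug(struct blk_plug_cb *cb)
{
        struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

        kick_pending_io(mcb->dev);
}

static void my_defer_to_unplug(struct my_plug_cb *mcb)
{
        struct blk_plug *plug = current->plug;

        if (!plug)
                return;         /* no plug; caller must kick I/O now */
        mcb->cb.callback = my_unplug;
        list_add(&mcb->cb.list, &plug->cb_list);
}
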
index e276883..32a4423 100644 (file)
@@ -197,7 +197,6 @@ struct dm_target {
 struct dm_target_callbacks {
        struct list_head list;
        int (*congested_fn) (struct dm_target_callbacks *, int);
-       void (*unplug_fn)(struct dm_target_callbacks *);
 };
 
 int dm_register_target(struct target_type *t);
index f3a7794..771d6d8 100644 (file)
@@ -167,6 +167,7 @@ struct input_keymap_entry {
 #define SYN_REPORT             0
 #define SYN_CONFIG             1
 #define SYN_MT_REPORT          2
+#define SYN_DROPPED            3
 
 /*
  * Keys and buttons
@@ -553,8 +554,8 @@ struct input_keymap_entry {
 #define KEY_DVD                        0x185   /* Media Select DVD */
 #define KEY_AUX                        0x186
 #define KEY_MP3                        0x187
-#define KEY_AUDIO              0x188
-#define KEY_VIDEO              0x189
+#define KEY_AUDIO              0x188   /* AL Audio Browser */
+#define KEY_VIDEO              0x189   /* AL Movie Browser */
 #define KEY_DIRECTORY          0x18a
 #define KEY_LIST               0x18b
 #define KEY_MEMO               0x18c   /* Media Select Messages */
@@ -603,8 +604,9 @@ struct input_keymap_entry {
 #define KEY_FRAMEFORWARD       0x1b5
 #define KEY_CONTEXT_MENU       0x1b6   /* GenDesc - system context menu */
 #define KEY_MEDIA_REPEAT       0x1b7   /* Consumer - transport control */
-#define KEY_10CHANNELSUP        0x1b8   /* 10 channels up (10+) */
-#define KEY_10CHANNELSDOWN      0x1b9   /* 10 channels down (10-) */
+#define KEY_10CHANNELSUP       0x1b8   /* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN     0x1b9   /* 10 channels down (10-) */
+#define KEY_IMAGES             0x1ba   /* AL Image Browser */
 
 #define KEY_DEL_EOL            0x1c0
 #define KEY_DEL_EOS            0x1c1
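
SYN_DROPPED is the new way for evdev to tell a client that events were lost to buffer overrun. A hedged userspace sketch of the expected handling (the helpers are illustrative):

	static void read_events(int fd)
	{
		struct input_event ev;

		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
				/* drop the partial packet and re-read device
				 * state (EVIOCGKEY/EVIOCGABS/...) before
				 * trusting the stream again */
				resync_device_state(fd);
				continue;
			}
			handle_event(&ev);
		}
	}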
index b3ac06a..318bb82 100644
@@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot)
        input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
 }
 
+static inline bool input_is_mt_axis(int axis)
+{
+       return axis == ABS_MT_SLOT ||
+               (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST);
+}
+
 void input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
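
input_is_mt_axis() gives handlers a single test for the slot-based axes, e.g. when regenerating ABS state after a SYN_DROPPED. Sketch (assuming an input_dev *dev in scope; the resend helper is illustrative):

	int code;

	for (code = 0; code <= ABS_MAX; code++) {
		if (!test_bit(code, dev->absbit))
			continue;
		if (input_is_mt_axis(code))
			continue;	/* per-slot state is resynced separately */
		resend_abs_value(dev, code);
	}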
 
index 5a5ce70..5e9840f 100644
@@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
        return ;
 }
 
-static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
 {
        return ;
 }
index ad1b19a..aef2330 100644
@@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones,
  */
 static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
 {
-       return pdev->dev.platform_data;
+       return pdev->mfd_cell;
 }
 
 /*
  * Given a platform device that's been created by mfd_add_devices(), fetch
  * the .mfd_data entry from the mfd_cell that created it.
+ * Otherwise just return the platform_data pointer.
+ * This maintains compatibility with platform drivers whose devices aren't
+ * created by the mfd layer, and expect platform_data to contain what would've
+ * otherwise been in mfd_data.
  */
 static inline void *mfd_get_data(struct platform_device *pdev)
 {
-       return mfd_get_cell(pdev)->mfd_data;
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
+
+       if (cell)
+               return cell->mfd_data;
+       else
+               return pdev->dev.platform_data;
 }
 
 extern int mfd_add_devices(struct device *parent, int id,
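
With the fallback above, a sub-device driver can call mfd_get_data() regardless of whether its platform_device was created by mfd_add_devices() or by board code that passed the same structure via platform_data. A minimal probe sketch (names illustrative):

	static int my_subdev_probe(struct platform_device *pdev)
	{
		/* cell data if the mfd core created us, else platform_data */
		struct my_subdev_pdata *pdata = mfd_get_data(pdev);

		if (!pdata)
			return -EINVAL;
		/* ... use pdata ... */
		return 0;
	}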
index 31afb7e..cdced84 100644
@@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr);
  */
 extern struct pid *find_get_pid(int nr);
 extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-int next_pidmap(struct pid_namespace *pid_ns, int last);
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
 
 extern struct pid *alloc_pid(struct pid_namespace *ns);
 extern void free_pid(struct pid *pid);
index d96db98..744942c 100644
@@ -14,6 +14,8 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 
+struct mfd_cell;
+
 struct platform_device {
        const char      * name;
        int             id;
@@ -23,6 +25,9 @@ struct platform_device {
 
        const struct platform_device_id *id_entry;
 
+       /* MFD cell pointer */
+       struct mfd_cell *mfd_cell;
+
        /* arch specific additions */
        struct pdev_archdata    archdata;
 };
index 369e19d..7f1183d 100644
@@ -24,6 +24,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/posix-timers.h>
+#include <linux/rwsem.h>
 
 struct posix_clock;
 
@@ -104,7 +105,7 @@ struct posix_clock_operations {
  * @ops:     Functional interface to the clock
  * @cdev:    Character device instance for this clock
  * @kref:    Reference count.
- * @mutex:   Protects the 'zombie' field from concurrent access.
+ * @rwsem:   Protects the 'zombie' field from concurrent access.
  * @zombie:  If 'zombie' is true, then the hardware has disappeared.
  * @release: A function to free the structure when the reference count reaches
  *           zero. May be NULL if structure is statically allocated.
@@ -117,7 +118,7 @@ struct posix_clock {
        struct posix_clock_operations ops;
        struct cdev cdev;
        struct kref kref;
-       struct mutex mutex;
+       struct rw_semaphore rwsem;
        bool zombie;
        void (*release)(struct posix_clock *clk);
 };
index 4e37a7c..4d50611 100644
@@ -396,7 +396,7 @@ union rio_pw_msg {
 };
 
 /* Architecture and hardware-specific functions */
-extern void rio_register_mport(struct rio_mport *);
+extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
 extern void rio_close_inb_mbox(struct rio_mport *, int);
 extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
index 7410d33..0cee015 100644
@@ -35,6 +35,7 @@
 #define RIO_DID_IDTCPS6Q               0x035f
 #define RIO_DID_IDTCPS10Q              0x035e
 #define RIO_DID_IDTCPS1848             0x0374
+#define RIO_DID_IDTCPS1432             0x0375
 #define RIO_DID_IDTCPS1616             0x0379
 #define RIO_DID_IDTVPS1616             0x0377
 #define RIO_DID_IDTSPS1616             0x0378
index 2ca7e8a..877ece4 100644
@@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc,
                        struct rtc_wkalrm *alrm);
 extern int rtc_set_alarm(struct rtc_device *rtc,
                                struct rtc_wkalrm *alrm);
+extern int rtc_initialize_alarm(struct rtc_device *rtc,
+                               struct rtc_wkalrm *alrm);
 extern void rtc_update_irq(struct rtc_device *rtc,
                        unsigned long num, unsigned long events);
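
rtc_initialize_alarm() exists so a driver can seed the kernel's alarm state from whatever the hardware still has programmed, without the "alarm in the past" checks that rtc_set_alarm() applies to new requests. Probe-time sketch (the hardware accessor is illustrative):

	struct rtc_wkalrm alrm;

	my_hw_read_alarm(dev, &alrm);		/* what survived the reboot */
	rtc_initialize_alarm(rtc, &alrm);	/* no range/past rejection */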
 
index 4ec2c02..18d63ce 100644
@@ -1254,6 +1254,9 @@ struct task_struct {
 #endif
 
        struct mm_struct *mm, *active_mm;
+#ifdef CONFIG_COMPAT_BRK
+       unsigned brk_randomized:1;
+#endif
 #if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat    rss_stat;
 #endif
index 5a89e36..083ffea 100644
@@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
 #else /* CONFIG_HIBERNATION */
+static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
@@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; }
 
 extern struct mutex pm_mutex;
 
-#ifndef CONFIG_HIBERNATION
-static inline void register_nosave_region(unsigned long b, unsigned long e)
-{
-}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e)
-{
-}
-
+#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
index 461c011..2b3831b 100644
@@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               THP_FAULT_ALLOC,
+               THP_FAULT_FALLBACK,
+               THP_COLLAPSE_ALLOC,
+               THP_COLLAPSE_ALLOC_FAILED,
+               THP_SPLIT,
+#endif
                NR_VM_EVENT_ITEMS
 };
 
index cdf2e8a..d2df55b 100644
@@ -139,8 +139,6 @@ do { \
  */
 
 enum p9_msg_t {
-       P9_TSYNCFS = 0,
-       P9_RSYNCFS,
        P9_TLERROR = 6,
        P9_RLERROR,
        P9_TSTATFS = 8,
index 85c1413..051a99f 100644
@@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt);
 void p9_client_begin_disconnect(struct p9_client *clnt);
 struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
                                        char *uname, u32 n_uname, char *aname);
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
-                                                               int clone);
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+               char **wnames, int clone);
 int p9_client_open(struct p9_fid *fid, int mode);
 int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
                                                        char *extension);
@@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
                gid_t gid, struct p9_qid *qid);
 int p9_client_clunk(struct p9_fid *fid);
 int p9_client_fsync(struct p9_fid *fid, int datasync);
-int p9_client_sync_fs(struct p9_fid *fid);
 int p9_client_remove(struct p9_fid *fid);
 int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
                                                        u64 offset, u32 count);
index 78f18ad..bf36654 100644
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-       TP_PROTO(struct request_queue *q),
+       TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-       TP_ARGS(q),
+       TP_ARGS(q, depth, explicit),
 
        TP_STRUCT__entry(
                __field( int,           nr_rq                   )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
        ),
 
        TP_fast_assign(
-               __entry->nr_rq  = q->rq.count[READ] + q->rq.count[WRITE];
+               __entry->nr_rq = depth;
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -419,31 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
 );
 
 /**
- * block_unplug_timer - timed release of operations requests in queue to device driver
- * @q: request queue to unplug
- *
- * Unplug the request queue @q because a timer expired and allow block
- * operation requests to be sent to the device driver.
- */
-DEFINE_EVENT(block_unplug, block_unplug_timer,
-
-       TP_PROTO(struct request_queue *q),
-
-       TP_ARGS(q)
-);
-
-/**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
+ * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-       TP_PROTO(struct request_queue *q),
+       TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-       TP_ARGS(q)
+       TP_ARGS(q, depth, explicit)
 );
 
 /**
index dfb924f..fe28dc2 100644
@@ -1886,7 +1886,7 @@ retry:
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
-       restart->futex.flags = flags;
+       restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
        ret = -ERESTART_RESTARTBLOCK;
 
index 27960f1..8e81a98 100644
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
                        }
 
                        if (mode & PERF_CGROUP_SWIN) {
+                               WARN_ON_ONCE(cpuctx->cgrp);
                                /* set cgrp before ctxsw in to
                                 * allow event_filter_match() to not
                                 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
        if (!ctx || !ctx->nr_events)
                goto out;
 
+       /*
+        * We must ctxsw out cgroup events to avoid conflict
+        * when invoking perf_task_event_sched_in() later on
+        * in this function. Otherwise we end up trying to
+        * ctxswin cgroup events which are already scheduled
+        * in.
+        */
+       perf_cgroup_sched_out(current);
        task_ctx_sched_out(ctx, EVENT_ALL);
 
        raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
        raw_spin_unlock(&ctx->lock);
 
+       /*
+        * Also calls ctxswin for cgroup events, if any:
+        */
        perf_event_context_sched_in(ctx, ctx->task);
 out:
        local_irq_restore(flags);
index 02f2212..57a8346 100644
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
        return -1;
 }
 
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
 {
        int offset;
        struct pidmap *map, *end;
 
+       if (last >= PID_MAX_LIMIT)
+               return -1;
+
        offset = (last + 1) & BITS_PER_PAGE_MASK;
        map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
        end = &pid_ns->pidmap[PIDMAP_ENTRIES];
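
The unsigned type and the PID_MAX_LIMIT guard matter because 'last' ultimately comes from a user-controlled file offset; stored in a signed int, a huge offset wraps negative and (last + 1)/BITS_PER_PAGE indexes in front of the pidmap array. A small userspace demonstration of the pre-fix wrap (assuming 4K pages and the old signed parameter):

	#include <stdio.h>

	int main(void)
	{
		int last = (int)0x80000000u;	/* offset beyond INT_MAX, truncated */
		int bits_per_page = 4096 * 8;	/* BITS_PER_PAGE with 4K pages */

		/* prints a negative map index */
		printf("map index = %d\n", (last + 1) / bits_per_page);
		return 0;
	}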
index 4603f08..6de9a8f 100644
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
 
          Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HIBERNATE_CALLBACKS
+       bool
+
 config HIBERNATION
        bool "Hibernation (aka 'suspend to disk')"
        depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+       select HIBERNATE_CALLBACKS
        select LZO_COMPRESS
        select LZO_DECOMPRESS
        ---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
 
 config PM_SLEEP
        def_bool y
-       depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+       depends on SUSPEND || HIBERNATE_CALLBACKS
 
 config PM_SLEEP_SMP
        def_bool y
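
The split lets configurations that need the hibernation callbacks, but not full suspend-to-disk, still get PM_SLEEP. For example, a Xen-style save/restore option can now select just the callbacks (a sketch following the pattern this change introduces):

	config XEN_SAVE_RESTORE
		bool
		depends on XEN
		select HIBERNATE_CALLBACKS
		default y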
index 4801363..312f8b9 100644
@@ -4111,20 +4111,20 @@ need_resched:
                                        try_to_wake_up_local(to_wakeup);
                        }
                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+                       /*
+                        * If we are going to sleep and we have plugged IO queued, make
+                        * sure to submit it to avoid deadlocks.
+                        */
+                       if (blk_needs_flush_plug(prev)) {
+                               raw_spin_unlock(&rq->lock);
+                               blk_schedule_flush_plug(prev);
+                               raw_spin_lock(&rq->lock);
+                       }
                }
                switch_count = &prev->nvcsw;
        }
 
-       /*
-        * If we are going to sleep and we have plugged IO queued, make
-        * sure to submit it to avoid deadlocks.
-        */
-       if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
-               raw_spin_unlock(&rq->lock);
-               blk_flush_plug(prev);
-               raw_spin_lock(&rq->lock);
-       }
-
        pre_schedule(rq, prev);
 
        if (unlikely(!rq->nr_running))
index 7f00772..6fa833a 100644
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
              enum cpu_idle_type idle, int *all_pinned,
              int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-       int loops = 0, pulled = 0, pinned = 0;
+       int loops = 0, pulled = 0;
        long rem_load_move = max_load_move;
        struct task_struct *p, *n;
 
        if (max_load_move == 0)
                goto out;
 
-       pinned = 1;
-
        list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
                if (loops++ > sysctl_sched_nr_migrate)
                        break;
 
                if ((p->se.load.weight >> 1) > rem_load_move ||
-                   !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+                   !can_migrate_task(p, busiest, this_cpu, sd, idle,
+                                     all_pinned))
                        continue;
 
                pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
         */
        schedstat_add(sd, lb_gained[idle], pulled);
 
-       if (all_pinned)
-               *all_pinned = pinned;
-
        return max_load_move - rem_load_move;
 }
 
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
        if (!sds.busiest || sds.busiest_nr_running == 0)
                goto out_balanced;
 
+       sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
        /*
         * If the busiest group is imbalanced the below checks don't
         * work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * Don't pull any tasks if this group is already above the domain
         * average load.
         */
-       sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
        if (sds.this_load >= sds.avg_load)
                goto out_balanced;
 
@@ -3340,6 +3337,7 @@ redo:
                 * still unbalanced. ld_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
+               all_pinned = 1;
                local_irq_save(flags);
                double_rq_lock(this_rq, busiest);
                ld_moved = move_tasks(this_rq, this_cpu, busiest,
index 25028dd..c340ca6 100644
@@ -19,7 +19,6 @@
  */
 #include <linux/device.h>
 #include <linux/file.h>
-#include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
 {
        struct posix_clock *clk = fp->private_data;
 
-       mutex_lock(&clk->mutex);
+       down_read(&clk->rwsem);
 
        if (!clk->zombie)
                return clk;
 
-       mutex_unlock(&clk->mutex);
+       up_read(&clk->rwsem);
 
        return NULL;
 }
 
 static void put_posix_clock(struct posix_clock *clk)
 {
-       mutex_unlock(&clk->mutex);
+       up_read(&clk->rwsem);
 }
 
 static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
        struct posix_clock *clk =
                container_of(inode->i_cdev, struct posix_clock, cdev);
 
-       mutex_lock(&clk->mutex);
+       down_read(&clk->rwsem);
 
        if (clk->zombie) {
                err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
                fp->private_data = clk;
        }
 out:
-       mutex_unlock(&clk->mutex);
+       up_read(&clk->rwsem);
        return err;
 }
 
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
        int err;
 
        kref_init(&clk->kref);
-       mutex_init(&clk->mutex);
+       init_rwsem(&clk->rwsem);
 
        cdev_init(&clk->cdev, &posix_clock_file_operations);
        clk->cdev.owner = clk->ops.owner;
        err = cdev_add(&clk->cdev, devid, 1);
-       if (err)
-               goto no_cdev;
 
        return err;
-no_cdev:
-       mutex_destroy(&clk->mutex);
-       return err;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
 static void delete_clock(struct kref *kref)
 {
        struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-       mutex_destroy(&clk->mutex);
+
        if (clk->release)
                clk->release(clk);
 }
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
 {
        cdev_del(&clk->cdev);
 
-       mutex_lock(&clk->mutex);
+       down_write(&clk->rwsem);
        clk->zombie = true;
-       mutex_unlock(&clk->mutex);
+       up_write(&clk->rwsem);
 
        kref_put(&clk->kref, delete_clock);
 }
index 7aa40f8..6957aa2 100644
@@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+                                   unsigned int depth, bool explicit)
 {
        struct blk_trace *bt = q->blk_trace;
 
        if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
+               __be64 rpdu = cpu_to_be64(depth);
+               u32 what;
 
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-                               sizeof(rpdu), &rpdu);
-       }
-}
-
-static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
+               if (explicit)
+                       what = BLK_TA_UNPLUG_IO;
+               else
+                       what = BLK_TA_UNPLUG_TIMER;
 
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
-                               sizeof(rpdu), &rpdu);
+               __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
        }
 }
 
@@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void)
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug, NULL);
        WARN_ON(ret);
-       ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
-       WARN_ON(ret);
-       ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+       ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split, NULL);
        WARN_ON(ret);
@@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
        unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
        unregister_trace_block_split(blk_add_trace_split, NULL);
-       unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
-       unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
+       unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
        unregister_trace_block_plug(blk_add_trace_plug, NULL);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
index 05672e8..a235f3c 100644
@@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
                        val = *s - '0';
                else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
                        val = _tolower(*s) - 'a' + 10;
-               else if (*s == '\n') {
-                       if (*(s + 1) == '\0')
-                               break;
-                       else
-                               return -EINVAL;
-               } else
+               else if (*s == '\n' && *(s + 1) == '\0')
+                       break;
+               else
                        return -EINVAL;
 
                if (val >= base)
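
The rewritten condition keeps the single convenience case: one trailing newline (what a sysfs write usually carries) ends the parse, while anything after it is still rejected. Expected behavior, sketched:

	unsigned long long v;

	kstrtoull("512\n", 0, &v);	/* 0: newline-terminated, v == 512 */
	kstrtoull("512\nx", 0, &v);	/* -EINVAL: bytes after the newline */
	kstrtoull("5x12", 0, &v);	/* -EINVAL: bad digit */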
index 325c2f9..d55769d 100644
@@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void)
                {"65537",       10,     65537},
                {"2147483646",  10,     2147483646},
                {"2147483647",  10,     2147483647},
-               {"2147483648",  10,     2147483648},
-               {"2147483649",  10,     2147483649},
-               {"4294967294",  10,     4294967294},
-               {"4294967295",  10,     4294967295},
-               {"4294967296",  10,     4294967296},
-               {"4294967297",  10,     4294967297},
+               {"2147483648",  10,     2147483648ULL},
+               {"2147483649",  10,     2147483649ULL},
+               {"4294967294",  10,     4294967294ULL},
+               {"4294967295",  10,     4294967295ULL},
+               {"4294967296",  10,     4294967296ULL},
+               {"4294967297",  10,     4294967297ULL},
                {"9223372036854775806", 10,     9223372036854775806ULL},
                {"9223372036854775807", 10,     9223372036854775807ULL},
                {"9223372036854775808", 10,     9223372036854775808ULL},
@@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void)
                {"65537",       10,     65537},
                {"2147483646",  10,     2147483646},
                {"2147483647",  10,     2147483647},
-               {"2147483648",  10,     2147483648},
-               {"2147483649",  10,     2147483649},
-               {"4294967294",  10,     4294967294},
-               {"4294967295",  10,     4294967295},
-               {"4294967296",  10,     4294967296},
-               {"4294967297",  10,     4294967297},
+               {"2147483648",  10,     2147483648LL},
+               {"2147483649",  10,     2147483649LL},
+               {"4294967294",  10,     4294967294LL},
+               {"4294967295",  10,     4294967295LL},
+               {"4294967296",  10,     4294967296LL},
+               {"4294967297",  10,     4294967297LL},
                {"9223372036854775806", 10,     9223372036854775806LL},
                {"9223372036854775807", 10,     9223372036854775807LL},
        };
@@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void)
                {"65537",       10,     65537},
                {"2147483646",  10,     2147483646},
                {"2147483647",  10,     2147483647},
-               {"2147483648",  10,     2147483648},
-               {"2147483649",  10,     2147483649},
-               {"4294967294",  10,     4294967294},
-               {"4294967295",  10,     4294967295},
+               {"2147483648",  10,     2147483648U},
+               {"2147483649",  10,     2147483649U},
+               {"4294967294",  10,     4294967294U},
+               {"4294967295",  10,     4294967295U},
        };
        TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
 }
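
The suffixes matter on 32-bit builds, where an unsuffixed decimal constant above what fits in long draws a compiler warning even though the array fields are 64-bit. Sketched:

	/* on a 32-bit target: */
	unsigned long long a = 4294967296;	/* warns: constant too large for its type */
	unsigned long long b = 4294967296ULL;	/* explicit 64-bit constant, clean */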
index 0a619e0..470dcda 100644
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag)
 {
-       if (test_bit(flag, &transparent_hugepage_flags))
-               return sprintf(buf, "[yes] no\n");
-       else
-               return sprintf(buf, "yes [no]\n");
+       return sprintf(buf, "%d\n",
+                      !!test_bit(flag, &transparent_hugepage_flags));
 }
+
 static ssize_t single_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag)
 {
-       if (!memcmp("yes", buf,
-                   min(sizeof("yes")-1, count))) {
+       unsigned long value;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &value);
+       if (ret < 0)
+               return ret;
+       if (value > 1)
+               return -EINVAL;
+
+       if (value)
                set_bit(flag, &transparent_hugepage_flags);
-       } else if (!memcmp("no", buf,
-                          min(sizeof("no")-1, count))) {
+       else
                clear_bit(flag, &transparent_hugepage_flags);
-       } else
-               return -EINVAL;
 
        return count;
 }
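
These flags now follow the usual 0/1 sysfs convention instead of the "[yes] no" strings. From userspace that looks like the following (the path is one of the single-flag attributes, e.g. khugepaged's defrag; hedged):

	int fd = open("/sys/kernel/mm/transparent_hugepage/khugepaged/defrag",
		      O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* anything other than 0 or 1 is -EINVAL */
		close(fd);
	}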
@@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                          vma, haddr, numa_node_id(), 0);
-               if (unlikely(!page))
+               if (unlikely(!page)) {
+                       count_vm_event(THP_FAULT_FALLBACK);
                        goto out;
+               }
+               count_vm_event(THP_FAULT_ALLOC);
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                        put_page(page);
                        goto out;
@@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                new_page = NULL;
 
        if (unlikely(!new_page)) {
+               count_vm_event(THP_FAULT_FALLBACK);
                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                                   pmd, orig_pmd, page, haddr);
                put_page(page);
                goto out;
        }
+       count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
@@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page)
 
        BUG_ON(!PageSwapBacked(page));
        __split_huge_page(page, anon_vma);
+       count_vm_event(THP_SPLIT);
 
        BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm,
                                      node, __GFP_OTHER_NODE);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
+               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
+       count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                put_page(new_page);
@@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
                if (!*hpage) {
                        *hpage = alloc_hugepage(khugepaged_defrag());
-                       if (unlikely(!*hpage))
+                       if (unlikely(!*hpage)) {
+                               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                                break;
+                       }
+                       count_vm_event(THP_COLLAPSE_ALLOC);
                }
 #else
                if (IS_ERR(*hpage))
@@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
        do {
                hpage = alloc_hugepage(khugepaged_defrag());
-               if (!hpage)
+               if (!hpage) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                        khugepaged_alloc_sleep();
+               } else
+                       count_vm_event(THP_COLLAPSE_ALLOC);
        } while (unlikely(!hpage) &&
                 likely(khugepaged_enabled()));
        return hpage;
@@ -2210,8 +2228,11 @@ static void khugepaged_loop(void)
        while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
                hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage))
+               if (unlikely(!hpage)) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                        break;
+               }
+               count_vm_event(THP_COLLAPSE_ALLOC);
 #else
                if (IS_ERR(hpage)) {
                        khugepaged_alloc_sleep();
index 9da8cab..ce22a25 100644
@@ -1410,6 +1410,13 @@ no_page_table:
        return page;
 }
 
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSDOWN) &&
+               (vma->vm_start == addr) &&
+               !vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:       task_struct of target task
@@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(mm, start)) {
                        unsigned long pg = start & PAGE_MASK;
-                       struct vm_area_struct *gate_vma = get_gate_vma(mm);
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;
@@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                pte_unmap(pte);
                                return i ? : -EFAULT;
                        }
+                       vma = get_gate_vma(mm);
                        if (pages) {
                                struct page *page;
 
-                               page = vm_normal_page(gate_vma, start, *pte);
+                               page = vm_normal_page(vma, start, *pte);
                                if (!page) {
                                        if (!(gup_flags & FOLL_DUMP) &&
                                             is_zero_pfn(pte_pfn(*pte)))
@@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                get_page(page);
                        }
                        pte_unmap(pte);
-                       if (vmas)
-                               vmas[i] = gate_vma;
-                       i++;
-                       start += PAGE_SIZE;
-                       nr_pages--;
-                       continue;
+                       goto next_page;
                }
 
                if (!vma ||
@@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
+               /*
+                * If we don't actually want the page itself,
+                * and it's the stack guard page, just skip it.
+                */
+               if (!pages && stack_guard_page(vma, start))
+                       goto next_page;
+
                do {
                        struct page *page;
                        unsigned int foll_flags = gup_flags;
@@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                        }
+next_page:
                        if (vmas)
                                vmas[i] = vma;
                        i++;
@@ -3678,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                         */
 #ifdef CONFIG_HAVE_IOREMAP_PROT
                        vma = find_vma(mm, addr);
-                       if (!vma)
+                       if (!vma || vma->vm_start > addr)
                                break;
                        if (vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
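
The added vm_start test is needed because find_vma() returns the first VMA that ends above the address; the address itself may sit in the unmapped gap below that VMA. The canonical check, sketched:

	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && vma->vm_start <= addr) {
		/* addr really lies inside this mapping */
	} else {
		/* addr is unmapped (gap below vma, or past the last VMA) */
	}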
index a2acaf8..9ca1d60 100644
@@ -375,7 +375,7 @@ void online_page(struct page *page)
 #endif
 
 #ifdef CONFIG_FLATMEM
-       max_mapnr = max(page_to_pfn(page), max_mapnr);
+       max_mapnr = max(pfn, max_mapnr);
 #endif
 
        ClearPageReserved(page);
index 2689a08..6b55e3e 100644
@@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_stack_continue(vma->vm_prev, addr);
-}
-
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_LOCKED)
                gup_flags |= FOLL_MLOCK;
 
-       /* We don't try to access the guard page of a stack vma */
-       if (stack_guard_page(vma, start)) {
-               addr += PAGE_SIZE;
-               nr_pages--;
-       }
-
        return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
                                NULL, NULL, nonblocking);
 }
index 2ec8eb5..e27e0cf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         * randomize_va_space to 2, which will still cause mm->start_brk
         * to be arbitrarily shifted
         */
-       if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+       if (current->brk_randomized)
                min_brk = mm->start_brk;
        else
                min_brk = mm->end_data;
@@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma,
                size = vma->vm_end - address;
                grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
-               error = acct_stack_growth(vma, size, grow);
-               if (!error) {
-                       vma->vm_start = address;
-                       vma->vm_pgoff -= grow;
-                       perf_event_mmap(vma);
+               error = -ENOMEM;
+               if (grow <= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               vma->vm_start = address;
+                               vma->vm_pgoff -= grow;
+                               perf_event_mmap(vma);
+                       }
                }
        }
        vma_unlock_anon_vma(vma);
index 6a819d1..83fb72c 100644
@@ -83,24 +83,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 }
 #endif /* CONFIG_NUMA */
 
-/*
- * If this is a system OOM (not a memcg OOM) and the task selected to be
- * killed is not already running at high (RT) priorities, speed up the
- * recovery by boosting the dying task to the lowest FIFO priority.
- * That helps with the recovery and avoids interfering with RT tasks.
- */
-static void boost_dying_task_prio(struct task_struct *p,
-                                 struct mem_cgroup *mem)
-{
-       struct sched_param param = { .sched_priority = 1 };
-
-       if (mem)
-               return;
-
-       if (!rt_task(p))
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-}
-
 /*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
@@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
        set_tsk_thread_flag(p, TIF_MEMDIE);
        force_sig(SIGKILL, p);
 
-       /*
-        * We give our sacrificial lamb high priority and access to
-        * all the memory it needs. That way it should be able to
-        * exit() and clear out its resources quickly...
-        */
-       boost_dying_task_prio(p, mem);
-
        return 0;
 }
 #undef K
@@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
-               boost_dying_task_prio(p, mem);
                return 0;
        }
 
@@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
         */
        if (fatal_signal_pending(current)) {
                set_thread_flag(TIF_MEMDIE);
-               boost_dying_task_prio(current, NULL);
                return;
        }
 
@@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         */
        if (fatal_signal_pending(current)) {
                set_thread_flag(TIF_MEMDIE);
-               boost_dying_task_prio(current, NULL);
                return;
        }
 
index 2747f5e..9f8a97b 100644
@@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data)
  * Called with zonelists_mutex held always
  * unless system_state == SYSTEM_BOOTING.
  */
-void build_all_zonelists(void *data)
+void __ref build_all_zonelists(void *data)
 {
        set_zonelist_order();
 
index 58da7c1..8fa27e4 100644
@@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
-                       if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0)
+                       if (percpu_counter_compare(&sbinfo->used_blocks,
+                                               sbinfo->max_blocks - 1) >= 0)
                                return ERR_PTR(-ENOSPC);
                        percpu_counter_inc(&sbinfo->used_blocks);
                        spin_lock(&inode->i_lock);
@@ -1397,7 +1398,8 @@ repeat:
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
-                       if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) ||
+                       if (percpu_counter_compare(&sbinfo->used_blocks,
+                                               sbinfo->max_blocks) >= 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
index c7f5a6d..f6b435c 100644
@@ -41,6 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone)
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
                struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
                        continue;
                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
-               if (zone_reclaimable(zone)) {
-                       all_unreclaimable = false;
-                       break;
-               }
+               if (!zone->all_unreclaimable)
+                       return false;
        }
 
-       return all_unreclaimable;
+       return true;
 }
 
 /*
@@ -2108,6 +2102,14 @@ out:
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
 
+       /*
+        * As hibernation is going on, kswapd is freezed so that it can't mark
+        * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
+        * check.
+        */
+       if (oom_killer_disabled)
+               return 0;
+
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
index 772b39b..897ea9e 100644
@@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone,
                /*
                 * The fetching of the stat_threshold is racy. We may apply
                 * a counter threshold to the wrong the cpu if we get
-                * rescheduled while executing here. However, the following
-                * will apply the threshold again and therefore bring the
-                * counter under the threshold.
+                * rescheduled while executing here. However, the next
+                * counter update will apply the threshold again and
+                * therefore bring the counter under the threshold again.
+                *
+                * Most of the time the thresholds are the same anyways
+                * for all cpus in a zone.
                 */
                t = this_cpu_read(pcp->stat_threshold);
 
@@ -945,7 +948,16 @@ static const char * const vmstat_text[] = {
        "unevictable_pgs_cleared",
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       "thp_fault_alloc",
+       "thp_fault_fallback",
+       "thp_collapse_alloc",
+       "thp_collapse_alloc_failed",
+       "thp_split",
 #endif
+
+#endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
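
Once listed here, the counters appear as lines in /proc/vmstat. A small userspace reader that picks out the new THP statistics:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char name[64];
		unsigned long long val;
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return 1;
		while (fscanf(f, "%63s %llu", name, &val) == 2)
			if (!strncmp(name, "thp_", 4))
				printf("%s = %llu\n", name, val);
		fclose(f);
		return 0;
	}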
index 48b8e08..7736774 100644
@@ -929,15 +929,15 @@ error:
 }
 EXPORT_SYMBOL(p9_client_attach);
 
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
-       int clone)
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+               char **wnames, int clone)
 {
        int err;
        struct p9_client *clnt;
        struct p9_fid *fid;
        struct p9_qid *wqids;
        struct p9_req_t *req;
-       int16_t nwqids, count;
+       uint16_t nwqids, count;
 
        err = 0;
        wqids = NULL;
@@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
                fid = oldfid;
 
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n",
+       P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
                oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
 
        req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
@@ -1220,27 +1220,6 @@ error:
 }
 EXPORT_SYMBOL(p9_client_fsync);
 
-int p9_client_sync_fs(struct p9_fid *fid)
-{
-       int err = 0;
-       struct p9_req_t *req;
-       struct p9_client *clnt;
-
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
-
-       clnt = fid->clnt;
-       req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
-       if (IS_ERR(req)) {
-               err = PTR_ERR(req);
-               goto error;
-       }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
-       p9_free_req(clnt, req);
-error:
-       return err;
-}
-EXPORT_SYMBOL(p9_client_sync_fs);
-
 int p9_client_clunk(struct p9_fid *fid)
 {
        int err;
index 8a4084f..b58a501 100644
@@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
                        }
                        break;
                case 'T':{
-                               int16_t *nwname = va_arg(ap, int16_t *);
+                               uint16_t *nwname = va_arg(ap, uint16_t *);
                                char ***wnames = va_arg(ap, char ***);
 
                                errcode = p9pdu_readf(pdu, proto_version,
@@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                case 'E':{
                                 int32_t cnt = va_arg(ap, int32_t);
                                 const char *k = va_arg(ap, const void *);
-                                const char *u = va_arg(ap, const void *);
+                                const char __user *u = va_arg(ap,
+                                                       const void __user *);
                                 errcode = p9pdu_writef(pdu, proto_version, "d",
                                                 cnt);
                                 if (!errcode && pdu_write_urw(pdu, k, u, cnt))
@@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                        }
                        break;
                case 'T':{
-                               int16_t nwname = va_arg(ap, int);
+                               uint16_t nwname = va_arg(ap, int);
                                const char **wnames = va_arg(ap, const char **);
 
                                errcode = p9pdu_writef(pdu, proto_version, "w",
index d47880e..e883172 100644
@@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
        uint32_t pdata_mapped_pages;
        struct trans_rpage_info  *rpinfo;
 
-       *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);
+       *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
 
        if (*pdata_off)
                first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
index e8f046b..244e707 100644
@@ -326,8 +326,11 @@ req_retry_pinned:
                        outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
                                        pdata_off, rpinfo->rp_data, pdata_len);
                } else {
-                       char *pbuf = req->tc->pubuf ? req->tc->pubuf :
-                                                               req->tc->pkbuf;
+                       char *pbuf;
+                       if (req->tc->pubuf)
+                               pbuf = (__force char *) req->tc->pubuf;
+                       else
+                               pbuf = req->tc->pkbuf;
                        outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
                                        req->tc->pbuf_size);
                }
@@ -352,8 +355,12 @@ req_retry_pinned:
                        in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
                                        pdata_off, rpinfo->rp_data, pdata_len);
                } else {
-                       char *pbuf = req->tc->pubuf ? req->tc->pubuf :
-                                                               req->tc->pkbuf;
+                       char *pbuf;
+                       if (req->tc->pubuf)
+                               pbuf = (__force char *) req->tc->pubuf;
+                       else
+                               pbuf = req->tc->pkbuf;
+
                        in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
                                        pbuf, req->tc->pbuf_size);
                }
index 50af027..5a80f41 100644
@@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
 
        list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
                                 r_linger_osd) {
-               __unregister_linger_request(osdc, req);
+               /*
+                * reregister request prior to unregistering linger so
+                * that r_osd is preserved.
+                */
+               BUG_ON(!list_empty(&req->r_req_lru_item));
                __register_request(osdc, req);
-               list_move(&req->r_req_lru_item, &osdc->req_unsent);
+               list_add(&req->r_req_lru_item, &osdc->req_unsent);
+               list_add(&req->r_osd_item, &req->r_osd->o_requests);
+               __unregister_linger_request(osdc, req);
                dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
                     osd->o_osd);
        }
@@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc,
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
        INIT_LIST_HEAD(&req->r_req_lru_item);
 
-       dout("register_request %p tid %lld\n", req, req->r_tid);
+       dout("__register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;
index 17d1dcb..4165382 100644
@@ -163,6 +163,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */
 
+       attr->inherit           = !no_inherit;
        attr->read_format       = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                  PERF_FORMAT_TOTAL_TIME_RUNNING |
                                  PERF_FORMAT_ID;
@@ -251,6 +252,9 @@ static void open_counters(struct perf_evlist *evlist)
 {
        struct perf_evsel *pos;
 
+       if (evlist->cpus->map[0] < 0)
+               no_inherit = true;
+
        list_for_each_entry(pos, &evlist->entries, node) {
                struct perf_event_attr *attr = &pos->attr;
                /*
@@ -271,8 +275,7 @@ static void open_counters(struct perf_evlist *evlist)
 retry_sample_id:
                attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
-               if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
-                                    !no_inherit) < 0) {
+               if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
                        int err = errno;
 
                        if (err == EPERM || err == EACCES) {
index e2109f9..03f0e45 100644
@@ -167,16 +167,17 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
+       attr->inherit = !no_inherit;
+
        if (system_wide)
-               return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
+               return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
 
-       attr->inherit = !no_inherit;
        if (target_pid == -1 && target_tid == -1) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
 
-       return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
+       return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
 }
 
 /*
index 1b2106c..11e3c84 100644
@@ -290,7 +290,7 @@ static int test__open_syscall_event(void)
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
+       if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -303,7 +303,7 @@ static int test__open_syscall_event(void)
        }
 
        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
-               pr_debug("perf_evsel__open_read_on_cpu\n");
+               pr_debug("perf_evsel__read_on_cpu\n");
                goto out_close_fd;
        }
 
@@ -365,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+       if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -418,7 +418,7 @@ static int test__open_syscall_event_on_all_cpus(void)
                        continue;
 
                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
-                       pr_debug("perf_evsel__open_read_on_cpu\n");
+                       pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }
@@ -529,7 +529,7 @@ static int test__basic_mmap(void)
 
                perf_evlist__add(evlist, evsels[i]);
 
-               if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+               if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                                 strerror(errno));
index fc1273e..7e3d6e3 100644
@@ -845,9 +845,10 @@ static void start_counters(struct perf_evlist *evlist)
                }
 
                attr->mmap = 1;
+               attr->inherit = inherit;
 try_again:
                if (perf_evsel__open(counter, top.evlist->cpus,
-                                    top.evlist->threads, group, inherit) < 0) {
+                                    top.evlist->threads, group) < 0) {
                        int err = errno;
 
                        if (err == EPERM || err == EACCES) {
index 9fea755..96bee5c 100644
@@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
 {
        FILE *fp;
        char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
-       char *token, *saved_ptr;
+       char *token, *saved_ptr = NULL;
        int found = 0;
 
        fp = fopen("/proc/mounts", "r");
index d852cef..45da8d1 100644
@@ -12,6 +12,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "util.h"
+#include "debug.h"
 
 #include <sys/mman.h>
 
@@ -250,15 +251,19 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
        return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
-                              int mask, int fd)
+static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+                              int cpu, int prot, int mask, int fd)
 {
        evlist->mmap[cpu].prev = 0;
        evlist->mmap[cpu].mask = mask;
        evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
-       if (evlist->mmap[cpu].base == MAP_FAILED)
+       if (evlist->mmap[cpu].base == MAP_FAILED) {
+               if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+                       ui__warning("Inherit is not allowed on per-task "
+                                   "events using mmap.\n");
                return -1;
+       }
 
        perf_evlist__add_pollfd(evlist, fd);
        return 0;
@@ -312,7 +317,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
                                                  FD(first_evsel, cpu, 0)) != 0)
                                                goto out_unmap;
-                               } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+                               } else if (__perf_evlist__mmap(evlist, evsel, cpu,
+                                                              prot, mask, fd) < 0)
                                        goto out_unmap;
 
                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
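
The new warning in `__perf_evlist__mmap()` surfaces the restriction described in the comment being removed from `__perf_evsel__open()` below: the kernel refuses to mmap() an inherited per-task event, since every forked child would otherwise write into the same ring buffer. A hedged standalone sketch of the failing combination, using the raw syscall rather than perf's helpers (behavior depends on the running kernel):

    /* Sketch: an inherited per-task event is expected to refuse mmap(). */
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            void *base;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.inherit = 1;

            /* pid == 0, cpu == -1: per-task event on any CPU. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            /* ...so this mmap() should fail (children would share the
             * buffer); 1 metadata page + 8 data pages. */
            base = mmap(NULL, 9 * 4096, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
            return base == MAP_FAILED ? 0 : 2;
    }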
index 662596a..d6fd59b 100644 (file)
@@ -175,7 +175,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 }
 
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                             struct thread_map *threads, bool group, bool inherit)
+                             struct thread_map *threads, bool group)
 {
        int cpu, thread;
        unsigned long flags = 0;
@@ -192,19 +192,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
                int group_fd = -1;
-               /*
-                * Don't allow mmap() of inherited per-task counters. This
-                * would create a performance issue due to all children writing
-                * to the same buffer.
-                *
-                * FIXME:
-                * Proper fix is not to pass 'inherit' to perf_evsel__open*,
-                * but a 'flags' parameter, with 'group' folded there as well,
-                * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
-                * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
-                * set. Lets go for the minimal fix first tho.
-                */
-               evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
 
                for (thread = 0; thread < threads->nr; thread++) {
 
@@ -253,7 +240,7 @@ static struct {
 };
 
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                    struct thread_map *threads, bool group, bool inherit)
+                    struct thread_map *threads, bool group)
 {
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
@@ -263,19 +250,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
        if (threads == NULL)
                threads = &empty_thread_map.map;
 
-       return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+       return __perf_evsel__open(evsel, cpus, threads, group);
 }
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-                            struct cpu_map *cpus, bool group, bool inherit)
+                            struct cpu_map *cpus, bool group)
 {
-       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+       return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-                               struct thread_map *threads, bool group, bool inherit)
+                               struct thread_map *threads, bool group)
 {
-       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+       return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
index 6710ab5..f79bb2c 100644 (file)
@@ -81,11 +81,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-                            struct cpu_map *cpus, bool group, bool inherit);
+                            struct cpu_map *cpus, bool group);
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-                               struct thread_map *threads, bool group, bool inherit);
+                               struct thread_map *threads, bool group);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                    struct thread_map *threads, bool group, bool inherit);
+                    struct thread_map *threads, bool group);
 
 #define perf_evsel__match(evsel, t, c)         \
        (evsel->attr.type == PERF_TYPE_##t &&   \
index a9f2d7e..f5e3845 100644 (file)
@@ -498,11 +498,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
        struct cpu_map *cpus = NULL;
        struct thread_map *threads = NULL;
        PyObject *pcpus = NULL, *pthreads = NULL;
-       int group = 0, overwrite = 0;
-       static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+       int group = 0, inherit = 0;
+       static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL};
 
        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
-                                        &pcpus, &pthreads, &group, &overwrite))
+                                        &pcpus, &pthreads, &group, &inherit))
                return NULL;
 
        if (pthreads != NULL)
@@ -511,7 +511,8 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
        if (pcpus != NULL)
                cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
 
-       if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+       evsel->attr.inherit = inherit;
+       if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
                PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }
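
One detail worth noting in the `pyrf_evsel__open()` hunk above: the old binding declared an `overwrite` keyword but passed its value straight into the `inherit` parameter of the old five-argument `perf_evsel__open()`, so the rename doubles as a bug fix; the keyword now names what the value always controlled, and the flag lands in `evsel->attr` before the open call, mirroring the C callers.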
index 8c17a87..15633d6 100644 (file)
@@ -256,10 +256,9 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
                         int refresh)
 {
        struct objdump_line *pos, *n;
-       struct annotation *notes = symbol__annotation(sym);
+       struct annotation *notes;
        struct annotate_browser browser = {
                .b = {
-                       .entries = &notes->src->source,
                        .refresh = ui_browser__list_head_refresh,
                        .seek    = ui_browser__list_head_seek,
                        .write   = annotate_browser__write,
@@ -281,6 +280,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 
        ui_helpline__push("Press <- or ESC to exit");
 
+       notes = symbol__annotation(sym);
+
        list_for_each_entry(pos, &notes->src->source, node) {
                struct objdump_line_rb_node *rbpos;
                size_t line_len = strlen(pos->line);
@@ -291,6 +292,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
                rbpos->idx = browser.b.nr_entries++;
        }
 
+               browser.b.entries = &notes->src->source;
        browser.b.width += 18; /* Percentage */
        ret = annotate_browser__run(&browser, evidx, refresh);
        list_for_each_entry_safe(pos, n, &notes->src->source, node) {
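
The `symbol__tui_annotate()` hunk above reads as an ordering fix: `&notes->src->source` was taken inside the designated initializer, i.e. before the function body had run at all, and is now assigned only after `symbol__annotation(sym)` has been fetched and the source list walked. A contrived standalone sketch of the general pattern, with all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    struct src { int first_line; };
    struct annotation { struct src *src; };
    struct browser { int *entries; };

    int main(void)
    {
            struct annotation *notes;
            struct browser b = { .entries = NULL }; /* defer: nothing valid yet */

            /* ...only now does the annotation become available... */
            notes = calloc(1, sizeof(*notes));
            notes->src = calloc(1, sizeof(*notes->src));
            notes->src->first_line = 1;

            b.entries = &notes->src->first_line;    /* assign once valid */
            printf("%d\n", *b.entries);
            free(notes->src);
            free(notes);
            return 0;
    }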
index 798efdc..5d767c6 100644 (file)
@@ -851,7 +851,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel,
                        goto out_free_stack;
                case 'a':
                        if (browser->selection == NULL ||
-                           browser->selection->map == NULL ||
+                           browser->selection->sym == NULL ||
                            browser->selection->map->dso->annotate_warned)
                                continue;
                        goto do_annotate;