Merge branch 'upstream/pvhvm' into upstream/xen
author    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
          Wed, 4 Aug 2010 21:49:16 +0000 (14:49 -0700)
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
          Wed, 4 Aug 2010 21:49:16 +0000 (14:49 -0700)
* upstream/pvhvm:
  Introduce CONFIG_XEN_PVHVM compile option
  blkfront: do not create a PV cdrom device if xen_hvm_guest
  support multiple .discard.* sections to avoid section type conflicts
  xen/pvhvm: fix build problem when !CONFIG_XEN
  xenfs: enable for HVM domains too
  x86: Call HVMOP_pagetable_dying on exit_mmap.
  x86: Unplug emulated disks and nics.
  x86: Use xen_vcpuop_clockevent, xen_clocksource and xen wallclock.
  xen: Fix find_unbound_irq in presence of ioapic irqs.
  xen: Add suspend/resume support for PV on HVM guests.
  xen: Xen PCI platform device driver.
  x86/xen: event channels delivery on HVM.
  x86: early PV on HVM features initialization.
  xen: Add support for HVM hypercalls.

Conflicts:
	arch/x86/xen/enlighten.c
	arch/x86/xen/time.c

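The pvhvm series above lets a fully-virtualized (HVM) guest notice early that it is running on Xen and then opt in to PV drivers, event-channel interrupt delivery, the Xen clocksource and hypercalls. Detection goes through the hypervisor CPUID leaves; a minimal user-space sketch of that probe — illustrative only, assuming GCC on x86 and the raw __cpuid() macro from <cpuid.h>, not the kernel's in-tree helper — looks like this:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    /* Xen advertises itself on CPUID leaves 0x40000000 + N*0x100 with the
     * 12-byte signature "XenVMMXenVMM" spread across EBX/ECX/EDX. */
    static unsigned int xen_cpuid_base(void)
    {
            unsigned int base, eax, ebx, ecx, edx;
            char sig[13];

            for (base = 0x40000000; base < 0x40010000; base += 0x100) {
                    __cpuid(base, eax, ebx, ecx, edx);
                    memcpy(sig + 0, &ebx, 4);
                    memcpy(sig + 4, &ecx, 4);
                    memcpy(sig + 8, &edx, 4);
                    sig[12] = '\0';
                    if (!strcmp(sig, "XenVMMXenVMM") && eax - base >= 2)
                            return base;    /* further Xen info at base+1, base+2 */
            }
            return 0;       /* not running under Xen */
    }

    int main(void)
    {
            printf("Xen CPUID base: %#x\n", xen_cpuid_base());
            return 0;
    }
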
444 files changed:
Documentation/00-INDEX
Documentation/bus-virt-phys-mapping.txt [moved from Documentation/IO-mapping.txt with 100% similarity]
Documentation/credentials.txt
Documentation/feature-removal-schedule.txt
Documentation/kernel-parameters.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/Makefile
arch/arm/common/sa1111.c
arch/arm/include/asm/atomic.h
arch/arm/include/asm/io.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/kprobes-decode.c
arch/arm/kernel/process.c
arch/arm/lib/csumpartialcopyuser.S
arch/arm/mach-clps711x/include/mach/debug-macro.S
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-footbridge/common.c
arch/arm/mach-h720x/include/mach/debug-macro.S
arch/arm/mach-kirkwood/tsx1x-common.c
arch/arm/mach-kirkwood/tsx1x-common.h
arch/arm/mach-ns9xxx/include/mach/debug-macro.S
arch/arm/mach-ns9xxx/include/mach/uncompress.h
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-pxa/colibri-pxa300.c
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-realview/core.c
arch/arm/mach-shark/include/mach/debug-macro.S
arch/arm/mach-ux500/clock.c
arch/arm/mach-ux500/cpu-db5500.c
arch/arm/mach-ux500/include/mach/uncompress.h
arch/arm/mach-vexpress/v2m.c
arch/arm/mach-w90x900/cpu.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/highmem.c
arch/arm/plat-spear/include/plat/debug-macro.S
arch/arm/tools/mach-types
arch/avr32/include/asm/ioctls.h
arch/avr32/mach-at32ap/include/mach/board.h
arch/microblaze/Kconfig
arch/microblaze/include/asm/memblock.h [moved from arch/microblaze/include/asm/lmb.h with 57% similarity]
arch/microblaze/kernel/prom.c
arch/microblaze/mm/init.c
arch/mips/alchemy/common/platform.c
arch/mips/alchemy/mtx-1/board_setup.c
arch/mips/bcm63xx/dev-enet.c
arch/mips/include/asm/atomic.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/vdso.c
arch/mips/mti-malta/malta-pci.c
arch/mips/nxp/pnx8550/common/pci.c
arch/mips/nxp/pnx8550/common/setup.c
arch/mips/pci/ops-pmcmsp.c
arch/mips/pci/pci-yosemite.c
arch/mips/powertv/asic/asic_devices.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/abs_addr.h
arch/powerpc/include/asm/cpm.h
arch/powerpc/include/asm/cpm1.h
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/lmb.h [deleted file]
arch/powerpc/include/asm/memblock.h [new file with mode: 0644]
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/kernel/btext.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/crash_dump.c
arch/powerpc/kernel/dma-swiotlb.c
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/fsl_booke_entry_mapping.S
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/vdso.c
arch/powerpc/mm/40x_mmu.c
arch/powerpc/mm/hash_low_64.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/init_32.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/ppc_mmu_32.c
arch/powerpc/mm/stab.c
arch/powerpc/mm/tlb_nohash.c
arch/powerpc/platforms/85xx/corenet_ds.c
arch/powerpc/platforms/85xx/mpc8536_ds.c
arch/powerpc/platforms/85xx/mpc85xx_ds.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/embedded6xx/wii.c
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/pasemi/iommu.c
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/ps3/htab.c
arch/powerpc/platforms/ps3/mm.c
arch/powerpc/platforms/ps3/os-area.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/phyp_dump.c
arch/powerpc/sysdev/dart_iommu.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/micropatch.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/time.c
arch/sh/Kconfig
arch/sh/include/asm/lmb.h [deleted file]
arch/sh/include/asm/memblock.h [new file with mode: 0644]
arch/sh/kernel/machine_kexec.c
arch/sh/kernel/setup.c
arch/sh/mm/init.c
arch/sh/mm/numa.c
arch/sparc/Kconfig
arch/sparc/configs/sparc64_defconfig
arch/sparc/include/asm/cache.h
arch/sparc/include/asm/lmb.h [deleted file]
arch/sparc/include/asm/memblock.h [new file with mode: 0644]
arch/sparc/include/asm/pgtable_32.h
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/sun4d_irq.c
arch/sparc/kernel/ttable.S
arch/sparc/mm/init_64.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/sun4c.c
arch/x86/include/asm/x86_init.h
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/hpet.c
arch/x86/kernel/i8259.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kprobes.c
arch/x86/kernel/mrst.c
arch/x86/kernel/quirks.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/x86_init.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/pci/i386.c
arch/x86/pci/mrst.c
arch/x86/xen/enlighten.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
arch/x86/xen/xen-ops.h
crypto/ablkcipher.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/sleep.c
drivers/base/core.c
drivers/char/agp/intel-gtt.c
drivers/char/sysrq.c
drivers/char/tpm/tpm_tis.c
drivers/clocksource/cs5535-clockevt.c
drivers/cpufreq/cpufreq.c
drivers/crypto/talitos.c
drivers/edac/Kconfig
drivers/edac/i7core_edac.c
drivers/edac/mpc85xx_edac.c
drivers/gpio/cs5535-gpio.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_legacy_tv.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/input/joystick/gamecon.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/twl4030_keypad.c
drivers/input/keyboard/w90p910_keypad.c
drivers/input/mouse/Kconfig
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/serio/Kconfig
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/w90p910_ts.c
drivers/misc/cs5535-mfgpt.c
drivers/mmc/host/mmci.c
drivers/mmc/host/sdhci-s3c.c
drivers/net/bnx2x.h
drivers/net/bnx2x_main.c
drivers/net/bonding/bond_alb.c
drivers/net/declance.c
drivers/net/ibmveth.c
drivers/net/igb/igb_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/r8169.c
drivers/net/s2io.h
drivers/net/tun.c
drivers/net/wimax/i2400m/i2400m-usb.h
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/hostap/hostap_pci.c
drivers/net/wireless/iwlwifi/iwl-sta.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/pci/setup-res.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pxa2xx_base.c
drivers/platform/x86/intel_scu_ipc.c
drivers/power/ds2782_battery.c
drivers/regulator/ab3100.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/wm8350-regulator.c
drivers/rtc/rtc-rx8581.c
drivers/s390/block/dasd_devmap.c
drivers/s390/cio/chsc.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_qdio.c
drivers/sbus/char/openprom.c
drivers/scsi/ibmvscsi/rpa_vscsi.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/serial/atmel_serial.c
drivers/serial/suncore.c
drivers/serial/sunsu.c
drivers/spi/spi_mpc8xxx.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/pxa27x_udc.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/musb/tusb6010.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/storage/transport.c
drivers/vhost/net.c
drivers/video/aty/radeon_pm.c
drivers/video/au1100fb.c
drivers/video/cyber2000fb.c
drivers/video/gbefb.c
drivers/video/pmag-ba-fb.c
drivers/video/pmagb-b-fb.c
drivers/virtio/virtio_ring.c
drivers/xen/xenbus/xenbus_probe.c
fs/9p/vfs_dir.c
fs/btrfs/ctree.c
fs/btrfs/ioctl.c
fs/ceph/Kconfig
fs/ceph/auth_x.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/messenger.c
fs/ceph/mon_client.c
fs/ceph/osd_client.c
fs/ceph/osdmap.c
fs/cifs/cifsfs.c
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/dcache.c
fs/ecryptfs/messaging.c
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/quota.c
fs/gfs2/quota.h
fs/inode.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jffs2/xattr.c
fs/mbcache.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/internal.h
fs/nfs/nfsroot.c
fs/nfs/write.c
fs/ocfs2/aops.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/file.c
fs/ocfs2/file.h
fs/ocfs2/journal.c
fs/ocfs2/localalloc.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/xattr.c
fs/partitions/ibm.c
fs/proc/array.c
fs/quota/dquot.c
fs/sysfs/symlink.c
fs/ubifs/shrinker.c
fs/ubifs/ubifs.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/linux-2.6/xfs_trace.h
fs/xfs/quota/xfs_qm.c
fs/xfs/xfs_mount.h
include/acpi/processor.h
include/asm-generic/vmlinux.lds.h
include/linux/acpi.h
include/linux/cred.h
include/linux/fb.h
include/linux/fdtable.h
include/linux/i8042.h
include/linux/if_macvlan.h
include/linux/jbd2.h
include/linux/lmb.h [deleted file]
include/linux/memblock.h [new file with mode: 0644]
include/linux/mm.h
include/linux/nfs_fs.h
include/linux/pci.h
include/linux/regulator/tps6507x.h [new file with mode: 0644]
include/linux/sched.h
include/linux/syscalls.h
include/linux/sysrq.h
include/linux/vgaarb.h
include/math-emu/op-common.h
include/net/sock.h
include/net/tc_act/tc_mirred.h
ipc/sem.c
kernel/cred.c
kernel/debug/debug_core.c
kernel/debug/gdbstub.c
kernel/debug/kdb/kdb_main.c
kernel/early_res.c
kernel/module.c
lib/Kconfig
lib/Makefile
lib/lmb.c [deleted file]
mm/Kconfig
mm/Makefile
mm/bootmem.c
mm/memblock.c [new file with mode: 0644]
mm/memory.c
mm/page_alloc.c
mm/page_cgroup.c
mm/vmscan.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap.c
net/bridge/br_device.c
net/bridge/br_forward.c
net/core/dev.c
net/core/neighbour.c
net/core/skbuff.c
net/dsa/Kconfig
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/mip6.c
net/mac80211/cfg.c
net/phonet/pep.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sunrpc/auth.c
net/xfrm/xfrm_policy.c
scripts/kconfig/nconf.gui.c
scripts/package/Makefile
scripts/setlocalversion
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/au1x/psc-i2s.c
sound/soc/codecs/Kconfig
sound/soc/codecs/wm8727.c
sound/soc/codecs/wm8776.c
sound/soc/codecs/wm8988.c
sound/soc/sh/fsi.c
tools/perf/Makefile
tools/perf/arch/sparc/Makefile [new file with mode: 0644]
tools/perf/arch/sparc/util/dwarf-regs.c [new file with mode: 0644]
tools/perf/builtin-report.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/symbol.c

index dd10b51..5405f7a 100644 (file)
@@ -32,8 +32,6 @@ DocBook/
        - directory with DocBook templates etc. for kernel documentation.
 HOWTO
        - the process and procedures of how to do Linux kernel development.
-IO-mapping.txt
-       - how to access I/O mapped memory from within device drivers.
 IPMI.txt
        - info on Linux Intelligent Platform Management Interface (IPMI) Driver.
 IRQ-affinity.txt
@@ -84,6 +82,8 @@ blockdev/
        - info on block devices & drivers
 btmrvl.txt
        - info on Marvell Bluetooth driver usage.
+bus-virt-phys-mapping.txt
+       - how to access I/O mapped memory from within device drivers.
 cachetlb.txt
        - describes the cache/TLB flushing interfaces Linux uses.
 cdrom/
@@ -168,6 +168,8 @@ initrd.txt
        - how to use the RAM disk as an initial/temporary root filesystem.
 input/
        - info on Linux input device support.
+io-mapping.txt
+       - description of io_mapping functions in linux/io-mapping.h
 io_ordering.txt
        - info on ordering I/O writes to memory-mapped addresses.
 ioctl/
index a2db352..995baf3 100644 (file)
@@ -417,6 +417,9 @@ reference on them using:
 This does all the RCU magic inside of it.  The caller must call put_cred() on
 the credentials so obtained when they're finished with.
 
+ [*] Note: The result of __task_cred() should not be passed directly to
+     get_cred() as this may race with commit_cred().
+
 There are a couple of convenience functions to access bits of another task's
 credentials, hiding the RCU magic from the caller:
 
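The note added above matters because a task's credential structure is replaced, not updated in place, when new credentials are committed; a pointer from __task_cred() is therefore only stable inside the surrounding RCU read-side critical section, and bumping its refcount afterwards can race with that replacement. A kernel-style sketch of the safe read-only pattern this document describes (illustrative only; taking a longer-lived reference should go through a helper that does the get inside the RCU section, such as get_task_cred()):

    /* Assumes kernel context with <linux/cred.h> and <linux/sched.h>. */
    static uid_t read_task_euid(struct task_struct *task)
    {
            const struct cred *cred;
            uid_t euid;

            rcu_read_lock();
            cred = __task_cred(task);   /* valid only inside this section */
            euid = cred->euid;          /* copy out what is needed...     */
            rcu_read_unlock();          /* ...before leaving the section  */

            return euid;
    }
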
index c268783..1571c0c 100644 (file)
@@ -647,3 +647,10 @@ Who:       Stefan Richter <stefanr@s5r6.in-berlin.de>
 
 ----------------------------
 
+What:  The acpi_sleep=s4_nonvs command line option
+When:  2.6.37
+Files: arch/x86/kernel/acpi/sleep.c
+Why:   superseded by acpi_sleep=nonvs
+Who:   Rafael J. Wysocki <rjw@sisk.pl>
+
+----------------------------
index eefcd80..2f32da7 100644 (file)
@@ -255,8 +255,8 @@ and is between 256 and 4096 characters. It is defined in the file
                        control method, with respect to putting devices into
                        low power states, to be enforced (the ACPI 2.0 ordering
                        of _PTS is used by default).
-                       s4_nonvs prevents the kernel from saving/restoring the
-                       ACPI NVS memory during hibernation.
+                       nonvs prevents the kernel from saving/restoring the
+                       ACPI NVS memory during suspend/hibernation and resume.
                        sci_force_enable causes the kernel to set SCI_EN directly
                        on resume from S1/S3 (which is against the ACPI spec,
                        but some broken systems don't work without it).
@@ -1266,7 +1266,7 @@ and is between 256 and 4096 characters. It is defined in the file
                        If there are multiple matching configurations changing
                        the same attribute, the last one is used.
 
-       lmb=debug       [KNL] Enable lmb debug messages.
+       memblock=debug  [KNL] Enable memblock debug messages.
 
        load_ramdisk=   [RAM] List of ramdisks to load from floppy
                        See Documentation/blockdev/ramdisk.txt.
index 5884812..02f75fc 100644 (file)
@@ -5336,6 +5336,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
 S:     Maintained
 F:     arch/sparc/
+F:     drivers/sbus
 
 SPARC SERIAL DRIVERS
 M:     "David S. Miller" <davem@davemloft.net>
@@ -6242,6 +6243,8 @@ F:        drivers/mmc/host/wbsd.*
 
 WATCHDOG DEVICE DRIVERS
 M:     Wim Van Sebroeck <wim@iguana.be>
+L:     linux-watchdog@vger.kernel.org
+W:     http://www.linux-watchdog.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git
 S:     Maintained
 F:     Documentation/watchdog/
index 037ff4e..141da26 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 35
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
index 53faa90..864a002 100644 (file)
@@ -71,6 +71,9 @@ targets       := vmlinux vmlinux.lds \
                 piggy.$(suffix_y) piggy.$(suffix_y).o \
                 font.o font.c head.o misc.o $(OBJS)
 
+# Make sure files are removed during clean
+extra-y       += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
+
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
index 6f80665..9eaf65f 100644 (file)
@@ -1028,13 +1028,12 @@ static int sa1111_remove(struct platform_device *pdev)
        struct sa1111 *sachip = platform_get_drvdata(pdev);
 
        if (sachip) {
-               __sa1111_remove(sachip);
-               platform_set_drvdata(pdev, NULL);
-
 #ifdef CONFIG_PM
                kfree(sachip->saved_state);
                sachip->saved_state = NULL;
 #endif
+               __sa1111_remove(sachip);
+               platform_set_drvdata(pdev, NULL);
        }
 
        return 0;
index a0162fa..7e79503 100644 (file)
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
        int result;
 
        __asm__ __volatile__("@ atomic_add\n"
-"1:    ldrex   %0, [%2]\n"
-"      add     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 }
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic_add_return\n"
-"1:    ldrex   %0, [%2]\n"
-"      add     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
        int result;
 
        __asm__ __volatile__("@ atomic_sub\n"
-"1:    ldrex   %0, [%2]\n"
-"      sub     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic_sub_return\n"
-"1:    ldrex   %0, [%2]\n"
-"      sub     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
-               "ldrex  %1, [%2]\n"
+               "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
-               "teq    %1, %3\n"
-               "strexeq %0, %4, [%2]\n"
-                   : "=&r" (res), "=&r" (oldval)
+               "teq    %1, %4\n"
+               "strexeq %0, %5, [%3]\n"
+                   : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        unsigned long tmp, tmp2;
 
        __asm__ __volatile__("@ atomic_clear_mask\n"
-"1:    ldrex   %0, [%2]\n"
-"      bic     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      bic     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (tmp), "=&r" (tmp2)
+       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
        : "r" (addr), "Ir" (mask)
        : "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
        __asm__ __volatile__("@ atomic64_read\n"
 "      ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
-       : "r" (&v->counter)
+       : "r" (&v->counter), "Qo" (v->counter)
        );
 
        return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
        u64 tmp;
 
        __asm__ __volatile__("@ atomic64_set\n"
-"1:    ldrexd  %0, %H0, [%1]\n"
-"      strexd  %0, %2, %H2, [%1]\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      strexd  %0, %3, %H3, [%2]\n"
 "      teq     %0, #0\n"
 "      bne     1b"
-       : "=&r" (tmp)
+       : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_add\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      adds    %0, %0, %3\n"
-"      adc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      adds    %0, %0, %4\n"
+"      adc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_add_return\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      adds    %0, %0, %3\n"
-"      adc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      adds    %0, %0, %4\n"
+"      adc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_sub\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      subs    %0, %0, %3\n"
-"      sbc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      subs    %0, %0, %4\n"
+"      sbc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_sub_return\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      subs    %0, %0, %3\n"
-"      sbc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      subs    %0, %0, %4\n"
+"      sbc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
-               "ldrexd         %1, %H1, [%2]\n"
+               "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
-               "teq            %1, %3\n"
-               "teqeq          %H1, %H3\n"
-               "strexdeq       %0, %4, %H4, [%2]"
-               : "=&r" (res), "=&r" (oldval)
+               "teq            %1, %4\n"
+               "teqeq          %H1, %H4\n"
+               "strexdeq       %0, %5, %H5, [%3]"
+               : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_xchg\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      strexd  %1, %3, %H3, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      strexd  %1, %4, %H4, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
 "      subs    %0, %0, #1\n"
 "      sbc     %H0, %H0, #0\n"
 "      teq     %H0, #0\n"
 "      bmi     2f\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b\n"
 "2:"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_add_unless\n"
-"1:    ldrexd  %0, %H0, [%3]\n"
-"      teq     %0, %4\n"
-"      teqeq   %H0, %H4\n"
+"1:    ldrexd  %0, %H0, [%4]\n"
+"      teq     %0, %5\n"
+"      teqeq   %H0, %H5\n"
 "      moveq   %1, #0\n"
 "      beq     2f\n"
-"      adds    %0, %0, %5\n"
-"      adc     %H0, %H0, %H5\n"
-"      strexd  %2, %0, %H0, [%3]\n"
+"      adds    %0, %0, %6\n"
+"      adc     %H0, %H0, %H6\n"
+"      strexd  %2, %0, %H0, [%4]\n"
 "      teq     %2, #0\n"
 "      bne     1b\n"
 "2:"
-       : "=&r" (val), "=&r" (ret), "=&r" (tmp)
+       : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");
 
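The recurring change in the hunks above adds the counter itself as an explicit "+Qo" operand. "Q" and "o" are ARM memory-operand constraints and "+" marks the operand read-write, so the compiler now sees that the ldrex/strex loop both reads and writes v->counter and will neither cache that value in a register across the asm nor reorder ordinary accesses to it; before, only the address was passed in ("r" (&v->counter)), which hid the memory access entirely. A stand-alone sketch of the same idiom, assuming ARMv6+ and GCC:

    static inline void asm_increment(int *p)
    {
            int newval, tmp;

            __asm__ __volatile__(
    "1:     ldrex   %0, [%3]\n"
    "       add     %0, %0, #1\n"
    "       strex   %1, %0, [%3]\n"
    "       teq     %1, #0\n"
    "       bne     1b"
            /* %2 is the memory location itself (read-write), %3 its address */
            : "=&r" (newval), "=&r" (tmp), "+Qo" (*p)
            : "r" (p)
            : "cc");
    }
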
index c980156..1261b1f 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/system.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -179,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * IO port primitives for more information.
  */
 #ifdef __mem_pci
-#define readb(c) ({ __u8  __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \
+#define readb_relaxed(c) ({ u8  __v = __raw_readb(__mem_pci(c)); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
                                        __raw_readw(__mem_pci(c))); __v; })
-#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
                                        __raw_readl(__mem_pci(c))); __v; })
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
+
+#define writeb_relaxed(v,c)    ((void)__raw_writeb(v,__mem_pci(c)))
+#define writew_relaxed(v,c)    ((void)__raw_writew((__force u16) \
+                                       cpu_to_le16(v),__mem_pci(c)))
+#define writel_relaxed(v,c)    ((void)__raw_writel((__force u32) \
+                                       cpu_to_le32(v),__mem_pci(c)))
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
+#define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)            ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)            ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)            ({ __iowmb(); writel_relaxed(v,c); })
 
 #define readsb(p,d,l)          __raw_readsb(__mem_pci(p),d,l)
 #define readsw(p,d,l)          __raw_readsw(__mem_pci(p),d,l)
 #define readsl(p,d,l)          __raw_readsl(__mem_pci(p),d,l)
 
-#define writeb(v,c)            __raw_writeb(v,__mem_pci(c))
-#define writew(v,c)            __raw_writew((__force __u16) \
-                                       cpu_to_le16(v),__mem_pci(c))
-#define writel(v,c)            __raw_writel((__force __u32) \
-                                       cpu_to_le32(v),__mem_pci(c))
-
 #define writesb(p,d,l)         __raw_writesb(__mem_pci(p),d,l)
 #define writesw(p,d,l)         __raw_writesw(__mem_pci(p),d,l)
 #define writesl(p,d,l)         __raw_writesl(__mem_pci(p),d,l)
@@ -244,13 +258,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * io{read,write}{8,16,32} macros
  */
 #ifndef ioread8
-#define ioread8(p)     ({ unsigned int __v = __raw_readb(p); __v; })
-#define ioread16(p)    ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
-#define ioread32(p)    ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
+#define ioread8(p)     ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)    ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)    ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)  __raw_writeb(v, p)
-#define iowrite16(v,p) __raw_writew((__force __u16)cpu_to_le16(v), p)
-#define iowrite32(v,p) __raw_writel((__force __u32)cpu_to_le32(v), p)
+#define iowrite8(v,p)  ({ __iowmb(); (void)__raw_writeb(v, p); })
+#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
 
 #define ioread8_rep(p,d,c)     __raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)    __raw_readsw(p,d,c)
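
The io.h change above splits the MMIO accessors into *_relaxed() variants, which only touch the device, and the ordered readl()/writel() family, which now wrap them in __iormb()/__iowmb() when CONFIG_ARM_DMA_MEM_BUFFERABLE is enabled. The practical rule for drivers: use the ordered accessors when the register access has to be ordered against normal (DMA-visible) memory, and the relaxed ones for pure register traffic. A driver-style sketch — the register offsets and descriptor layout here are invented for illustration:

    struct dev_desc {
            u32 flags;                      /* device reads this via DMA */
    };
    #define DESC_READY      0x1
    #define DEV_DOORBELL    0x00            /* hypothetical register offsets */
    #define DEV_STATUS      0x04

    static void dev_kick(void __iomem *regs, struct dev_desc *d, dma_addr_t d_dma)
    {
            d->flags |= DESC_READY;         /* plain memory write */

            /* writel() = __iowmb(); writel_relaxed(): the barrier makes the
             * descriptor update visible before the doorbell reaches the device. */
            writel(d_dma, regs + DEV_DOORBELL);

            /* Polling a status register needs no ordering against normal
             * memory, so the cheaper relaxed form is sufficient. */
            while (!(readl_relaxed(regs + DEV_STATUS) & 1))
                    cpu_relax();
    }
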
index 7ee48e7..3fd7861 100644 (file)
@@ -162,8 +162,6 @@ ENDPROC(__und_invalid)
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r5, {r0 - r4}
-
-       asm_trace_hardirqs_off
        .endm
 
        .align  5
@@ -204,7 +202,7 @@ __dabt_svc:
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-       disable_irq
+       disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -218,6 +216,9 @@ ENDPROC(__dabt_svc)
 __irq_svc:
        svc_entry
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
 #ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
@@ -291,7 +292,7 @@ __und_svc:
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-1:     disable_irq
+1:     disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -327,7 +328,7 @@ __pabt_svc:
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-       disable_irq
+       disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -393,8 +394,6 @@ ENDPROC(__pabt_svc)
        @ Clear FP to mark the first stack frame
        @
        zero_fp
-
-       asm_trace_hardirqs_off
        .endm
 
        .macro  kuser_cmpxchg_check
@@ -465,9 +464,6 @@ __irq_usr:
  THUMB(        movne   r0, #0          )
  THUMB(        strne   r0, [r0]        )
 #endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_on
-#endif
 
        mov     why, #0
        b       ret_to_user
index da1f949..8bccbfa 100644 (file)
@@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
 {
        insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
+       long ppc = (long)p->addr + 8;
        union reg_pair fnr;
        int rd = (insn >> 12) & 0xf;
        int rn = (insn >> 16) & 0xf;
        int rm = insn & 0xf;
        long rdv;
-       long rnv  = regs->uregs[rn];
-       long rmv  = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+       long rnv = (rn == 15) ? ppc : regs->uregs[rn];
+       long rmv = (rm == 15) ? ppc : regs->uregs[rm];
        long cpsr = regs->ARM_cpsr;
 
        fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
index acf5e6f..a4a9cc8 100644 (file)
@@ -351,17 +351,21 @@ EXPORT_SYMBOL(dump_fpu);
 
 /*
  * Shuffle the argument into the correct register before calling the
- * thread function.  r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
+ * thread function.  r4 is the thread argument, r5 is the pointer to
+ * the thread function, and r6 points to the exit function.
  */
 extern void kernel_thread_helper(void);
 asm(   ".pushsection .text\n"
 "      .align\n"
 "      .type   kernel_thread_helper, #function\n"
 "kernel_thread_helper:\n"
-"      mov     r0, r1\n"
-"      mov     lr, r3\n"
-"      mov     pc, r2\n"
+#ifdef CONFIG_TRACE_IRQFLAGS
+"      bl      trace_hardirqs_on\n"
+#endif
+"      msr     cpsr_c, r7\n"
+"      mov     r0, r4\n"
+"      mov     lr, r6\n"
+"      mov     pc, r5\n"
 "      .size   kernel_thread_helper, . - kernel_thread_helper\n"
 "      .popsection");
 
@@ -391,11 +395,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 
        memset(&regs, 0, sizeof(regs));
 
-       regs.ARM_r1 = (unsigned long)arg;
-       regs.ARM_r2 = (unsigned long)fn;
-       regs.ARM_r3 = (unsigned long)kernel_thread_exit;
+       regs.ARM_r4 = (unsigned long)arg;
+       regs.ARM_r5 = (unsigned long)fn;
+       regs.ARM_r6 = (unsigned long)kernel_thread_exit;
+       regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
        regs.ARM_pc = (unsigned long)kernel_thread_helper;
-       regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
+       regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
 
        return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
index 59ff6fd..7d08b43 100644 (file)
@@ -71,7 +71,7 @@
                .pushsection .fixup,"ax"
                .align  4
 9001:          mov     r4, #-EFAULT
-               ldr     r5, [fp, #4]            @ *err_ptr
+               ldr     r5, [sp, #8*4]          @ *err_ptr
                str     r4, [r5]
                ldmia   sp, {r1, r2}            @ retrieve dst, len
                add     r2, r2, r1
index fedd807..072cc6b 100644 (file)
@@ -11,6 +11,7 @@
  *
 */
 
+#include <mach/hardware.h>
 #include <asm/hardware/clps7111.h>
 
                .macro  addruart, rx, tmp
index 2ec3095..b280efb 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/tps6507x.h>
 #include <linux/mfd/tps6507x.h>
 #include <linux/input/tps6507x-ts.h>
 
@@ -469,6 +470,11 @@ struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
        },
 };
 
+/* We take advantage of the fact that both defdcdc{2,3} are tied high */
+static struct tps6507x_reg_platform_data tps6507x_platform_data = {
+       .defdcdc_default = true,
+};
+
 struct regulator_init_data tps65070_regulator_data[] = {
        /* dcdc1 */
        {
@@ -494,6 +500,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
                },
                .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc2_consumers),
                .consumer_supplies = tps65070_dcdc2_consumers,
+               .driver_data = &tps6507x_platform_data,
        },
 
        /* dcdc3 */
@@ -507,6 +514,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
                },
                .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc3_consumers),
                .consumer_supplies = tps65070_dcdc3_consumers,
+               .driver_data = &tps6507x_platform_data,
        },
 
        /* ldo1 */
index e3bc3f6..88b3dd8 100644 (file)
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(__bus_to_virt);
 
 unsigned long __pfn_to_bus(unsigned long pfn)
 {
-       return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET));
+       return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
 }
 EXPORT_SYMBOL(__pfn_to_bus);
 
index a9ee8f0..27cafd1 100644 (file)
  *
 */
 
-               .equ    io_virt, IO_BASE
-               .equ    io_phys, IO_START
+#include <mach/hardware.h>
+
+               .equ    io_virt, IO_VIRT
+               .equ    io_phys, IO_PHYS
 
                .macro  addruart, rx, tmp
                mrc     p15, 0, \rx, c1, c0
index 7221c20..f781164 100644 (file)
@@ -77,7 +77,7 @@ struct spi_board_info __initdata qnap_tsx1x_spi_slave_info[] = {
        },
 };
 
-void qnap_tsx1x_register_flash(void)
+void __init qnap_tsx1x_register_flash(void)
 {
        spi_register_board_info(qnap_tsx1x_spi_slave_info,
                                ARRAY_SIZE(qnap_tsx1x_spi_slave_info));
index 9a59296..7fa0373 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ARCH_KIRKWOOD_TSX1X_COMMON_H
 #define __ARCH_KIRKWOOD_TSX1X_COMMON_H
 
-extern void qnap_tsx1x_register_flash(void);
+extern void __init qnap_tsx1x_register_flash(void);
 extern void qnap_tsx1x_power_off(void);
 
 #endif
index 0859336..5c934bd 100644 (file)
@@ -8,6 +8,7 @@
  * the Free Software Foundation.
  */
 #include <mach/hardware.h>
+#include <asm/memory.h>
 
 #include <mach/regs-board-a9m9750dev.h>
 
index 1b12d32..770a68c 100644 (file)
@@ -20,50 +20,49 @@ static void putc_dummy(char c, void __iomem *base)
        /* nothing */
 }
 
+static int timeout;
+
 static void putc_ns9360(char c, void __iomem *base)
 {
-       static int t = 0x10000;
        do {
-               if (t)
-                       --t;
+               if (timeout)
+                       --timeout;
 
                if (__raw_readl(base + 8) & (1 << 3)) {
                        __raw_writeb(c, base + 16);
-                       t = 0x10000;
+                       timeout = 0x10000;
                        break;
                }
-       } while (t);
+       } while (timeout);
 }
 
 static void putc_a9m9750dev(char c, void __iomem *base)
 {
-       static int t = 0x10000;
        do {
-               if (t)
-                       --t;
+               if (timeout)
+                       --timeout;
 
                if (__raw_readb(base + 5) & (1 << 5)) {
                        __raw_writeb(c, base);
-                       t = 0x10000;
+                       timeout = 0x10000;
                        break;
                }
-       } while (t);
+       } while (timeout);
 
 }
 
 static void putc_ns921x(char c, void __iomem *base)
 {
-       static int t = 0x10000;
        do {
-               if (t)
-                       --t;
+               if (timeout)
+                       --timeout;
 
                if (!(__raw_readl(base) & (1 << 11))) {
                        __raw_writeb(c, base + 0x0028);
-                       t = 0x10000;
+                       timeout = 0x10000;
                        break;
                }
-       } while (t);
+       } while (timeout);
 }
 
 #define MSCS __REG(0xA0900184)
@@ -89,6 +88,7 @@ static void putc_ns921x(char c, void __iomem *base)
 
 static void autodetect(void (**putc)(char, void __iomem *), void __iomem **base)
 {
+       timeout = 0x10000;
        if (((__raw_readl(MSCS) >> 16) & 0xfe) == 0x00) {
                /* ns9360 or ns9750 */
                if (NS9360_UART_ENABLED(NS9360_UARTA)) {
index abdf321..0348392 100644 (file)
@@ -175,6 +175,10 @@ static void __init rx51_add_gpio_keys(void)
 #endif /* CONFIG_KEYBOARD_GPIO || CONFIG_KEYBOARD_GPIO_MODULE */
 
 static int board_keymap[] = {
+       /*
+        * Note that KEY(x, 8, KEY_XXX) entries represent "entrire row
+        * connected to the ground" matrix state.
+        */
        KEY(0, 0, KEY_Q),
        KEY(0, 1, KEY_O),
        KEY(0, 2, KEY_P),
@@ -182,6 +186,7 @@ static int board_keymap[] = {
        KEY(0, 4, KEY_BACKSPACE),
        KEY(0, 6, KEY_A),
        KEY(0, 7, KEY_S),
+
        KEY(1, 0, KEY_W),
        KEY(1, 1, KEY_D),
        KEY(1, 2, KEY_F),
@@ -190,6 +195,7 @@ static int board_keymap[] = {
        KEY(1, 5, KEY_J),
        KEY(1, 6, KEY_K),
        KEY(1, 7, KEY_L),
+
        KEY(2, 0, KEY_E),
        KEY(2, 1, KEY_DOT),
        KEY(2, 2, KEY_UP),
@@ -197,6 +203,8 @@ static int board_keymap[] = {
        KEY(2, 5, KEY_Z),
        KEY(2, 6, KEY_X),
        KEY(2, 7, KEY_C),
+       KEY(2, 8, KEY_F9),
+
        KEY(3, 0, KEY_R),
        KEY(3, 1, KEY_V),
        KEY(3, 2, KEY_B),
@@ -205,20 +213,23 @@ static int board_keymap[] = {
        KEY(3, 5, KEY_SPACE),
        KEY(3, 6, KEY_SPACE),
        KEY(3, 7, KEY_LEFT),
+
        KEY(4, 0, KEY_T),
        KEY(4, 1, KEY_DOWN),
        KEY(4, 2, KEY_RIGHT),
        KEY(4, 4, KEY_LEFTCTRL),
        KEY(4, 5, KEY_RIGHTALT),
        KEY(4, 6, KEY_LEFTSHIFT),
+       KEY(4, 8, KEY_F10),
+
        KEY(5, 0, KEY_Y),
+       KEY(5, 8, KEY_F11),
+
        KEY(6, 0, KEY_U),
+
        KEY(7, 0, KEY_I),
        KEY(7, 1, KEY_F7),
        KEY(7, 2, KEY_F8),
-       KEY(0xff, 2, KEY_F9),
-       KEY(0xff, 4, KEY_F10),
-       KEY(0xff, 5, KEY_F11),
 };
 
 static struct matrix_keymap_data board_map_data = {
index 45c23fd..40b6ac2 100644 (file)
@@ -26,6 +26,7 @@
 #include <mach/colibri.h>
 #include <mach/ohci.h>
 #include <mach/pxafb.h>
+#include <mach/audio.h>
 
 #include "generic.h"
 #include "devices.h"
@@ -145,7 +146,7 @@ static void __init colibri_pxa300_init_lcd(void)
 static inline void colibri_pxa300_init_lcd(void) {}
 #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
 
-#if defined(SND_AC97_CODEC) || defined(SND_AC97_CODEC_MODULE)
+#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
 static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
        GPIO24_AC97_SYSCLK,
        GPIO23_AC97_nACRESET,
index 3d1dcb9..51ffa6a 100644 (file)
@@ -446,7 +446,7 @@ static struct platform_device corgiled_device = {
 static struct pxamci_platform_data corgi_mci_platform_data = {
        .detect_delay_ms        = 250,
        .ocr_mask               = MMC_VDD_32_33|MMC_VDD_33_34,
-       .gpio_card_detect       = -1,
+       .gpio_card_detect       = CORGI_GPIO_nSD_DETECT,
        .gpio_card_ro           = CORGI_GPIO_nSD_WP,
        .gpio_power             = CORGI_GPIO_SD_PWR,
 };
index 9e4d981..268a9bc 100644 (file)
@@ -256,13 +256,9 @@ static void init_sdram_rows(void)
 
 static u32 mdrefr_dri(unsigned int freq)
 {
-       u32 dri = 0;
+       u32 interval = freq * SDRAM_TREF / sdram_rows;
 
-       if (cpu_is_pxa25x())
-               dri = ((freq * SDRAM_TREF) / (sdram_rows * 32));
-       if (cpu_is_pxa27x())
-               dri = ((freq * SDRAM_TREF) / (sdram_rows - 31)) / 32;
-       return dri;
+       return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
 }
 
 /* find a valid frequency point */
index 0af3617..c059dac 100644 (file)
@@ -41,10 +41,10 @@ void pxa27x_clear_otgph(void)
 EXPORT_SYMBOL(pxa27x_clear_otgph);
 
 static unsigned long ac97_reset_config[] = {
-       GPIO95_AC97_nRESET,
-       GPIO95_GPIO,
-       GPIO113_AC97_nRESET,
        GPIO113_GPIO,
+       GPIO113_AC97_nRESET,
+       GPIO95_GPIO,
+       GPIO95_AC97_nRESET,
 };
 
 void pxa27x_assert_ac97reset(int reset_gpio, int on)
index 595be19..02e9fde 100644 (file)
@@ -237,7 +237,7 @@ static unsigned int realview_mmc_status(struct device *dev)
        else
                mask = 2;
 
-       return !(readl(REALVIEW_SYSMCI) & mask);
+       return readl(REALVIEW_SYSMCI) & mask;
 }
 
 struct mmci_platform_data realview_mmc0_plat_data = {
index 50f071c..5ea24d4 100644 (file)
@@ -20,6 +20,9 @@
                strb    \rd, [\rx]
                .endm
 
+               .macro waituart,rd,rx
+               .endm
+
                .macro  busyuart,rd,rx
                mov     \rd, #0
 1001:          add     \rd, \rd, #1
index fe84b90..0a1318f 100644 (file)
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(clk_disable);
  */
 static unsigned long clk_mtu_get_rate(struct clk *clk)
 {
-       void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+       void __iomem *addr = __io_address(UX500_PRCMU_BASE)
                + PRCM_TCR;
        u32 tcr = readl(addr);
        int mtu = (int) clk->data;
index 6a3ac45..e9278f6 100644 (file)
@@ -21,6 +21,7 @@ static struct map_desc u5500_io_desc[] __initdata = {
        __IO_DEV_DESC(U5500_GPIO2_BASE, SZ_4K),
        __IO_DEV_DESC(U5500_GPIO3_BASE, SZ_4K),
        __IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K),
+       __IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
 };
 
 static struct platform_device *u5500_platform_devs[] __initdata = {
index 8552eb1..0271ca0 100644 (file)
 static void putc(const char c)
 {
        /* Do nothing if the UART is not enabled. */
-       if (!(readb(U8500_UART_CR) & 0x1))
+       if (!(__raw_readb(U8500_UART_CR) & 0x1))
                return;
 
        if (c == '\n')
                putc('\r');
 
-       while (readb(U8500_UART_FR) & (1 << 5))
+       while (__raw_readb(U8500_UART_FR) & (1 << 5))
                barrier();
-       writeb(c, U8500_UART_DR);
+       __raw_writeb(c, U8500_UART_DR);
 }
 
 static void flush(void)
 {
-       if (!(readb(U8500_UART_CR) & 0x1))
+       if (!(__raw_readb(U8500_UART_CR) & 0x1))
                return;
-       while (readb(U8500_UART_FR) & (1 << 3))
+       while (__raw_readb(U8500_UART_FR) & (1 << 3))
                barrier();
 }
 
index d250711..c842397 100644 (file)
@@ -241,7 +241,7 @@ static struct platform_device v2m_flash_device = {
 
 static unsigned int v2m_mmci_status(struct device *dev)
 {
-       return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+       return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
 }
 
 static struct mmci_platform_data v2m_mmci_data = {
index 642207e..83c5632 100644 (file)
@@ -93,7 +93,7 @@ static struct clk_lookup nuc900_clkregs[] = {
        DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL),
        DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL),
        DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL),
-       DEF_CLKLOOK(&clk_adc, "nuc900-adc", NULL),
+       DEF_CLKLOOK(&clk_adc, "nuc900-ts", NULL),
        DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL),
        DEF_CLKLOOK(&clk_ext, NULL, "ext"),
        DEF_CLKLOOK(&clk_timer0, NULL, "timer0"),
index 9819869..9982eb3 100644 (file)
@@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask;      /* Bitmask of active ways */
 static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
        /* wait for the operation to complete */
-       while (readl(reg) & mask)
+       while (readl_relaxed(reg) & mask)
                ;
 }
 
 static inline void cache_sync(void)
 {
        void __iomem *base = l2x0_base;
-       writel(0, base + L2X0_CACHE_SYNC);
+       writel_relaxed(0, base + L2X0_CACHE_SYNC);
        cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
 {
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-       writel(addr, base + L2X0_CLEAN_LINE_PA);
+       writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
-       writel(addr, base + L2X0_INV_LINE_PA);
+       writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
 
        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-       writel(addr, base + L2X0_CLEAN_LINE_PA);
+       writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
-       writel(addr, base + L2X0_INV_LINE_PA);
+       writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 #else
 
@@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
 {
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-       writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+       writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
 
        /* invalidate all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
-       writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+       writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
        l2x0_base = base;
 
-       cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-       aux = readl(l2x0_base + L2X0_AUX_CTRL);
+       cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+       aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+
+       aux &= aux_mask;
+       aux |= aux_val;
 
        /* Determine the number of ways */
        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
@@ -245,17 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         * If you are booting from non-secure mode
         * accessing the below registers will fault.
         */
-       if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+       if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
                /* l2x0 controller is disabled */
-               aux &= aux_mask;
-               aux |= aux_val;
-               writel(aux, l2x0_base + L2X0_AUX_CTRL);
+               writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
                l2x0_inv_all();
 
                /* enable L2X0 */
-               writel(1, l2x0_base + L2X0_CTRL);
+               writel_relaxed(1, l2x0_base + L2X0_CTRL);
        }
 
        outer_cache.inv_range = l2x0_inv_range;
index 086816b..6ab2440 100644 (file)
@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
 
 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-       unsigned int idx, cpu = smp_processor_id();
-       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned int idx, cpu;
+       int *depth;
        unsigned long vaddr, flags;
        pte_t pte, *ptep;
 
+       if (!in_interrupt())
+               preempt_disable();
+
+       cpu = smp_processor_id();
+       depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+
        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);
 
-       if (!in_interrupt())
-               preempt_disable();
-
        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
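
The highmem.c hunk above fixes the classic per-CPU-before-preempt_disable() ordering bug: smp_processor_id() and the per_cpu() lookup are only meaningful once preemption is off, otherwise the task may migrate and bump another CPU's kmap depth counter (the !in_interrupt() check exists because interrupt context is already non-preemptible). The general shape of the rule, as a small sketch — get_cpu()/put_cpu() is the combined helper for the same thing:

    static void touch_this_cpus_depth(void)
    {
            int cpu;

            preempt_disable();              /* pin the task to one CPU first... */
            cpu = smp_processor_id();       /* ...then it is safe to ask which  */
            per_cpu(kmap_high_l1_vipt_depth, cpu)++;
            preempt_enable();
    }
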
index 1670734..37fa593 100644 (file)
@@ -17,8 +17,8 @@
                .macro  addruart, rx
                mrc     p15, 0, \rx, c1, c0
                tst     \rx, #1                                 @ MMU enabled?
-               moveq   \rx, =SPEAR_DBG_UART_BASE               @ Physical base
-               movne   \rx, =VA_SPEAR_DBG_UART_BASE            @ Virtual base
+               moveq   \rx, #SPEAR_DBG_UART_BASE               @ Physical base
+               movne   \rx, #VA_SPEAR_DBG_UART_BASE            @ Virtual base
                .endm
 
                .macro  senduart, rd, rx
index 8f10d24..48cbdcb 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Sat May 1 10:36:42 2010
+# Last update: Mon Jul 12 21:10:14 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -1994,7 +1994,7 @@ spark                     MACH_SPARK              SPARK                   2002
 benzina                        MACH_BENZINA            BENZINA                 2003
 blaze                  MACH_BLAZE              BLAZE                   2004
 linkstation_ls_hgl     MACH_LINKSTATION_LS_HGL LINKSTATION_LS_HGL      2005
-htckovsky              MACH_HTCVENUS           HTCVENUS                2006
+htckovsky              MACH_HTCKOVSKY          HTCKOVSKY               2006
 sony_prs505            MACH_SONY_PRS505        SONY_PRS505             2007
 hanlin_v3              MACH_HANLIN_V3          HANLIN_V3               2008
 sapphira               MACH_SAPPHIRA           SAPPHIRA                2009
@@ -2609,7 +2609,7 @@ fujitsu_tvstbsoc1 MACH_FUJITSU_TVSTBSOC1  FUJITSU_TVSTBSOC1       2621
 lexikon                        MACH_LEXIKON            LEXIKON                 2622
 mini2440v2             MACH_MINI2440V2         MINI2440V2              2623
 icontrol               MACH_ICONTROL           ICONTROL                2624
-sheevad                        MACH_SHEEVAD            SHEEVAD                 2625
+gplugd                 MACH_SHEEVAD            SHEEVAD                 2625
 qsd8x50a_st1_1         MACH_QSD8X50A_ST1_1     QSD8X50A_ST1_1          2626
 qsd8x50a_st1_5         MACH_QSD8X50A_ST1_5     QSD8X50A_ST1_5          2627
 bee                    MACH_BEE                BEE                     2628
@@ -2804,3 +2804,149 @@ teton_bga               MACH_TETON_BGA          TETON_BGA               2816
 snapper9g45            MACH_SNAPPER9G45        SNAPPER9G45             2817
 tam3517                        MACH_TAM3517            TAM3517                 2818
 pdc100                 MACH_PDC100             PDC100                  2819
+eukrea_cpuimx25sd      MACH_EUKREA_CPUIMX25    EUKREA_CPUIMX25         2820
+eukrea_cpuimx35sd      MACH_EUKREA_CPUIMX35    EUKREA_CPUIMX35         2821
+eukrea_cpuimx51sd      MACH_EUKREA_CPUIMX51SD  EUKREA_CPUIMX51SD       2822
+eukrea_cpuimx51                MACH_EUKREA_CPUIMX51    EUKREA_CPUIMX51         2823
+p565                   MACH_P565               P565                    2824
+acer_a4                        MACH_ACER_A4            ACER_A4                 2825
+davinci_dm368_bip      MACH_DAVINCI_DM368_BIP  DAVINCI_DM368_BIP       2826
+eshare                 MACH_ESHARE             ESHARE                  2827
+hw_omapl138_europa     MACH_HW_OMAPL138_EUROPA HW_OMAPL138_EUROPA      2828
+wlbargn                        MACH_WLBARGN            WLBARGN                 2829
+bm170                  MACH_BM170              BM170                   2830
+netspace_mini_v2       MACH_NETSPACE_MINI_V2   NETSPACE_MINI_V2        2831
+netspace_plug_v2       MACH_NETSPACE_PLUG_V2   NETSPACE_PLUG_V2        2832
+siemens_l1             MACH_SIEMENS_L1         SIEMENS_L1              2833
+elv_lcu1               MACH_ELV_LCU1           ELV_LCU1                2834
+mcu1                   MACH_MCU1               MCU1                    2835
+omap3_tao3530          MACH_OMAP3_TAO3530      OMAP3_TAO3530           2836
+omap3_pcutouch         MACH_OMAP3_PCUTOUCH     OMAP3_PCUTOUCH          2837
+smdkc210               MACH_SMDKC210           SMDKC210                2838
+omap3_braillo          MACH_OMAP3_BRAILLO      OMAP3_BRAILLO           2839
+spyplug                        MACH_SPYPLUG            SPYPLUG                 2840
+ginger                 MACH_GINGER             GINGER                  2841
+tny_t3530              MACH_TNY_T3530          TNY_T3530               2842
+pca102                 MACH_PCA102             PCA102                  2843
+spade                  MACH_SPADE              SPADE                   2844
+mxc25_topaz            MACH_MXC25_TOPAZ        MXC25_TOPAZ             2845
+t5325                  MACH_T5325              T5325                   2846
+gw2361                 MACH_GW2361             GW2361                  2847
+elog                   MACH_ELOG               ELOG                    2848
+income                 MACH_INCOME             INCOME                  2849
+bcm589x                        MACH_BCM589X            BCM589X                 2850
+etna                   MACH_ETNA               ETNA                    2851
+hawks                  MACH_HAWKS              HAWKS                   2852
+meson                  MACH_MESON              MESON                   2853
+xsbase255              MACH_XSBASE255          XSBASE255               2854
+pvm2030                        MACH_PVM2030            PVM2030                 2855
+mioa502                        MACH_MIOA502            MIOA502                 2856
+vvbox_sdorig2          MACH_VVBOX_SDORIG2      VVBOX_SDORIG2           2857
+vvbox_sdlite2          MACH_VVBOX_SDLITE2      VVBOX_SDLITE2           2858
+vvbox_sdpro4           MACH_VVBOX_SDPRO4       VVBOX_SDPRO4            2859
+htc_spv_m700           MACH_HTC_SPV_M700       HTC_SPV_M700            2860
+mx257sx                        MACH_MX257SX            MX257SX                 2861
+goni                   MACH_GONI               GONI                    2862
+msm8x55_svlte_ffa      MACH_MSM8X55_SVLTE_FFA  MSM8X55_SVLTE_FFA       2863
+msm8x55_svlte_surf     MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF      2864
+quickstep              MACH_QUICKSTEP          QUICKSTEP               2865
+dmw96                  MACH_DMW96              DMW96                   2866
+hammerhead             MACH_HAMMERHEAD         HAMMERHEAD              2867
+trident                        MACH_TRIDENT            TRIDENT                 2868
+lightning              MACH_LIGHTNING          LIGHTNING               2869
+iconnect               MACH_ICONNECT           ICONNECT                2870
+autobot                        MACH_AUTOBOT            AUTOBOT                 2871
+coconut                        MACH_COCONUT            COCONUT                 2872
+durian                 MACH_DURIAN             DURIAN                  2873
+cayenne                        MACH_CAYENNE            CAYENNE                 2874
+fuji                   MACH_FUJI               FUJI                    2875
+synology_6282          MACH_SYNOLOGY_6282      SYNOLOGY_6282           2876
+em1sy                  MACH_EM1SY              EM1SY                   2877
+m502                   MACH_M502               M502                    2878
+matrix518              MACH_MATRIX518          MATRIX518               2879
+tiny_gurnard           MACH_TINY_GURNARD       TINY_GURNARD            2880
+spear1310              MACH_SPEAR1310          SPEAR1310               2881
+bv07                   MACH_BV07               BV07                    2882
+mxt_td61               MACH_MXT_TD61           MXT_TD61                2883
+openrd_ultimate                MACH_OPENRD_ULTIMATE    OPENRD_ULTIMATE         2884
+devixp                 MACH_DEVIXP             DEVIXP                  2885
+miccpt                 MACH_MICCPT             MICCPT                  2886
+mic256                 MACH_MIC256             MIC256                  2887
+as1167                 MACH_AS1167             AS1167                  2888
+omap3_ibiza            MACH_OMAP3_IBIZA        OMAP3_IBIZA             2889
+u5500                  MACH_U5500              U5500                   2890
+davinci_picto          MACH_DAVINCI_PICTO      DAVINCI_PICTO           2891
+mecha                  MACH_MECHA              MECHA                   2892
+bubba3                 MACH_BUBBA3             BUBBA3                  2893
+pupitre                        MACH_PUPITRE            PUPITRE                 2894
+tegra_harmony          MACH_TEGRA_HARMONY      TEGRA_HARMONY           2895
+tegra_vogue            MACH_TEGRA_VOGUE        TEGRA_VOGUE             2896
+tegra_e1165            MACH_TEGRA_E1165        TEGRA_E1165             2897
+simplenet              MACH_SIMPLENET          SIMPLENET               2898
+ec4350tbm              MACH_EC4350TBM          EC4350TBM               2899
+pec_tc                 MACH_PEC_TC             PEC_TC                  2900
+pec_hc2                        MACH_PEC_HC2            PEC_HC2                 2901
+esl_mobilis_a          MACH_ESL_MOBILIS_A      ESL_MOBILIS_A           2902
+esl_mobilis_b          MACH_ESL_MOBILIS_B      ESL_MOBILIS_B           2903
+esl_wave_a             MACH_ESL_WAVE_A         ESL_WAVE_A              2904
+esl_wave_b             MACH_ESL_WAVE_B         ESL_WAVE_B              2905
+unisense_mmm           MACH_UNISENSE_MMM       UNISENSE_MMM            2906
+blueshark              MACH_BLUESHARK          BLUESHARK               2907
+e10                    MACH_E10                E10                     2908
+app3k_robin            MACH_APP3K_ROBIN        APP3K_ROBIN             2909
+pov15hd                        MACH_POV15HD            POV15HD                 2910
+stella                 MACH_STELLA             STELLA                  2911
+linkstation_lschl      MACH_LINKSTATION_LSCHL  LINKSTATION_LSCHL       2913
+netwalker              MACH_NETWALKER          NETWALKER               2914
+acsx106                        MACH_ACSX106            ACSX106                 2915
+atlas5_c1              MACH_ATLAS5_C1          ATLAS5_C1               2916
+nsb3ast                        MACH_NSB3AST            NSB3AST                 2917
+gnet_slc               MACH_GNET_SLC           GNET_SLC                2918
+af4000                 MACH_AF4000             AF4000                  2919
+ark9431                        MACH_ARK9431            ARK9431                 2920
+fs_s5pc100             MACH_FS_S5PC100         FS_S5PC100              2921
+omap3505nova8          MACH_OMAP3505NOVA8      OMAP3505NOVA8           2922
+omap3621_edp1          MACH_OMAP3621_EDP1      OMAP3621_EDP1           2923
+oratisaes              MACH_ORATISAES          ORATISAES               2924
+smdkv310               MACH_SMDKV310           SMDKV310                2925
+siemens_l0             MACH_SIEMENS_L0         SIEMENS_L0              2926
+ventana                        MACH_VENTANA            VENTANA                 2927
+wm8505_7in_netbook     MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK      2928
+ec4350sdb              MACH_EC4350SDB          EC4350SDB               2929
+mimas                  MACH_MIMAS              MIMAS                   2930
+titan                  MACH_TITAN              TITAN                   2931
+craneboard             MACH_CRANEBOARD         CRANEBOARD              2932
+es2440                 MACH_ES2440             ES2440                  2933
+najay_a9263            MACH_NAJAY_A9263        NAJAY_A9263             2934
+htctornado             MACH_HTCTORNADO         HTCTORNADO              2935
+dimm_mx257             MACH_DIMM_MX257         DIMM_MX257              2936
+jigen301               MACH_JIGEN              JIGEN                   2937
+smdk6450               MACH_SMDK6450           SMDK6450                2938
+meno_qng               MACH_MENO_QNG           MENO_QNG                2939
+ns2416                 MACH_NS2416             NS2416                  2940
+rpc353                 MACH_RPC353             RPC353                  2941
+tq6410                 MACH_TQ6410             TQ6410                  2942
+sky6410                        MACH_SKY6410            SKY6410                 2943
+dynasty                        MACH_DYNASTY            DYNASTY                 2944
+vivo                   MACH_VIVO               VIVO                    2945
+bury_bl7582            MACH_BURY_BL7582        BURY_BL7582             2946
+bury_bps5270           MACH_BURY_BPS5270       BURY_BPS5270            2947
+basi                   MACH_BASI               BASI                    2948
+tn200                  MACH_TN200              TN200                   2949
+c2mmi                  MACH_C2MMI              C2MMI                   2950
+meson_6236m            MACH_MESON_6236M        MESON_6236M             2951
+meson_8626m            MACH_MESON_8626M        MESON_8626M             2952
+tube                   MACH_TUBE               TUBE                    2953
+messina                        MACH_MESSINA            MESSINA                 2954
+mx50_arm2              MACH_MX50_ARM2          MX50_ARM2               2955
+cetus9263              MACH_CETUS9263          CETUS9263               2956
+brownstone             MACH_BROWNSTONE         BROWNSTONE              2957
+vmx25                  MACH_VMX25              VMX25                   2958
+vmx51                  MACH_VMX51              VMX51                   2959
+abacus                 MACH_ABACUS             ABACUS                  2960
+cm4745                 MACH_CM4745             CM4745                  2961
+oratislink             MACH_ORATISLINK         ORATISLINK              2962
+davinci_dm365_dvr      MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
+netviz                 MACH_NETVIZ             NETVIZ                  2964
+flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
+wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
index 0cf2c0a..e6ac0b6 100644 (file)
@@ -54,6 +54,9 @@
 #define TIOCGPTN       _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK     _IOW('T',0x31, int)  /* Lock/unlock Pty */
 
+#define TIOCGRS485      0x542E
+#define TIOCSRS485      0x542F
+
 #define FIONCLEX       0x5450
 #define FIOCLEX                0x5451
 #define FIOASYNC       0x5452
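
The two ioctls added above expose the RS-485 configuration path to user space. As a minimal, hedged sketch (not part of the patch): a user-space program with an already-open serial descriptor could use them roughly as follows, where SER_RS485_ENABLED comes from <linux/serial.h> and enable_rs485 is a hypothetical helper name.

	#include <linux/serial.h>
	#include <sys/ioctl.h>

	/* Sketch only: switch an open serial fd into RS-485 mode. */
	static int enable_rs485(int fd)
	{
		struct serial_rs485 rs485;

		if (ioctl(fd, TIOCGRS485, &rs485) < 0)	/* read current settings */
			return -1;
		rs485.flags |= SER_RS485_ENABLED;	/* let the driver drive RTS for TX framing */
		return ioctl(fd, TIOCSRS485, &rs485);	/* write the settings back */
	}
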
index c7f25bb..6174020 100644 (file)
@@ -5,6 +5,7 @@
 #define __ASM_ARCH_BOARD_H
 
 #include <linux/types.h>
+#include <linux/serial.h>
 
 #define GPIO_PIN_NONE  (-1)
 
@@ -35,6 +36,7 @@ struct atmel_uart_data {
        short           use_dma_tx;     /* use transmit DMA? */
        short           use_dma_rx;     /* use receive DMA? */
        void __iomem    *regs;          /* virtual base address, if any */
+       struct serial_rs485     rs485;          /* rs485 settings */
 };
 void at32_map_usart(unsigned int hw_id, unsigned int line, int flags);
 struct platform_device *at32_add_device_usart(unsigned int id);
index 76818f9..505a085 100644 (file)
@@ -5,7 +5,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
 
 config MICROBLAZE
        def_bool y
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FUNCTION_GRAPH_TRACER
similarity index 57%
rename from arch/microblaze/include/asm/lmb.h
rename to arch/microblaze/include/asm/memblock.h
index a0a0a92..f9c2fa3 100644 (file)
@@ -6,12 +6,12 @@
  * for more details.
  */
 
-#ifndef _ASM_MICROBLAZE_LMB_H
-#define _ASM_MICROBLAZE_LMB_H
+#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
+#define _ASM_MICROBLAZE_MEMBLOCK_H
 
-/* LMB limit is OFF */
-#define LMB_REAL_LIMIT 0xFFFFFFFF
+/* MEMBLOCK limit is OFF */
+#define MEMBLOCK_REAL_LIMIT    0xFFFFFFFF
 
-#endif /* _ASM_MICROBLAZE_LMB_H */
+#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
 
 
index a15ef6d..427b13b 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/page.h>
@@ -49,12 +49,12 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node)
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       lmb_add(base, size);
+       memblock_add(base, size);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-       return lmb_alloc(size, align);
+       return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -104,8 +104,8 @@ void __init early_init_devtree(void *params)
         */
        of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-       /* Scan memory nodes and rebuild LMBs */
-       lmb_init();
+       /* Scan memory nodes and rebuild MEMBLOCKs */
+       memblock_init();
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
@@ -113,9 +113,9 @@ void __init early_init_devtree(void *params)
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
        parse_early_param();
 
-       lmb_analyze();
+       memblock_analyze();
 
-       pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
+       pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
 
        pr_debug(" <- early_init_devtree()\n");
 }
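
The microblaze prom.c conversion above is a mechanical rename of the old LMB early allocator to memblock. As a hedged sketch of the resulting early-boot call pattern (assuming the 2010-era interface, where memblock_init() and memblock_analyze() still exist; example_early_mem_setup is a hypothetical name, not code from this series):

	#include <linux/memblock.h>

	/* Sketch only: record RAM reported by the device tree and keep
	 * later early allocations off the kernel image, mirroring the
	 * converted code above. */
	static void __init example_early_mem_setup(u64 ram_base, u64 ram_size,
						   u64 kstart, u64 ksize)
	{
		memblock_init();			/* reset the region tables */
		memblock_add(ram_base, ram_size);	/* usable RAM */
		memblock_reserve(kstart, ksize);	/* protect the kernel image */
		memblock_analyze();			/* recompute totals and limits */
	}
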
index cca3579..db59349 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
@@ -76,10 +76,10 @@ void __init setup_memory(void)
        u32 kernel_align_start, kernel_align_size;
 
        /* Find main memory where is the kernel */
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               memory_start = (u32) lmb.memory.region[i].base;
-               memory_end = (u32) lmb.memory.region[i].base
-                               + (u32) lmb.memory.region[i].size;
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               memory_start = (u32) memblock.memory.region[i].base;
+               memory_end = (u32) memblock.memory.region[i].base
+                               + (u32) memblock.memory.region[i].size;
                if ((memory_start <= (u32)_text) &&
                                        ((u32)_text <= memory_end)) {
                        memory_size = memory_end - memory_start;
@@ -100,7 +100,7 @@ void __init setup_memory(void)
        kernel_align_start = PAGE_DOWN((u32)_text);
        /* ALIGN can be remove because _end in vmlinux.lds.S is align */
        kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-       lmb_reserve(kernel_align_start, kernel_align_size);
+       memblock_reserve(kernel_align_start, kernel_align_size);
        printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
                __func__, kernel_align_start, kernel_align_start
                        + kernel_align_size, kernel_align_size);
@@ -141,18 +141,18 @@ void __init setup_memory(void)
        map_size = init_bootmem_node(&contig_page_data,
                PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
 #endif
-       lmb_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
+       memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
 
        /* free bootmem is whole main memory */
        free_bootmem(memory_start, memory_size);
 
        /* reserve allocate blocks */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
+       for (i = 0; i < memblock.reserved.cnt; i++) {
                pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
-                       (u32) lmb.reserved.region[i].base,
-                       (u32) lmb_size_bytes(&lmb.reserved, i));
-               reserve_bootmem(lmb.reserved.region[i].base,
-                       lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
+                       (u32) memblock.reserved.region[i].base,
+                       (u32) memblock_size_bytes(&memblock.reserved, i));
+               reserve_bootmem(memblock.reserved.region[i].base,
+                       memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT);
        }
 #ifdef CONFIG_MMU
        init_bootmem_done = 1;
@@ -235,7 +235,7 @@ static void mm_cmdline_setup(void)
                if (maxmem && memory_size > maxmem) {
                        memory_size = maxmem;
                        memory_end = memory_start + memory_size;
-                       lmb.memory.region[0].size = memory_size;
+                       memblock.memory.region[0].size = memory_size;
                }
        }
 }
@@ -273,19 +273,19 @@ asmlinkage void __init mmu_init(void)
 {
        unsigned int kstart, ksize;
 
-       if (!lmb.reserved.cnt) {
+       if (!memblock.reserved.cnt) {
                printk(KERN_EMERG "Error memory count\n");
                machine_restart(NULL);
        }
 
-       if ((u32) lmb.memory.region[0].size < 0x1000000) {
+       if ((u32) memblock.memory.region[0].size < 0x1000000) {
                printk(KERN_EMERG "Memory must be greater than 16MB\n");
                machine_restart(NULL);
        }
        /* Find main memory where the kernel is */
-       memory_start = (u32) lmb.memory.region[0].base;
-       memory_end = (u32) lmb.memory.region[0].base +
-                               (u32) lmb.memory.region[0].size;
+       memory_start = (u32) memblock.memory.region[0].base;
+       memory_end = (u32) memblock.memory.region[0].base +
+                               (u32) memblock.memory.region[0].size;
        memory_size = memory_end - memory_start;
 
        mm_cmdline_setup(); /* FIXME parse args from command line - not used */
@@ -297,7 +297,7 @@ asmlinkage void __init mmu_init(void)
        kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
        /* kernel size */
        ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
-       lmb_reserve(kstart, ksize);
+       memblock_reserve(kstart, ksize);
 
 #if defined(CONFIG_BLK_DEV_INITRD)
        /* Remove the init RAM disk from the available memory. */
@@ -335,7 +335,7 @@ void __init *early_get_page(void)
                 * Mem start + 32MB -> here is limit
                 * because of mem mapping from head.S
                 */
-               p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+               p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
                                        memory_start + 0x2000000));
        }
        return p;
index 2580e77..f9e5622 100644 (file)
@@ -435,20 +435,21 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
 static int __init au1xxx_platform_init(void)
 {
        unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
-       int i;
+       int err, i;
 
        /* Fill up uartclk. */
        for (i = 0; au1x00_uart_data[i].flags; i++)
                au1x00_uart_data[i].uartclk = uartclk;
 
+       err = platform_add_devices(au1xxx_platform_devices,
+                                  ARRAY_SIZE(au1xxx_platform_devices));
 #ifndef CONFIG_SOC_AU1100
        /* Register second MAC if enabled in pinfunc */
-       if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
+       if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
                platform_device_register(&au1xxx_eth1_device);
 #endif
 
-       return platform_add_devices(au1xxx_platform_devices,
-                                   ARRAY_SIZE(au1xxx_platform_devices));
+       return err;
 }
 
 arch_initcall(au1xxx_platform_init);
index a9f0336..52d883d 100644 (file)
@@ -67,8 +67,6 @@ static void mtx1_power_off(void)
 
 void __init board_setup(void)
 {
-       alchemy_gpio2_enable();
-
 #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
        /* Enable USB power switch */
        alchemy_gpio_direction_output(204, 0);
@@ -117,11 +115,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert)
 
        if (assert && devsel != 0)
                /* Suppress signal to Cardbus */
-               gpio_set_value(1, 0);   /* set EXT_IO3 OFF */
+               alchemy_gpio_set_value(1, 0);   /* set EXT_IO3 OFF */
        else
-               gpio_set_value(1, 1);   /* set EXT_IO3 ON */
+               alchemy_gpio_set_value(1, 1);   /* set EXT_IO3 ON */
 
-       au_sync_udelay(1);
+       udelay(1);
        return 1;
 }
 
index 9f544ba..39c2336 100644 (file)
@@ -104,6 +104,9 @@ int __init bcm63xx_enet_register(int unit,
        if (unit > 1)
                return -ENODEV;
 
+       if (unit == 1 && BCMCPU_IS_6338())
+               return -ENODEV;
+
        if (!shared_device_registered) {
                shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
                shared_res[0].end = shared_res[0].start;
index 59dc0c7..c63c56b 100644 (file)
@@ -434,7 +434,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
-               "       addu    %0, %2                                  \n"
+               "       daddu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -446,7 +446,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
-               "       addu    %0, %2                                  \n"
+               "       daddu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 2f                                  \n"
                "       .subsection 2                                   \n"
@@ -479,7 +479,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
-               "       subu    %0, %2                                  \n"
+               "       dsubu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -491,7 +491,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
-               "       subu    %0, %2                                  \n"
+               "       dsubu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 2f                                  \n"
                "       .subsection 2                                   \n"
@@ -524,10 +524,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -538,10 +538,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 2f                                  \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
@@ -576,10 +576,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -590,10 +590,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 2f                                  \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
index 1b5a664..baa318a 100644 (file)
 #define __NR_perf_event_open           (__NR_Linux + 296)
 #define __NR_accept4                   (__NR_Linux + 297)
 #define __NR_recvmmsg                  (__NR_Linux + 298)
+#define __NR_getdents64                        (__NR_Linux + 299)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            298
+#define __NR_Linux_syscalls            299
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                298
+#define __NR_N32_Linux_syscalls                299
 
 #ifdef __KERNEL__
 
index a5297e2..a4facee 100644 (file)
@@ -419,4 +419,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_perf_event_open
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg
+       PTR     sys_getdents
        .size   sysn32_call_table,.-sysn32_call_table
index b773c11..e5cdfd6 100644 (file)
@@ -61,11 +61,9 @@ static int __init init_vdso(void)
 
        vunmap(vdso);
 
-       pr_notice("init_vdso successfull\n");
-
        return 0;
 }
-device_initcall(init_vdso);
+subsys_initcall(init_vdso);
 
 static unsigned long vdso_addr(unsigned long start)
 {
index 2fbfa1a..bf80921 100644 (file)
@@ -247,6 +247,8 @@ void __init mips_pcibios_init(void)
        iomem_resource.end &= 0xfffffffffULL;                   /* 64 GB */
        ioport_resource.end = controller->io_resource->end;
 
+       controller->io_map_base = mips_io_port_base;
+
        register_pci_controller(controller);
 }
 
index eee4f3d..98e86dd 100644 (file)
@@ -44,6 +44,7 @@ extern struct pci_ops pnx8550_pci_ops;
 
 static struct pci_controller pnx8550_controller = {
        .pci_ops        = &pnx8550_pci_ops,
+       .io_map_base    = PNX8550_PORT_BASE,
        .io_resource    = &pci_io_resource,
        .mem_resource   = &pci_mem_resource,
 };
index 2aed50f..64246c9 100644 (file)
@@ -113,7 +113,7 @@ void __init plat_mem_setup(void)
        PNX8550_GLB2_ENAB_INTA_O = 0;
 
        /* IO/MEM resources. */
-       set_io_port_base(KSEG1);
+       set_io_port_base(PNX8550_PORT_BASE);
        ioport_resource.start = 0;
        ioport_resource.end = ~0;
        iomem_resource.start = 0;
index 04b3147..b7c03d8 100644 (file)
@@ -944,6 +944,7 @@ static struct pci_controller msp_pci_controller = {
        .pci_ops        = &msp_pci_ops,
        .mem_resource   = &pci_mem_resource,
        .mem_offset     = 0,
+       .io_map_base    = MSP_PCI_IOSPACE_BASE,
        .io_resource    = &pci_io_resource,
        .io_offset      = 0
 };
index 0357946..cf5e1a2 100644 (file)
@@ -54,6 +54,7 @@ static int __init pmc_yosemite_setup(void)
                panic(ioremap_failed);
 
        set_io_port_base(io_v_base);
+       py_controller.io_map_base = io_v_base;
        TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);
 
        ioport_resource.end = TITAN_IO_SIZE - 1;
index 8ee7788..9ec523e 100644 (file)
@@ -472,6 +472,9 @@ void __init configure_platform(void)
                 * it*/
                platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
 
+               /* Cronus and Cronus Lite have the same register map */
+               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
+
                /* ASIC version will determine if this is a real CronusLite or
                 * Castrati(Cronus) */
                chipversion  = asic_read(chipver3) << 24;
@@ -484,8 +487,6 @@ void __init configure_platform(void)
                else
                        asic = ASIC_CRONUSLITE;
 
-               /* Cronus and Cronus Lite have the same register map */
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
                gp_resources = non_dvr_cronuslite_resources;
                pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
                        "chipversion=0x%08X\n",
index 6506bf4..2031a28 100644 (file)
@@ -132,7 +132,7 @@ config PPC
        select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select USE_GENERIC_SMP_HELPERS if SMP
index 98324c5..9a846ef 100644 (file)
@@ -12,7 +12,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/types.h>
 #include <asm/page.h>
index 0835eb9..e50323f 100644 (file)
@@ -6,6 +6,30 @@
 #include <linux/errno.h>
 #include <linux/of.h>
 
+/*
+ * SPI Parameter RAM common to QE and CPM.
+ */
+struct spi_pram {
+       __be16  rbase;  /* Rx Buffer descriptor base address */
+       __be16  tbase;  /* Tx Buffer descriptor base address */
+       u8      rfcr;   /* Rx function code */
+       u8      tfcr;   /* Tx function code */
+       __be16  mrblr;  /* Max receive buffer length */
+       __be32  rstate; /* Internal */
+       __be32  rdp;    /* Internal */
+       __be16  rbptr;  /* Internal */
+       __be16  rbc;    /* Internal */
+       __be32  rxtmp;  /* Internal */
+       __be32  tstate; /* Internal */
+       __be32  tdp;    /* Internal */
+       __be16  tbptr;  /* Internal */
+       __be16  tbc;    /* Internal */
+       __be32  txtmp;  /* Internal */
+       __be32  res;    /* Tx temp. */
+       __be16  rpbase; /* Relocation pointer (CPM1 only) */
+       __be16  res1;   /* Reserved */
+};
+
 /*
  * USB Controller pram common to QE and CPM.
  */
index 81b0119..bd07650 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef __CPM1__
 #define __CPM1__
 
+#include <linux/init.h>
 #include <asm/8xx_immap.h>
 #include <asm/ptrace.h>
 #include <asm/cpm.h>
@@ -54,7 +55,7 @@ extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
 
 extern void cpm_setbrg(uint brg, uint rate);
 
-extern void cpm_load_patch(cpm8xx_t *cp);
+extern void __init cpm_load_patch(cpm8xx_t *cp);
 
 extern void cpm_reset(void);
 
index 2a9cd74..076327f 100644 (file)
@@ -8,9 +8,9 @@
  * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
  * and therefore we can only deal with memory within this range
  */
-#define KEXEC_SOURCE_MEMORY_LIMIT      (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_CONTROL_MEMORY_LIMIT     (2 * 1024 * 1024 * 1024UL)
+#define KEXEC_SOURCE_MEMORY_LIMIT      (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT     (2 * 1024 * 1024 * 1024UL - 1)
 
 #else
 
diff --git a/arch/powerpc/include/asm/lmb.h b/arch/powerpc/include/asm/lmb.h
deleted file mode 100644 (file)
index 6f5fdf0..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_POWERPC_LMB_H
-#define _ASM_POWERPC_LMB_H
-
-#include <asm/udbg.h>
-
-#define LMB_DBG(fmt...) udbg_printf(fmt)
-
-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define LMB_REAL_LIMIT lowmem_end_addr
-#else
-#define LMB_REAL_LIMIT 0
-#endif
-
-#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
new file mode 100644 (file)
index 0000000..3c29728
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef _ASM_POWERPC_MEMBLOCK_H
+#define _ASM_POWERPC_MEMBLOCK_H
+
+#include <asm/udbg.h>
+
+#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
+
+#ifdef CONFIG_PPC32
+extern phys_addr_t lowmem_end_addr;
+#define MEMBLOCK_REAL_LIMIT    lowmem_end_addr
+#else
+#define MEMBLOCK_REAL_LIMIT    0
+#endif
+
+#endif /* _ASM_POWERPC_MEMBLOCK_H */
index 2102b21..0e398cf 100644 (file)
@@ -250,7 +250,9 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize);
-
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+                              unsigned long vsid, unsigned long trap,
+                              int ssize, int psize, unsigned long pte);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                             unsigned long pstart, unsigned long prot,
                             int psize, int ssize);
index 26e5863..625942a 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/prom.h>
index 29df48f..417f7b0 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
index 5fb667a..40f5246 100644 (file)
@@ -13,7 +13,7 @@
 
 #include <linux/crash_dump.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/code-patching.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -33,7 +33,7 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
-       lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+       memblock_reserve(0, KDUMP_RESERVE_LIMIT);
 }
 
 static void __init create_trampoline(unsigned long addr)
index e7fe218..02f724f 100644 (file)
@@ -71,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
        sd->max_direct_dma_addr = 0;
 
        /* May need to bounce if the device can't address all of DRAM */
-       if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
+       if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
                set_dma_ops(dev, &swiotlb_dma_ops);
 
        return NOTIFY_DONE;
index 8d1de6f..84d6367 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/gfp.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
 
@@ -89,7 +89,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
-       return mask >= (lmb_end_of_DRAM() - 1);
+       return mask >= (memblock_end_of_DRAM() - 1);
 #else
        return 1;
 #endif
index beb4d78..a92c79b 100644 (file)
@@ -205,8 +205,7 @@ next_tlb_setup:
        bdnz+   next_tlb_setup
 
 /* 7. Jump to our 1:1 mapping */
-       li      r6, 0
-
+       mr      r6, r25
 #else
        #error You need to specify the mapping or not use this at all.
 #endif
@@ -217,7 +216,6 @@ next_tlb_setup:
 1:     mflr    r9
        rlwimi  r6,r9,0,20,31
        addi    r6,r6,(2f - 1b)
-       add     r6, r6, r25
        mtspr   SPRN_SRR0,r6
        mtspr   SPRN_SRR1,r7
        rfi                             /* start execution out of TLB1[0] entry */
index bb3d893..89f0051 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/reboot.h>
 #include <linux/threads.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -66,11 +66,11 @@ void __init reserve_crashkernel(void)
        unsigned long long crash_size, crash_base;
        int ret;
 
-       /* this is necessary because of lmb_phys_mem_size() */
-       lmb_analyze();
+       /* this is necessary because of memblock_phys_mem_size() */
+       memblock_analyze();
 
        /* use common parsing */
-       ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+       ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size > 0) {
                crashk_res.start = crash_base;
@@ -133,9 +133,9 @@ void __init reserve_crashkernel(void)
                        "for crashkernel (System RAM: %ldMB)\n",
                        (unsigned long)(crash_size >> 20),
                        (unsigned long)(crashk_res.start >> 20),
-                       (unsigned long)(lmb_phys_mem_size() >> 20));
+                       (unsigned long)(memblock_phys_mem_size() >> 20));
 
-       lmb_reserve(crashk_res.start, crash_size);
+       memblock_reserve(crashk_res.start, crash_size);
 }
 
 int overlaps_crashkernel(unsigned long start, unsigned long size)
index f88acf0..139a773 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/threads.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/firmware.h>
 #include <asm/lppaca.h>
@@ -117,7 +117,7 @@ void __init allocate_pacas(void)
         * the first segment. On iSeries they must be within the area mapped
         * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
         */
-       limit = min(0x10000000ULL, lmb.rmo_size);
+       limit = min(0x10000000ULL, memblock.rmo_size);
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
@@ -128,7 +128,7 @@ void __init allocate_pacas(void)
 
        paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
 
-       paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+       paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
        memset(paca, 0, paca_size);
 
        printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
@@ -148,7 +148,7 @@ void __init free_unused_pacas(void)
        if (new_size >= paca_size)
                return;
 
-       lmb_free(__pa(paca) + new_size, paca_size - new_size);
+       memblock_free(__pa(paca) + new_size, paca_size - new_size);
 
        printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
                paca_size - new_size);
index 369872f..babccee 100644 (file)
@@ -566,9 +566,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         * Finally record data if requested.
         */
        if (record) {
-               struct perf_sample_data data = {
-                       .period = event->hw.last_period,
-               };
+               struct perf_sample_data data;
+
+               perf_sample_data_init(&data, 0);
 
                if (perf_event_overflow(event, nmi, &data, regs)) {
                        /*
index 05131d6..fed9bf6 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -98,7 +98,7 @@ static void __init move_device_tree(void)
 
        if ((memory_limit && (start + size) > memory_limit) ||
                        overlaps_crashkernel(start, size)) {
-               p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+               p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
                memcpy(p, initial_boot_params, size);
                initial_boot_params = (struct boot_param_header *)p;
                DBG("Moved device tree to 0x%p\n", p);
@@ -411,13 +411,13 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
        __be32 *dm, *ls, *usm;
        unsigned long l, n, flags;
-       u64 base, size, lmb_size;
+       u64 base, size, memblock_size;
        unsigned int is_kexec_kdump = 0, rngs;
 
        ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
        if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
                return 0;
-       lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+       memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
        dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
        if (dm == NULL || l < sizeof(__be32))
@@ -442,11 +442,11 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
                   or if the block is not assigned to this partition (0x8) */
                if ((flags & 0x80) || !(flags & 0x8))
                        continue;
-               size = lmb_size;
+               size = memblock_size;
                rngs = 1;
                if (is_kexec_kdump) {
                        /*
-                        * For each lmb in ibm,dynamic-memory, a corresponding
+                        * For each memblock in ibm,dynamic-memory, a corresponding
                         * entry in linux,drconf-usable-memory property contains
                         * a counter 'p' followed by 'p' (base, size) duple.
                         * Now read the counter from
@@ -469,10 +469,10 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
                                if ((base + size) > 0x80000000ul)
                                        size = 0x80000000ul - base;
                        }
-                       lmb_add(base, size);
+                       memblock_add(base, size);
                } while (--rngs);
        }
-       lmb_dump_all();
+       memblock_dump_all();
        return 0;
 }
 #else
@@ -501,14 +501,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
        }
 #endif
 
-       lmb_add(base, size);
+       memblock_add(base, size);
 
        memstart_addr = min((u64)memstart_addr, base);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-       return lmb_alloc(size, align);
+       return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -534,12 +534,12 @@ static void __init early_reserve_mem(void)
        /* before we do anything, lets reserve the dt blob */
        self_base = __pa((unsigned long)initial_boot_params);
        self_size = initial_boot_params->totalsize;
-       lmb_reserve(self_base, self_size);
+       memblock_reserve(self_base, self_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* then reserve the initrd, if any */
        if (initrd_start && (initrd_end > initrd_start))
-               lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
+               memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
@@ -560,7 +560,7 @@ static void __init early_reserve_mem(void)
                        if (base_32 == self_base && size_32 == self_size)
                                continue;
                        DBG("reserving: %x -> %x\n", base_32, size_32);
-                       lmb_reserve(base_32, size_32);
+                       memblock_reserve(base_32, size_32);
                }
                return;
        }
@@ -571,7 +571,7 @@ static void __init early_reserve_mem(void)
                if (size == 0)
                        break;
                DBG("reserving: %llx -> %llx\n", base, size);
-               lmb_reserve(base, size);
+               memblock_reserve(base, size);
        }
 }
 
@@ -594,7 +594,7 @@ static inline unsigned long phyp_dump_calculate_reserve_size(void)
                return phyp_dump_info->reserve_bootvar;
 
        /* divide by 20 to get 5% of value */
-       tmp = lmb_end_of_DRAM();
+       tmp = memblock_end_of_DRAM();
        do_div(tmp, 20);
 
        /* round it down in multiples of 256 */
@@ -633,11 +633,11 @@ static void __init phyp_dump_reserve_mem(void)
        if (phyp_dump_info->phyp_dump_is_active) {
                /* Reserve *everything* above RMR.Area freed by userland tools*/
                base = variable_reserve_size;
-               size = lmb_end_of_DRAM() - base;
+               size = memblock_end_of_DRAM() - base;
 
                /* XXX crashed_ram_end is wrong, since it may be beyond
                 * the memory_limit, it will need to be adjusted. */
-               lmb_reserve(base, size);
+               memblock_reserve(base, size);
 
                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
@@ -645,8 +645,8 @@ static void __init phyp_dump_reserve_mem(void)
                size = phyp_dump_info->cpu_state_size +
                        phyp_dump_info->hpte_region_size +
                        variable_reserve_size;
-               base = lmb_end_of_DRAM() - size;
-               lmb_reserve(base, size);
+               base = memblock_end_of_DRAM() - size;
+               memblock_reserve(base, size);
                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
        }
@@ -681,8 +681,8 @@ void __init early_init_devtree(void *params)
         */
        of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-       /* Scan memory nodes and rebuild LMBs */
-       lmb_init();
+       /* Scan memory nodes and rebuild MEMBLOCKs */
+       memblock_init();
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
@@ -690,11 +690,11 @@ void __init early_init_devtree(void *params)
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
        parse_early_param();
 
-       /* Reserve LMB regions used by kernel, initrd, dt, etc... */
-       lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+       /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
+       memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
        /* If relocatable, reserve first 32k for interrupt vectors etc. */
        if (PHYSICAL_START > MEMORY_START)
-               lmb_reserve(MEMORY_START, 0x8000);
+               memblock_reserve(MEMORY_START, 0x8000);
        reserve_kdump_trampoline();
        reserve_crashkernel();
        early_reserve_mem();
@@ -706,17 +706,17 @@ void __init early_init_devtree(void *params)
 
                /* Ensure that total memory size is page-aligned, because
                 * otherwise mark_bootmem() gets upset. */
-               lmb_analyze();
-               memsize = lmb_phys_mem_size();
+               memblock_analyze();
+               memsize = memblock_phys_mem_size();
                if ((memsize & PAGE_MASK) != memsize)
                        limit = memsize & PAGE_MASK;
        }
-       lmb_enforce_memory_limit(limit);
+       memblock_enforce_memory_limit(limit);
 
-       lmb_analyze();
-       lmb_dump_all();
+       memblock_analyze();
+       memblock_dump_all();
 
-       DBG("Phys. mem: %llx\n", lmb_phys_mem_size());
+       DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
 
        /* We may need to relocate the flat tree, do it now.
         * FIXME .. and the initrd too? */
index 0e1ec6f..d0516db 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/prom.h>
@@ -934,11 +934,11 @@ void __init rtas_initialize(void)
         */
 #ifdef CONFIG_PPC64
        if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-               rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+               rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
                ibm_suspend_me_token = rtas_token("ibm,suspend-me");
        }
 #endif
-       rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+       rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
 
 #ifdef CONFIG_RTAS_ERROR_LOGGING
        rtas_last_error_token = rtas_token("rtas-last-error");
index 5e4d852..b7e6c7e 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
 #include <asm/paca.h>
index 7d84b21..a10ffc8 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -246,12 +246,12 @@ static void __init irqstack_early_init(void)
        unsigned int i;
 
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
-        * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+        * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                hardirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 }
 
@@ -261,15 +261,15 @@ static void __init exc_lvl_early_init(void)
        unsigned int i;
 
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
-        * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+        * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
                critirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
                dbgirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                mcheckirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
        }
 }
index 643dcac..d135f93 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
 
        printk("-----------------------------------------------------\n");
        printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
-       printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size());
+       printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());
        if (ppc64_caches.dline_size != 0x80)
                printk("ppc64_caches.dcache_line_size = 0x%x\n",
                       ppc64_caches.dline_size);
@@ -443,10 +443,10 @@ static void __init irqstack_early_init(void)
         */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc_base(THREAD_SIZE,
+                       __va(memblock_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, limit));
                hardirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc_base(THREAD_SIZE,
+                       __va(memblock_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, limit));
        }
 }
@@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
        for_each_possible_cpu(i) {
                critirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                dbgirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                mcheckirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 }
 #else
@@ -487,11 +487,11 @@ static void __init emergency_stack_init(void)
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
         */
-       limit = min(slb0_limit(), lmb.rmo_size);
+       limit = min(slb0_limit(), memblock.rmo_size);
 
        for_each_possible_cpu(i) {
                unsigned long sp;
-               sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+               sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
                sp += THREAD_SIZE;
                paca[i].emergency_sp = __va(sp);
        }
index d84d192..13002fe 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/elf.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -734,7 +734,7 @@ static int __init vdso_init(void)
        vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
        if (firmware_has_feature(FW_FEATURE_LPAR))
                vdso_data->platform |= 1;
-       vdso_data->physicalMemorySize = lmb_phys_mem_size();
+       vdso_data->physicalMemorySize = memblock_phys_mem_size();
        vdso_data->dcache_size = ppc64_caches.dsize;
        vdso_data->dcache_line_size = ppc64_caches.dline_size;
        vdso_data->icache_size = ppc64_caches.isize;
index 65abfcf..1dc2fa5 100644 (file)
@@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
        /* If the size of RAM is not an exact power of two, we may not
         * have covered RAM in its entirety with 16 and 4 MiB
         * pages. Consequently, restrict the top end of RAM currently
-        * allocable so that calls to the LMB to allocate PTEs for "tail"
+        * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
         * coverage with normal-sized pages (or other reasons) do not
         * attempt to allocate outside the allowed range.
         */
index a719f53..3079f6b 100644 (file)
@@ -68,9 +68,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
        
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -347,9 +344,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -687,9 +681,6 @@ _GLOBAL(__hash_page_64K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
index 3ecdcec..09dffe6 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
        printk(KERN_INFO "Huge page(16GB) memory: "
                        "addr = 0x%lX size = 0x%lX pages = %d\n",
                        phys_addr, block_size, expected_pages);
-       if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-               lmb_reserve(phys_addr, block_size * expected_pages);
+       if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+               memblock_reserve(phys_addr, block_size * expected_pages);
                add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-           lmb_phys_mem_size() >= 0x40000000)
+           memblock_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
                return 1UL << ppc64_pft_size;
 
        /* round mem_size up to next power of 2 */
-       mem_size = lmb_phys_mem_size();
+       mem_size = memblock_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;
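
The rounding in this hunk is untouched by the rename: it computes the smallest power of two that covers the memblock-reported RAM size, and the hash table is then sized from that. A standalone sketch of the same arithmetic, with __ilog2() replaced by a plain loop for illustration:

    /* Sketch: round mem_size up to the next power of two, as the hunk
     * above does via __ilog2(); illustration only. */
    static unsigned long round_up_pow_of_two(unsigned long mem_size)
    {
            unsigned long rnd = 1;

            while (rnd < mem_size)
                    rnd <<= 1;      /* double until it covers mem_size */
            return rnd;
    }
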
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
                else
                        limit = 0;
 
-               table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+               table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
        prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-       linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-       linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-                                                   1, lmb.rmo_size));
+       linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+       linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+                                                   1, memblock.rmo_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
         */
 
        /* create bolted the linear mapping in the hash table */
-       for (i=0; i < lmb.memory.cnt; i++) {
-               base = (unsigned long)__va(lmb.memory.region[i].base);
-               size = lmb.memory.region[i].size;
+       for (i=0; i < memblock.memory.cnt; i++) {
+               base = (unsigned long)__va(memblock.memory.region[i].base);
+               size = memblock.memory.region[i].size;
 
                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);
 
 #ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
-                * in such a way that it will not cross two lmb regions and
+                * in such a way that it will not cross two memblock regions and
                 * will fit within a single 16Mb page.
                 * The DART space is assumed to be a full 16Mb region even if
                 * we only use 2Mb of that space. We will use more of it later
@@ -871,6 +871,18 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
+void hash_failure_debug(unsigned long ea, unsigned long access,
+                       unsigned long vsid, unsigned long trap,
+                       int ssize, int psize, unsigned long pte)
+{
+       if (!printk_ratelimit())
+               return;
+       pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
+               ea, access, current->comm);
+       pr_info("    trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
+               trap, vsid, ssize, psize, pte);
+}
+
 /* Result code is:
  *  0 - handled
  *  1 - normal page fault
@@ -955,6 +967,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                return 1;
        }
 
+       /* Add _PAGE_PRESENT to the required access perm */
+       access |= _PAGE_PRESENT;
+
+       /* Pre-check access permissions (will be re-checked atomically
+        * in __hash_page_XX but this pre-check is a fast path
+        */
+       if (access & ~pte_val(*ptep)) {
+               DBG_LOW(" no access !\n");
+               return 1;
+       }
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift)
                return __hash_page_huge(ea, access, vsid, ptep, trap, local,
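
Taken together with the hash_low_64.S hunks earlier (which drop the per-helper "ori r4,r4,_PAGE_PRESENT"), the present bit is now folded into the access mask exactly once, and the cheap permission pre-check runs before any page-size-specific helper is called. In sketch form, the fast path now reads:

    /* Sketch of the consolidated fast path in hash_page(): require the
     * present bit once, then bail out early if the Linux PTE is missing
     * any required permission bit.  The atomic re-check still happens
     * later inside __hash_page_xx(). */
    access |= _PAGE_PRESENT;
    if (access & ~pte_val(*ptep))
            return 1;               /* 1 == fall back to do_page_fault() */
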
@@ -967,14 +990,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
-       /* Pre-check access permissions (will be re-checked atomically
-        * in __hash_page_XX but this pre-check is a fast path
-        */
-       if (access & ~pte_val(*ptep)) {
-               DBG_LOW(" no access !\n");
-               return 1;
-       }
-
        /* Do actual hashing */
 #ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
@@ -1033,6 +1048,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                                            local, ssize, spp);
        }
 
+       /* Dump some info in case of hash insertion failure, they should
+        * never happen so it is really useful to know if/when they do
+        */
+       if (rc == -1)
+               hash_failure_debug(ea, access, vsid, trap, ssize, psize,
+                                  pte_val(*ptep));
 #ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
 #else
@@ -1051,8 +1072,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        void *pgdir;
        pte_t *ptep;
        unsigned long flags;
-       int local = 0;
-       int ssize;
+       int rc, ssize, local = 0;
 
        BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -1098,11 +1118,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        /* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
-               __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+               rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-               __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
-                              subpage_protection(pgdir, ea));
+               rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
+                                   subpage_protection(pgdir, ea));
+
+       /* Dump some info in case of hash insertion failure, they should
+        * never happen so it is really useful to know if/when they do
+        */
+       if (rc == -1)
+               hash_failure_debug(ea, access, vsid, trap, ssize,
+                                  mm->context.user_psize, pte_val(*ptep));
 
        local_irq_restore(flags);
 }
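
With hash_preload() now capturing the helper's return value, both callers share one reporting convention: a return of -1 means the HPTE insert was refused, and the failure is logged through the new, rate-limited hash_failure_debug(). A condensed sketch of that convention, using the call shown in the hunk above:

    /* Sketch: -1 from __hash_page_4K/_64K/_huge means the HPTE insert
     * failed; report it, since this is never expected in practice. */
    rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
                        subpage_protection(pgdir, ea));
    if (rc == -1)
            hash_failure_debug(ea, access, vsid, trap, ssize,
                               mm->context.user_psize, pte_val(*ptep));
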
index 1995398..cc5c273 100644 (file)
@@ -21,21 +21,13 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        unsigned long old_pte, new_pte;
        unsigned long va, rflags, pa, sz;
        long slot;
-       int err = 1;
 
        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
        /* Search the Linux page table for a match with va */
        va = hpt_va(ea, vsid, ssize);
 
-       /*
-        * Check the user's access rights to the page.  If access should be
-        * prevented then send the problem up to do_page_fault.
-        */
-       if (unlikely(access & ~pte_val(*ptep)))
-               goto out;
-       /*
-        * At this point, we have a pte (old_pte) which can be used to build
+       /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
@@ -49,9 +41,17 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 
        do {
                old_pte = pte_val(*ptep);
-               if (old_pte & _PAGE_BUSY)
-                       goto out;
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /* Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));
 
@@ -121,8 +121,16 @@ repeat:
                         }
                }
 
-               if (unlikely(slot == -2))
-                       panic("hash_huge_page: pte_insert failed\n");
+               /*
+                * Hypervisor failure. Restore old pte and return -1
+                * similar to __hash_page_*
+                */
+               if (unlikely(slot == -2)) {
+                       *ptep = __pte(old_pte);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          mmu_psize, old_pte);
+                       return -1;
+               }
 
                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }
@@ -131,9 +139,5 @@ repeat:
         * No need to use ldarx/stdcx here
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
-
-       err = 0;
-
- out:
-       return err;
+       return 0;
 }
index 7673330..6a6975d 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
@@ -136,17 +136,17 @@ void __init MMU_init(void)
        /* parse args from command line */
        MMU_setup();
 
-       if (lmb.memory.cnt > 1) {
+       if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-               lmb.memory.cnt = 1;
-               lmb_analyze();
+               memblock.memory.cnt = 1;
+               memblock_analyze();
                printk(KERN_WARNING "Only using first contiguous memory region");
 #else
                wii_memory_fixups();
 #endif
        }
 
-       total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+       total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
        lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
@@ -161,8 +161,8 @@ void __init MMU_init(void)
                lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
                total_memory = total_lowmem;
-               lmb_enforce_memory_limit(lowmem_end_addr);
-               lmb_analyze();
+               memblock_enforce_memory_limit(lowmem_end_addr);
+               memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
        }
 
@@ -200,7 +200,7 @@ void __init *early_get_page(void)
        if (init_bootmem_done) {
                p = alloc_bootmem_pages(PAGE_SIZE);
        } else {
-               p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+               p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
                                        __initial_memory_limit_addr));
        }
        return p;
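
Outside of CONFIG_WII, the 32-bit MMU_init() hunk earlier in this file still maps only the first contiguous memory region; after trimming the region count, memblock_analyze() has to be re-run so the derived totals (memblock.memory.size, end of DRAM) match the edited list. Sketched, with a newline added to the warning purely for readability:

    /* Sketch: keep only region[0] and let memblock recompute its
     * derived totals, as the non-Wii branch above does. */
    if (memblock.memory.cnt > 1) {
            memblock.memory.cnt = 1;        /* drop all but the first region */
            memblock_analyze();             /* refresh size/end-of-DRAM totals */
            printk(KERN_WARNING "Only using first contiguous memory region\n");
    }
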
index e267f22..71f1415 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/poison.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 
index 0f594d7..1a84a8d 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        int i;
-       for (i=0; i < lmb.memory.cnt; i++) {
+       for (i=0; i < memblock.memory.cnt; i++) {
                unsigned long base;
 
-               base = lmb.memory.region[i].base;
+               base = memblock.memory.region[i].base;
 
                if ((paddr >= base) &&
-                       (paddr < (base + lmb.memory.region[i].size))) {
+                       (paddr < (base + memblock.memory.region[i].size))) {
                        return 1;
                }
        }
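
page_is_ram() simply walks memblock's region table: an address counts as RAM if it falls inside any [base, base + size) interval. A self-contained sketch of the same containment test, with a hypothetical region type standing in for memblock's:

    /* Sketch: linear scan over (base, size) regions, mirroring the
     * page_is_ram() loop above.  Hypothetical standalone types. */
    struct mem_region { unsigned long base, size; };

    static int addr_is_ram(unsigned long paddr,
                           const struct mem_region *r, int cnt)
    {
            int i;

            for (i = 0; i < cnt; i++)
                    if (paddr >= r[i].base && paddr < r[i].base + r[i].size)
                            return 1;
            return 0;
    }
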
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
  * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures.  Walk through the
+ * Instead it maintains it in memblock.memory structures.  Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-       struct lmb_property res;
+       struct memblock_property res;
        unsigned long pfn, len;
        u64 end;
        int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
        res.size = (u64) nr_pages << PAGE_SHIFT;
 
        end = res.base + res.size - 1;
-       while ((res.base < end) && (lmb_find(&res) >= 0)) {
+       while ((res.base < end) && (memblock_find(&res) >= 0)) {
                pfn = (unsigned long)(res.base >> PAGE_SHIFT);
                len = (unsigned long)(res.size >> PAGE_SHIFT);
                ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
        unsigned long total_pages;
        int boot_mapsize;
 
-       max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-       total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+       max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+       total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-       start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+       start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
        /* Add active regions with valid PFNs */
-       for (i = 0; i < lmb.memory.cnt; i++) {
+       for (i = 0; i < memblock.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
 
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
        /* reserve the sections we're already using */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long addr = lmb.reserved.region[i].base +
-                                    lmb_size_bytes(&lmb.reserved, i) - 1;
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               unsigned long addr = memblock.reserved.region[i].base +
+                                    memblock_size_bytes(&memblock.reserved, i) - 1;
                if (addr < lowmem_end_addr)
-                       reserve_bootmem(lmb.reserved.region[i].base,
-                                       lmb_size_bytes(&lmb.reserved, i),
+                       reserve_bootmem(memblock.reserved.region[i].base,
+                                       memblock_size_bytes(&memblock.reserved, i),
                                        BOOTMEM_DEFAULT);
-               else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+               else if (memblock.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
-                                     lmb.reserved.region[i].base;
-                       reserve_bootmem(lmb.reserved.region[i].base,
+                                     memblock.reserved.region[i].base;
+                       reserve_bootmem(memblock.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
        free_bootmem_with_active_regions(0, max_pfn);
 
        /* reserve the sections we're already using */
-       for (i = 0; i < lmb.reserved.cnt; i++)
-               reserve_bootmem(lmb.reserved.region[i].base,
-                               lmb_size_bytes(&lmb.reserved, i),
+       for (i = 0; i < memblock.reserved.cnt; i++)
+               reserve_bootmem(memblock.reserved.region[i].base,
+                               memblock_size_bytes(&memblock.reserved, i),
                                BOOTMEM_DEFAULT);
 
 #endif
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-       unsigned long lmb_next_region_start_pfn,
-                     lmb_region_max_pfn;
+       unsigned long memblock_next_region_start_pfn,
+                     memblock_region_max_pfn;
        int i;
 
-       for (i = 0; i < lmb.memory.cnt - 1; i++) {
-               lmb_region_max_pfn =
-                       (lmb.memory.region[i].base >> PAGE_SHIFT) +
-                       (lmb.memory.region[i].size >> PAGE_SHIFT);
-               lmb_next_region_start_pfn =
-                       lmb.memory.region[i+1].base >> PAGE_SHIFT;
+       for (i = 0; i < memblock.memory.cnt - 1; i++) {
+               memblock_region_max_pfn =
+                       (memblock.memory.region[i].base >> PAGE_SHIFT) +
+                       (memblock.memory.region[i].size >> PAGE_SHIFT);
+               memblock_next_region_start_pfn =
+                       memblock.memory.region[i+1].base >> PAGE_SHIFT;
 
-               if (lmb_region_max_pfn < lmb_next_region_start_pfn)
-                       register_nosave_region(lmb_region_max_pfn,
-                                              lmb_next_region_start_pfn);
+               if (memblock_region_max_pfn < memblock_next_region_start_pfn)
+                       register_nosave_region(memblock_region_max_pfn,
+                                              memblock_next_region_start_pfn);
        }
 
        return 0;
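
mark_nonram_nosave() relies on the region list being kept sorted by base address (which lmb/memblock maintains): the gap, if any, between the end of region i and the start of region i+1 is exactly the PFN range to exclude from hibernation images. Sketched with the renamed fields:

    /* Sketch: register the PFN hole between each pair of adjacent
     * memory regions as not-saveable, as the hunk above does. */
    for (i = 0; i < memblock.memory.cnt - 1; i++) {
            unsigned long end_pfn  = (memblock.memory.region[i].base +
                                      memblock.memory.region[i].size) >> PAGE_SHIFT;
            unsigned long next_pfn = memblock.memory.region[i + 1].base >> PAGE_SHIFT;

            if (end_pfn < next_pfn)
                    register_nosave_region(end_pfn, next_pfn);
    }
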
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-       unsigned long total_ram = lmb_phys_mem_size();
-       phys_addr_t top_of_ram = lmb_end_of_DRAM();
+       unsigned long total_ram = memblock_phys_mem_size();
+       phys_addr_t top_of_ram = memblock_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
                swiotlb_init(1);
 #endif
 
-       num_physpages = lmb.memory.size >> PAGE_SHIFT;
+       num_physpages = memblock.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
-                       if (lmb_is_reserved(pfn << PAGE_SHIFT))
+                       if (memblock_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
index 80d1106..aa731af 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -351,7 +351,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED    0x00000080
 
 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -372,8 +372,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retrieve and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries.  Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a number N of memblock
+ * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as laid out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
@@ -540,19 +540,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
 {
        /*
-        * We use lmb_end_of_DRAM() in here instead of memory_limit because
+        * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */
 
-       if (start + size <= lmb_end_of_DRAM())
+       if (start + size <= memblock_end_of_DRAM())
                return size;
 
-       if (start >= lmb_end_of_DRAM())
+       if (start >= memblock_end_of_DRAM())
                return 0;
 
-       return lmb_end_of_DRAM() - start;
+       return memblock_end_of_DRAM() - start;
 }
 
 /*
@@ -731,7 +731,7 @@ new_range:
        }
 
        /*
-        * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+        * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
         * property in the ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -743,8 +743,8 @@ new_range:
 
 static void __init setup_nonnuma(void)
 {
-       unsigned long top_of_ram = lmb_end_of_DRAM();
-       unsigned long total_ram = lmb_phys_mem_size();
+       unsigned long top_of_ram = memblock_end_of_DRAM();
+       unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int i, nid = 0;
 
@@ -753,9 +753,9 @@ static void __init setup_nonnuma(void)
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
 
-       for (i = 0; i < lmb.memory.cnt; ++i) {
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+       for (i = 0; i < memblock.memory.cnt; ++i) {
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 
                fake_numa_create_new_node(end_pfn, &nid);
                add_active_range(nid, start_pfn, end_pfn);
@@ -813,7 +813,7 @@ static void __init dump_numa_memory_topology(void)
 
                count = 0;
 
-               for (i = 0; i < lmb_end_of_DRAM();
+               for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
@@ -833,7 +833,7 @@ static void __init dump_numa_memory_topology(void)
 }
 
 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, satisfying the memblock or bootmem allocator where
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
@@ -847,11 +847,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
        int new_nid;
        unsigned long ret_paddr;
 
-       ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+       ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
        /* retry over all memory */
        if (!ret_paddr)
-               ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+               ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 
        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -861,14 +861,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 
        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
-        * and hand over control from the LMB allocator to the
+        * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator.  If this function is called for
         * node 5, then we know that all nodes <5 are using the
-        * bootmem allocator instead of the LMB allocator.
+        * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
-        * instead of the LMB.  We don't free the LMB memory
+        * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
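
careful_zallocation() shows the usual two-step fallback with the non-panicking allocator: try below the node's top address first, then anywhere in DRAM, and only panic if both fail. The pattern, condensed from the hunks above:

    /* Sketch: __memblock_alloc_base() returns 0 instead of panicking,
     * so the caller can retry with a looser limit before giving up. */
    ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
    if (!ret_paddr)         /* nothing free below the preferred node's top */
            ret_paddr = __memblock_alloc_base(size, align,
                                              memblock_end_of_DRAM());
    if (!ret_paddr)
            panic("numa.c: cannot allocate %lu bytes for node %d",
                  size, nid);
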
@@ -893,9 +893,9 @@ static void mark_reserved_regions_for_nid(int nid)
        struct pglist_data *node = NODE_DATA(nid);
        int i;
 
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long physbase = lmb.reserved.region[i].base;
-               unsigned long size = lmb.reserved.region[i].size;
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               unsigned long physbase = memblock.reserved.region[i].base;
+               unsigned long size = memblock.reserved.region[i].size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
@@ -903,7 +903,7 @@ static void mark_reserved_regions_for_nid(int nid)
                                             node->node_spanned_pages;
 
                /*
-                * Check to make sure that this lmb.reserved area is
+                * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
@@ -961,7 +961,7 @@ void __init do_init_bootmem(void)
        int nid;
 
        min_low_pfn = 0;
-       max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;
 
        if (parse_numa_properties())
@@ -1038,7 +1038,7 @@ void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-       max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
 }
 
@@ -1113,7 +1113,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
  */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1154,8 +1154,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
 
 /*
  * Find the node associated with a hot added memory section.  Section
- * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
  */
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
index 34347b2..a87ead0 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgtable.h>
@@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
         * mem_init() sets high_memory so only do the check after that.
         */
        if (mem_init_done && (p < virt_to_phys(high_memory)) &&
-           !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
+           !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
                printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
                       (unsigned long long)p, __builtin_return_address(0));
                return NULL;
@@ -331,7 +331,7 @@ void __init mapin_ram(void)
                s = mmu_mapin_ram(top);
                __mapin_ram_chunk(s, top);
 
-               top = lmb_end_of_DRAM();
+               top = memblock_end_of_DRAM();
                s = wii_mmu_mapin_mem2(top);
                __mapin_ram_chunk(s, top);
        }
index d050fc8..21d6dfa 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgalloc.h>
@@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size)
        if (init_bootmem_done)
                pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
        else
-               pt = __va(lmb_alloc_base(size, size,
+               pt = __va(memblock_alloc_base(size, size,
                                         __pa(MAX_DMA_ADDRESS)));
        memset(pt, 0, size);
 
index f11c2cd..f8a0182 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/mmu.h>
@@ -223,7 +223,7 @@ void __init MMU_init_hw(void)
         * Find some memory for the hash table.
         */
        if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-       Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+       Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
                                   __initial_memory_limit_addr));
        cacheable_memzero(Hash, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
index 687fdda..446a018 100644 (file)
@@ -12,7 +12,7 @@
  *      2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -252,7 +252,7 @@ void __init stabs_alloc(void)
                if (cpu == 0)
                        continue; /* stab for CPU 0 is statically allocated */
 
-               newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+               newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
                                         1<<SID_SHIFT);
                newstab = (unsigned long)__va(newstab);
 
index e81d5d6..d8695b0 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -426,7 +426,7 @@ static void __early_init_mmu(int boot_cpu)
        /* Set the global containing the top of the linear mapping
         * for use by the TLB miss code
         */
-       linear_map_top = lmb_end_of_DRAM();
+       linear_map_top = memblock_end_of_DRAM();
 
        /* A sync won't hurt us after mucking around with
         * the MMU configuration
index 534c2ec..2ab338c 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/kdev_t.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -100,7 +100,7 @@ void __init corenet_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
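
This hunk, repeated for the other 85xx/86xx board files below, keeps the same test after the rename: if RAM extends past what the platform can address directly for PCI DMA ('max'), fall back to swiotlb bounce buffering. The idiom, isolated:

    /* Sketch: enable bounce buffering only when the top of RAM, as
     * reported by memblock, exceeds the directly DMA-able limit. */
    if (memblock_end_of_DRAM() > max) {
            ppc_swiotlb_enable = 1;
            set_pci_dma_ops(&swiotlb_dma_ops);
            ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
    }
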
index 004b7d3..f79f2f1 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -94,7 +94,7 @@ static void __init mpc8536_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 544011a..8190bc2 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -190,7 +190,7 @@ static void __init mpc85xx_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 8fe87fc..4945136 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include <linux/phy.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/atomic.h>
@@ -325,7 +325,7 @@ static void __init mpc85xx_mds_setup_arch(void)
 #endif /* CONFIG_QUICC_ENGINE */
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 2aa69a6..b11c353 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -103,7 +103,7 @@ mpc86xx_hpcn_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 4326b73..3712900 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -845,10 +845,10 @@ static int __init cell_iommu_init_disabled(void)
        /* If we found a DMA window, we check if it's big enough to enclose
         * all of physical memory. If not, we force enable IOMMU
         */
-       if (np && size < lmb_end_of_DRAM()) {
+       if (np && size < memblock_end_of_DRAM()) {
                printk(KERN_WARNING "iommu: force-enabled, dma window"
                       " (%ldMB) smaller than total memory (%lldMB)\n",
-                      size >> 20, lmb_end_of_DRAM() >> 20);
+                      size >> 20, memblock_end_of_DRAM() >> 20);
                return -ENODEV;
        }
 
@@ -1064,7 +1064,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
        }
 
        fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
-       fsize = lmb_phys_mem_size();
+       fsize = memblock_phys_mem_size();
 
        if ((fbase + fsize) <= 0x800000000ul)
                hbase = 0; /* use the device tree window */
@@ -1169,7 +1169,7 @@ static int __init cell_iommu_init(void)
         * Note: should we make sure we have the IOMMU actually disabled ?
         */
        if (iommu_is_off ||
-           (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
+           (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
                if (cell_iommu_init_disabled() == 0)
                        goto bail;
 
index 174a04a..5cdcc7c 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/kexec.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <mm/mmu_decl.h>
 
 #include <asm/io.h>
@@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x)
 
 void __init wii_memory_fixups(void)
 {
-       struct lmb_property *p = lmb.memory.region;
+       struct memblock_property *p = memblock.memory.region;
 
        /*
         * This is part of a workaround to allow the use of two
@@ -77,7 +77,7 @@ void __init wii_memory_fixups(void)
         * between both ranges.
         */
 
-       BUG_ON(lmb.memory.cnt != 2);
+       BUG_ON(memblock.memory.cnt != 2);
        BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
 
        p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
@@ -92,11 +92,11 @@ void __init wii_memory_fixups(void)
 
        p[0].size += wii_hole_size + p[1].size;
 
-       lmb.memory.cnt = 1;
-       lmb_analyze();
+       memblock.memory.cnt = 1;
+       memblock_analyze();
 
        /* reserve the hole */
-       lmb_reserve(wii_hole_start, wii_hole_size);
+       memblock_reserve(wii_hole_start, wii_hole_size);
 
        /* allow ioremapping the address space in the hole */
        __allow_ioremap_reserved = 1;
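
The Wii fixup is the one place where two discontiguous ranges are deliberately fused into a single memblock region, with the gap reserved so the allocator never hands it out (while ioremap of the hole stays allowed). The sequence, condensed from the hunk above:

    /* Sketch: extend region 0 over the hole and region 1, recompute the
     * derived totals, then reserve the hole itself. */
    p[0].size += wii_hole_size + p[1].size; /* one covering region */
    memblock.memory.cnt = 1;
    memblock_analyze();
    memblock_reserve(wii_hole_start, wii_hole_size);
    __allow_ioremap_reserved = 1;           /* hole may still be ioremapped */
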
index 39df705..3fff8d9 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/smp.h>
 #include <linux/bitops.h>
 #include <linux/of_device.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index 7b1d608..1f9fb2c 100644 (file)
@@ -204,7 +204,7 @@ int __init iob_init(struct device_node *dn)
        pr_debug(" -> %s\n", __func__);
 
        /* Allocate a spare page to map all invalid IOTLB pages. */
-       tmp = lmb_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
+       tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
        if (!tmp)
                panic("IOBMAP: Cannot allocate spare page!");
        /* Empty l1 is marked invalid */
@@ -275,7 +275,7 @@ void __init alloc_iobmap_l2(void)
        return;
 #endif
        /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
-       iob_l2_base = (u32 *)abs_to_virt(lmb_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
+       iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
 
        printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
 }
index f1d0132..9deb274 100644 (file)
@@ -51,7 +51,7 @@
 #include <linux/suspend.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/reg.h>
 #include <asm/sections.h>
@@ -619,7 +619,7 @@ static int __init pmac_probe(void)
         * driver needs that. We have to allocate it now. We allocate 4k
         * (1 small page) for now.
         */
-       smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
+       smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
 #endif /* CONFIG_PMAC_SMU */
 
        return 1;
index 1e8a1e3..2c0ed87 100644 (file)
@@ -19,7 +19,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/machdep.h>
 #include <asm/prom.h>
index 7925751..c204588 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/cell-regs.h>
@@ -318,8 +318,8 @@ static int __init ps3_mm_add_memory(void)
                return result;
        }
 
-       lmb_add(start_addr, map.r1.size);
-       lmb_analyze();
+       memblock_add(start_addr, map.r1.size);
+       memblock_analyze();
 
        result = online_pages(start_pfn, nr_pages);
 
index dd521a1..5b759b6 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/fs.h>
 #include <linux/syscalls.h>
 #include <linux/ctype.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 
@@ -723,7 +723,7 @@ static void os_area_queue_work(void)
  * flash to a high address in the boot memory region and then puts that RAM
  * address and the byte count into the repository for retrieval by the guest.
  * We copy the data we want into a static variable and allow the memory setup
- * by the HV to be claimed by the lmb manager.
+ * by the HV to be claimed by the memblock manager.
  *
  * The os area mirror will not be available to a second stage kernel, and
  * the header verify will fail.  In this case, the saved_params values will
index 01e7b5b..bc88036 100644 (file)
  */
 
 #include <linux/of.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/vmalloc.h>
 #include <asm/firmware.h>
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
 #include <asm/sparsemem.h>
 
-static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
+static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
        unsigned long start, start_pfn;
        struct zone *zone;
@@ -26,7 +26,7 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
        start_pfn = base >> PAGE_SHIFT;
 
        if (!pfn_valid(start_pfn)) {
-               lmb_remove(base, lmb_size);
+               memblock_remove(base, memblock_size);
                return 0;
        }
 
@@ -41,20 +41,20 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
         * to sysfs "state" file and we can't remove sysfs entries
         * while writing to it. So we have to defer it to here.
         */
-       ret = __remove_pages(zone, start_pfn, lmb_size >> PAGE_SHIFT);
+       ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
        if (ret)
                return ret;
 
        /*
         * Update memory regions for memory remove
         */
-       lmb_remove(base, lmb_size);
+       memblock_remove(base, memblock_size);
 
        /*
         * Remove htab bolted mappings for this section of memory
         */
        start = (unsigned long)__va(base);
-       ret = remove_section_mapping(start, start + lmb_size);
+       ret = remove_section_mapping(start, start + memblock_size);
 
        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
@@ -80,7 +80,7 @@ static int pseries_remove_memory(struct device_node *np)
                return 0;
 
        /*
-        * Find the base address and size of the lmb
+        * Find the base address and size of the memblock
         */
        regs = of_get_property(np, "reg", NULL);
        if (!regs)
@@ -89,7 +89,7 @@ static int pseries_remove_memory(struct device_node *np)
        base = *(unsigned long *)regs;
        lmb_size = regs[3];
 
-       ret = pseries_remove_lmb(base, lmb_size);
+       ret = pseries_remove_memblock(base, lmb_size);
        return ret;
 }
 
@@ -109,7 +109,7 @@ static int pseries_add_memory(struct device_node *np)
                return 0;
 
        /*
-        * Find the base and size of the lmb
+        * Find the base and size of the memblock
         */
        regs = of_get_property(np, "reg", NULL);
        if (!regs)
@@ -121,7 +121,7 @@ static int pseries_add_memory(struct device_node *np)
        /*
         * Update memory region to represent the memory add
         */
-       ret = lmb_add(base, lmb_size);
+       ret = memblock_add(base, lmb_size);
        return (ret < 0) ? -EINVAL : 0;
 }
 
@@ -142,10 +142,10 @@ static int pseries_drconf_memory(unsigned long *base, unsigned int action)
        }
 
        if (action == PSERIES_DRCONF_MEM_ADD) {
-               rc = lmb_add(*base, *lmb_size);
+               rc = memblock_add(*base, *lmb_size);
                rc = (rc < 0) ? -EINVAL : 0;
        } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
-               rc = pseries_remove_lmb(*base, *lmb_size);
+               rc = pseries_remove_memblock(*base, *lmb_size);
        } else {
                rc = -EINVAL;
        }
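
For pseries memory hotplug, the rename also means every add and remove path must keep the memblock map in step with the Linux memory model. A condensed sketch of the remove side, using the names from the hunks above:

    /* Sketch: on hot-remove, tear down the struct pages, drop the range
     * from memblock, then unmap the bolted hash table entries for it. */
    ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
    if (ret)
            return ret;
    memblock_remove(base, memblock_size);   /* keep the memory map in sync */
    start = (unsigned long)__va(base);
    ret = remove_section_mapping(start, start + memblock_size);
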
index d26182d..395848e 100644 (file)
@@ -66,7 +66,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
        tcep = ((u64 *)tbl->it_base) + index;
 
        while (npages--) {
-               /* can't move this out since we might cross LMB boundary */
+               /* can't move this out since we might cross MEMBLOCK boundary */
                rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
                *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
 
index 7ebd9e8..6e7742d 100644 (file)
@@ -255,12 +255,12 @@ void invalidate_last_dump(struct phyp_dump_header *ph, unsigned long addr)
 
 /* ------------------------------------------------- */
 /**
- * release_memory_range -- release memory previously lmb_reserved
+ * release_memory_range -- release memory previously memblock_reserved
  * @start_pfn: starting physical frame number
  * @nr_pages: number of pages to free.
  *
  * This routine will release memory that had been previously
- * lmb_reserved in early boot. The released memory becomes
+ * memblock_reserved in early boot. The released memory becomes
  * available for general use.
  */
 static void release_memory_range(unsigned long start_pfn,
index c8b96ed..559db2b 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -232,7 +232,7 @@ static int __init dart_init(struct device_node *dart_node)
         * that to work around what looks like a problem with the HT bridge
         * prefetching into invalid pages and corrupting data
         */
-       tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+       tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
        dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
                                         DARTMAP_RPNMASK);
 
@@ -407,7 +407,7 @@ void __init alloc_dart_table(void)
        if (iommu_is_off)
                return;
 
-       if (!iommu_force_on && lmb_end_of_DRAM() <= 0x40000000ull)
+       if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
                return;
 
        /* 512 pages (2MB) is max DART tablesize. */
@@ -416,7 +416,7 @@ void __init alloc_dart_table(void)
         * will blow up an entire large page anyway in the kernel mapping
         */
        dart_tablebase = (unsigned long)
-               abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+               abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
 
        printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
 }
index a14760f..356c6a0 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/log2.h>
 #include <linux/slab.h>
 
@@ -190,7 +190,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
        pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar);
 
        /* Setup inbound mem window */
-       mem = lmb_end_of_DRAM();
+       mem = memblock_end_of_DRAM();
        sz = min(mem, paddr_lo);
        mem_log = __ilog2_u64(sz);
 
index d8d6028..c0bb76e 100644 (file)
@@ -4,6 +4,7 @@
  * also relocates SMC2, but this would require additional changes
  * to uart.c, so I am holding off on that for a moment.
  */
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -16,6 +17,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/8xx_immap.h>
+#include <asm/cpm.h>
 #include <asm/cpm1.h>
 
 /*
@@ -24,7 +26,7 @@
 
 #ifdef CONFIG_I2C_SPI_UCODE_PATCH
 
-uint patch_2000[] = {
+static uint patch_2000[] __initdata = {
        0x7FFFEFD9,
        0x3FFD0000,
        0x7FFB49F7,
@@ -143,7 +145,7 @@ uint patch_2000[] = {
        0x5F8247F8
 };
 
-uint patch_2f00[] = {
+static uint patch_2f00[] __initdata = {
        0x3E303430,
        0x34343737,
        0xABF7BF9B,
@@ -182,7 +184,7 @@ uint patch_2f00[] = {
 
 #ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
 
-uint patch_2000[] = {
+static uint patch_2000[] __initdata = {
        0x3fff0000,
        0x3ffd0000,
        0x3ffb0000,
@@ -505,7 +507,7 @@ uint patch_2000[] = {
        0x6079e2bb
 };
 
-uint patch_2f00[] = {
+static uint patch_2f00[] __initdata = {
        0x30303030,
        0x3e3e3434,
        0xabbf9b99,
@@ -572,7 +574,7 @@ uint patch_2f00[] = {
        0xf22f3f23
 };
 
-uint patch_2e00[] = {
+static uint patch_2e00[] __initdata = {
        0x27eeeeee,
        0xeeeeeeee,
        0xeeeeeeee,
@@ -598,7 +600,7 @@ uint patch_2e00[] = {
 
 #ifdef CONFIG_USB_SOF_UCODE_PATCH
 
-uint patch_2000[] = {
+static uint patch_2000[] __initdata = {
        0x7fff0000,
        0x7ffd0000,
        0x7ffb0000,
@@ -613,21 +615,25 @@ uint patch_2000[] = {
        0x60750000
 };
 
-uint patch_2f00[] = {
+static uint patch_2f00[] __initdata = {
        0x3030304c,
        0xcab9e441,
        0xa1aaf220
 };
 #endif
 
-void
-cpm_load_patch(cpm8xx_t        *cp)
+void __init cpm_load_patch(cpm8xx_t *cp)
 {
        volatile uint           *dp;            /* Dual-ported RAM. */
        volatile cpm8xx_t       *commproc;
+#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \
+    defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
        volatile iic_t          *iip;
-       volatile spi_t          *spp;
+       volatile struct spi_pram *spp;
+#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
        volatile smc_uart_t     *smp;
+#endif
+#endif
        int     i;
 
        commproc = cp;
@@ -668,8 +674,8 @@ cpm_load_patch(cpm8xx_t     *cp)
        /* Put SPI above the IIC, also 32-byte aligned.
        */
        i = (RPBASE + sizeof(iic_t) + 31) & ~31;
-       spp = (spi_t *)&commproc->cp_dparam[PROFF_SPI];
-       spp->spi_rpbase = i;
+       spp = (struct spi_pram *)&commproc->cp_dparam[PROFF_SPI];
+       spp->rpbase = i;
 
 # if defined(CONFIG_I2C_SPI_UCODE_PATCH)
        commproc->cp_cpmcr1 = 0x802a;
index d5e3e60..bea9ee3 100644 (file)
@@ -535,8 +535,16 @@ pgm_no_vtime2:
        l       %r3,__LC_PGM_ILC        # load program interruption code
        la      %r8,0x7f
        nr      %r8,%r3                 # clear per-event-bit and ilc
-       be      BASED(pgm_exit)         # only per or per+check ?
-       b       BASED(pgm_do_call)
+       be      BASED(pgm_exit2)        # only per or per+check ?
+       l       %r7,BASED(.Ljump_table)
+       sll     %r8,2
+       l       %r7,0(%r8,%r7)          # load address of handler routine
+       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       basr    %r14,%r7                # branch to interrupt-handler
+pgm_exit2:
+       TRACE_IRQS_ON
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       b       BASED(sysc_return)
 
 #
 # it was a single stepped SVC that is causing all the trouble
index e7192e1..8bccec1 100644 (file)
@@ -544,8 +544,16 @@ pgm_no_vtime2:
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
        lghi    %r8,0x7f
        ngr     %r8,%r3                 # clear per-event-bit and ilc
-       je      pgm_exit
-       j       pgm_do_call
+       je      pgm_exit2
+       sll     %r8,3
+       larl    %r1,pgm_check_table
+       lg      %r1,0(%r8,%r1)          # load address of handler routine
+       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       basr    %r14,%r1                # branch to interrupt-handler
+pgm_exit2:
+       TRACE_IRQS_ON
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       j       sysc_return
 
 #
 # it was a single stepped SVC that is causing all the trouble
index a2163c9..15a7536 100644 (file)
@@ -524,8 +524,11 @@ void etr_switch_to_local(void)
        if (!etr_eacr.sl)
                return;
        disable_sync_clock(NULL);
-       set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-       queue_work(time_sync_wq, &etr_work);
+       if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
+               etr_eacr.es = etr_eacr.sl = 0;
+               etr_setr(&etr_eacr);
+               queue_work(time_sync_wq, &etr_work);
+       }
 }
 
 /*
@@ -539,8 +542,11 @@ void etr_sync_check(void)
        if (!etr_eacr.es)
                return;
        disable_sync_clock(NULL);
-       set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-       queue_work(time_sync_wq, &etr_work);
+       if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
+               etr_eacr.es = 0;
+               etr_setr(&etr_eacr);
+               queue_work(time_sync_wq, &etr_work);
+       }
 }
 
 /*
@@ -902,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
         * Do not try to get the alternate port aib if the clock
         * is not in sync yet.
         */
-       if (!check_sync_clock())
+       if (!eacr.es || !check_sync_clock())
                return eacr;
 
        /*
@@ -1064,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
         * If the clock is in sync just update the eacr and return.
         * If there is no valid sync port wait for a port update.
         */
-       if (check_sync_clock() || sync_port < 0) {
+       if ((eacr.es && check_sync_clock()) || sync_port < 0) {
                etr_update_eacr(eacr);
                etr_set_tolec_timeout(now);
                goto out_unlock;
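
The s390 ETR hunks replace set_bit() plus an unconditional queue_work() with test_and_set_bit(), so the port shutdown and the sync work are triggered only by the first of a burst of identical machine-check events. The pattern, isolated from the sync-check hunk above:

    /* Sketch: only the path that actually sets the event bit disables
     * the port and queues the sync work; repeats are ignored while the
     * bit stays set. */
    if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
            etr_eacr.es = 0;                /* stop using the sync port */
            etr_setr(&etr_eacr);
            queue_work(time_sync_wq, &etr_work);
    }
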
index 573fca1..82868fe 100644 (file)
@@ -10,7 +10,7 @@ config SUPERH
        select EMBEDDED
        select HAVE_CLK
        select HAVE_IDE if HAS_IOPORT
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_OPROFILE
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h
deleted file mode 100644 (file)
index 9b437f6..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_LMB_H
-#define __ASM_SH_LMB_H
-
-#define LMB_REAL_LIMIT 0
-
-#endif /* __ASM_SH_LMB_H */
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
new file mode 100644 (file)
index 0000000..dfe683b
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH_MEMBLOCK_H
+#define __ASM_SH_MEMBLOCK_H
+
+#define MEMBLOCK_REAL_LIMIT    0
+
+#endif /* __ASM_SH_MEMBLOCK_H */
index 5a559e6..e2a3af3 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/numa.h>
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -157,10 +157,10 @@ void __init reserve_crashkernel(void)
        unsigned long long crash_size, crash_base;
        int ret;
 
-       /* this is necessary because of lmb_phys_mem_size() */
-       lmb_analyze();
+       /* this is necessary because of memblock_phys_mem_size() */
+       memblock_analyze();
 
-       ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+       ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size > 0) {
                crashk_res.start = crash_base;
@@ -172,14 +172,14 @@ void __init reserve_crashkernel(void)
 
        crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
        if (!crashk_res.start) {
-               unsigned long max = lmb_end_of_DRAM() - memory_limit;
-               crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);
+               unsigned long max = memblock_end_of_DRAM() - memory_limit;
+               crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
                if (!crashk_res.start) {
                        pr_err("crashkernel allocation failed\n");
                        goto disable;
                }
        } else {
-               ret = lmb_reserve(crashk_res.start, crash_size);
+               ret = memblock_reserve(crashk_res.start, crash_size);
                if (unlikely(ret < 0)) {
                        pr_err("crashkernel reservation failed - "
                               "memory is in use\n");
@@ -192,7 +192,7 @@ void __init reserve_crashkernel(void)
        /*
         * Crash kernel trumps memory limit
         */
-       if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+       if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
                memory_limit = 0;
                pr_info("Disabled memory limit for crashkernel\n");
        }
@@ -201,7 +201,7 @@ void __init reserve_crashkernel(void)
                "for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crashk_res.start),
-               (unsigned long)(lmb_phys_mem_size() >> 20));
+               (unsigned long)(memblock_phys_mem_size() >> 20));
 
        return;
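
The lmb -> memblock conversion here is mechanical (every lmb_*() call becomes the matching memblock_*() call), and the surrounding code is the usual early-boot reservation shape: try a bounded allocation when no base was requested, otherwise reserve the caller-supplied range and check for overlap. A compressed sketch of that flow with the same memblock calls used above (function name, sizes and the error strings are placeholders):

#include <linux/kernel.h>
#include <linux/memblock.h>

static void __init reserve_my_region(phys_addr_t my_base, phys_addr_t my_size)
{
        if (!my_base) {
                /* no preferred address: let memblock pick one below the limit */
                my_base = __memblock_alloc_base(my_size, PAGE_SIZE,
                                                memblock_end_of_DRAM());
                if (!my_base) {
                        pr_err("my_region allocation failed\n");
                        return;
                }
        } else if (memblock_reserve(my_base, my_size) < 0) {
                /* caller asked for a fixed range that is already in use */
                pr_err("my_region reservation failed - memory is in use\n");
                return;
        }

        pr_info("reserved %lluKB at %#llx for my_region\n",
                (unsigned long long)(my_size >> 10),
                (unsigned long long)my_base);
}
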
 
index 2727346..e769401 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
@@ -141,10 +141,10 @@ void __init check_for_initrd(void)
                goto disable;
        }
 
-       if (unlikely(end > lmb_end_of_DRAM())) {
+       if (unlikely(end > memblock_end_of_DRAM())) {
                pr_err("initrd extends beyond end of memory "
                       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-                      end, (unsigned long)lmb_end_of_DRAM());
+                      end, (unsigned long)memblock_end_of_DRAM());
                goto disable;
        }
 
@@ -161,7 +161,7 @@ void __init check_for_initrd(void)
        initrd_start = (unsigned long)__va(__pa(start));
        initrd_end = initrd_start + INITRD_SIZE;
 
-       lmb_reserve(__pa(initrd_start), INITRD_SIZE);
+       memblock_reserve(__pa(initrd_start), INITRD_SIZE);
 
        return;
 
index 46f84de..d0e2491 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
 #include <asm/mmzone.h>
@@ -33,7 +33,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 void __init generic_mem_init(void)
 {
-       lmb_add(__MEMORY_START, __MEMORY_SIZE);
+       memblock_add(__MEMORY_START, __MEMORY_SIZE);
 }
 
 void __init __weak plat_mem_setup(void)
@@ -176,12 +176,12 @@ void __init allocate_pgdat(unsigned int nid)
        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-       phys = __lmb_alloc_base(sizeof(struct pglist_data),
+       phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
        /* Retry with all of system memory */
        if (!phys)
-               phys = __lmb_alloc_base(sizeof(struct pglist_data),
-                                       SMP_CACHE_BYTES, lmb_end_of_DRAM());
+               phys = __memblock_alloc_base(sizeof(struct pglist_data),
+                                       SMP_CACHE_BYTES, memblock_end_of_DRAM());
        if (!phys)
                panic("Can't allocate pgdat for node %d\n", nid);
 
@@ -212,7 +212,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 
        total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-       paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+       paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
        if (!paddr)
                panic("Can't allocate bootmap for nid[%d]\n", nid);
 
@@ -227,9 +227,9 @@ static void __init bootmem_init_one_node(unsigned int nid)
         */
        if (nid == 0) {
                /* Reserve the sections we're already using. */
-               for (i = 0; i < lmb.reserved.cnt; i++)
-                       reserve_bootmem(lmb.reserved.region[i].base,
-                                       lmb_size_bytes(&lmb.reserved, i),
+               for (i = 0; i < memblock.reserved.cnt; i++)
+                       reserve_bootmem(memblock.reserved.region[i].base,
+                                       memblock_size_bytes(&memblock.reserved, i),
                                        BOOTMEM_DEFAULT);
        }
 
@@ -241,10 +241,10 @@ static void __init do_init_bootmem(void)
        int i;
 
        /* Add active regions with valid PFNs. */
-       for (i = 0; i < lmb.memory.cnt; i++) {
+       for (i = 0; i < memblock.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                __add_active_range(0, start_pfn, end_pfn);
        }
 
@@ -276,7 +276,7 @@ static void __init early_reserve_mem(void)
         * this catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
-       lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+       memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
 
@@ -284,7 +284,7 @@ static void __init early_reserve_mem(void)
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
-               lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+               memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
 
        /*
         * Handle additional early reservations
@@ -299,27 +299,27 @@ void __init paging_init(void)
        unsigned long vaddr, end;
        int nid;
 
-       lmb_init();
+       memblock_init();
 
        sh_mv.mv_mem_init();
 
        early_reserve_mem();
 
-       lmb_enforce_memory_limit(memory_limit);
-       lmb_analyze();
+       memblock_enforce_memory_limit(memory_limit);
+       memblock_analyze();
 
-       lmb_dump_all();
+       memblock_dump_all();
 
        /*
         * Determine low and high memory ranges:
         */
-       max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
 
        nodes_clear(node_online_map);
 
        memory_start = (unsigned long)__va(__MEMORY_START);
-       memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+       memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
 
        uncached_init();
        pmb_init();
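
Taken together, the hunks above also show the bring-up order the early allocator expects: memblock_init(), then memblock_add() for each RAM range, then memblock_reserve() for anything already in use, and only then memblock_analyze()/memblock_dump_all() before the first allocation. A condensed sketch of that sequence (the RAM base, size and reserved-area constants are placeholders):

#include <linux/init.h>
#include <linux/memblock.h>

#define MY_RAM_BASE     0x08000000UL    /* placeholder: platform RAM start */
#define MY_RAM_SIZE     (64UL << 20)    /* placeholder: 64MB of RAM */
#define MY_FW_RESERVED  (1UL << 20)     /* placeholder: firmware-owned first 1MB */

static void __init my_early_mem_init(void)
{
        memblock_init();                                /* reset the region arrays */
        memblock_add(MY_RAM_BASE, MY_RAM_SIZE);         /* register usable RAM */
        memblock_reserve(MY_RAM_BASE, MY_FW_RESERVED);  /* carve out in-use areas */

        memblock_analyze();             /* compute totals; needed before
                                           memblock_phys_mem_size() is used */
        memblock_dump_all();            /* debug dump of all regions */
}
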
index a2e645f..3d85225 100644 (file)
@@ -9,7 +9,7 @@
  */
 #include <linux/module.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/numa.h>
 #include <linux/pfn.h>
@@ -39,12 +39,12 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
        pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
                         PAGE_KERNEL);
 
-       lmb_add(start, end - start);
+       memblock_add(start, end - start);
 
        __add_active_range(nid, start_pfn, end_pfn);
 
        /* Node-local pgdat */
-       NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
+       NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data),
                                             SMP_CACHE_BYTES, end));
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
@@ -54,7 +54,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 
        /* Node-local bootmap */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-       bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
+       bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
                                       PAGE_SIZE, end);
        init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                          start_pfn, end_pfn);
index 6f1470b..c0015db 100644 (file)
@@ -42,7 +42,7 @@ config SPARC64
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_SYSCALL_WRAPPERS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
index 259e3fd..1dc07a0 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.34-rc3
-# Sat Apr  3 15:49:56 2010
+# Linux kernel version: 2.6.34
+# Wed May 26 21:14:01 2010
 #
 CONFIG_64BIT=y
 CONFIG_SPARC=y
@@ -107,10 +107,9 @@ CONFIG_PERF_COUNTERS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_PCI_QUIRKS=y
-CONFIG_SLUB_DEBUG=y
 # CONFIG_COMPAT_BRK is not set
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
 CONFIG_TRACEPOINTS=y
@@ -239,6 +238,7 @@ CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
 CONFIG_SPARSEMEM_VMEMMAP=y
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
 CONFIG_MIGRATION=y
 CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_ZONE_DMA_FLAG=0
@@ -351,6 +351,7 @@ CONFIG_IPV6_TUNNEL=m
 # CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
+# CONFIG_L2TP is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_NET_DSA is not set
 CONFIG_VLAN_8021Q=m
@@ -367,6 +368,7 @@ CONFIG_VLAN_8021Q=m
 # CONFIG_IEEE802154 is not set
 # CONFIG_NET_SCHED is not set
 # CONFIG_DCB is not set
+CONFIG_RPS=y
 
 #
 # Network testing
@@ -386,9 +388,14 @@ CONFIG_WIRELESS=y
 #
 # CFG80211 needs to be enabled for MAC80211
 #
+
+#
+# Some wireless drivers require a rate control algorithm
+#
 # CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
 
 #
 # Device Drivers
@@ -658,6 +665,7 @@ CONFIG_PHYLIB=m
 # CONFIG_NATIONAL_PHY is not set
 # CONFIG_STE10XP is not set
 # CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=m
@@ -734,6 +742,8 @@ CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
 CONFIG_CHELSIO_T3_DEPENDS=y
 # CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
 # CONFIG_ENIC is not set
 # CONFIG_IXGBE is not set
 # CONFIG_IXGBEVF is not set
@@ -766,6 +776,7 @@ CONFIG_NIU=m
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
 # CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
@@ -778,7 +789,6 @@ CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
-# CONFIG_PPPOL2TP is not set
 # CONFIG_SLIP is not set
 CONFIG_SLHC=m
 # CONFIG_NET_FC is not set
@@ -816,6 +826,7 @@ CONFIG_INPUT_KEYBOARD=y
 CONFIG_KEYBOARD_ATKBD=y
 # CONFIG_QT2160 is not set
 CONFIG_KEYBOARD_LKKBD=m
+# CONFIG_KEYBOARD_TCA6416 is not set
 # CONFIG_KEYBOARD_MAX7359 is not set
 # CONFIG_KEYBOARD_NEWTON is not set
 # CONFIG_KEYBOARD_OPENCORES is not set
@@ -840,6 +851,7 @@ CONFIG_MOUSE_SERIAL=y
 # CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
 CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
 CONFIG_INPUT_SPARCSPKR=y
 # CONFIG_INPUT_ATI_REMOTE is not set
 # CONFIG_INPUT_ATI_REMOTE2 is not set
@@ -848,6 +860,7 @@ CONFIG_INPUT_SPARCSPKR=y
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_CM109 is not set
 # CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_PCF8574 is not set
 
 #
 # Hardware I/O ports
@@ -871,6 +884,7 @@ CONFIG_HW_CONSOLE=y
 # CONFIG_VT_HW_CONSOLE_BINDING is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
 # CONFIG_NOZOMI is not set
 
 #
@@ -893,6 +907,8 @@ CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 # CONFIG_SERIAL_TIMBERDALE is not set
 # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
@@ -1306,11 +1322,14 @@ CONFIG_USB_HIDDEV=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
+# CONFIG_HID_CANDO is not set
 CONFIG_HID_CHERRY=y
 CONFIG_HID_CHICONY=y
+# CONFIG_HID_PRODIKEYS is not set
 CONFIG_HID_CYPRESS=y
 CONFIG_HID_DRAGONRISE=y
 # CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EGALAX is not set
 CONFIG_HID_EZKEY=y
 CONFIG_HID_KYE=y
 CONFIG_HID_GYRATION=y
@@ -1328,7 +1347,9 @@ CONFIG_HID_ORTEK=y
 CONFIG_HID_PANTHERLORD=y
 # CONFIG_PANTHERLORD_FF is not set
 CONFIG_HID_PETALYNX=y
+# CONFIG_HID_PICOLCD is not set
 # CONFIG_HID_QUANTA is not set
+# CONFIG_HID_ROCCAT_KONE is not set
 CONFIG_HID_SAMSUNG=y
 CONFIG_HID_SONY=y
 # CONFIG_HID_STANTUM is not set
@@ -1342,6 +1363,7 @@ CONFIG_HID_THRUSTMASTER=y
 # CONFIG_THRUSTMASTER_FF is not set
 CONFIG_HID_ZEROPLUS=y
 # CONFIG_ZEROPLUS_FF is not set
+# CONFIG_HID_ZYDACRON is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1356,7 +1378,6 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICEFS is not set
 # CONFIG_USB_DEVICE_CLASS is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_OTG is not set
 # CONFIG_USB_MON is not set
 # CONFIG_USB_WUSB is not set
 # CONFIG_USB_WUSB_CBAF is not set
@@ -1521,10 +1542,6 @@ CONFIG_RTC_DRV_STARFIRE=y
 # CONFIG_DMADEVICES is not set
 # CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
-
-#
-# TI VLYNQ
-#
 # CONFIG_STAGING is not set
 
 #
@@ -1706,8 +1723,8 @@ CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
 CONFIG_SCHEDSTATS=y
 # CONFIG_TIMER_STATS is not set
 # CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_SLUB_DEBUG_ON is not set
-# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
@@ -1742,6 +1759,9 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_DEBUG_PAGEALLOC is not set
 CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
 CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
@@ -1769,12 +1789,12 @@ CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_RING_BUFFER_BENCHMARK is not set
 # CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_DCFLUSH is not set
-# CONFIG_STACK_DEBUG is not set
 # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
 
 #
@@ -1895,6 +1915,7 @@ CONFIG_CRYPTO_DEFLATE=y
 #
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_NIAGARA2 is not set
 # CONFIG_CRYPTO_DEV_HIFN_795X is not set
 CONFIG_BINARY_PRINTF=y
 
index 0588b8c..69358b5 100644 (file)
@@ -11,7 +11,6 @@
 
 #define L1_CACHE_SHIFT 5
 #define L1_CACHE_BYTES 32
-#define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
 
 #ifdef CONFIG_SPARC32
 #define SMP_CACHE_BYTES_SHIFT 5
diff --git a/arch/sparc/include/asm/lmb.h b/arch/sparc/include/asm/lmb.h
deleted file mode 100644 (file)
index 6a352cb..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _SPARC64_LMB_H
-#define _SPARC64_LMB_H
-
-#include <asm/oplib.h>
-
-#define LMB_DBG(fmt...) prom_printf(fmt)
-
-#define LMB_REAL_LIMIT 0
-
-#endif /* !(_SPARC64_LMB_H) */
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
new file mode 100644 (file)
index 0000000..f12af88
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _SPARC64_MEMBLOCK_H
+#define _SPARC64_MEMBLOCK_H
+
+#include <asm/oplib.h>
+
+#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
+
+#define MEMBLOCK_REAL_LIMIT    0
+
+#endif /* !(_SPARC64_MEMBLOCK_H) */
index 77f906d..0ece77f 100644 (file)
@@ -142,13 +142,12 @@ BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
 #define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
 #define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
 
-BTFIXUPDEF_SETHI(none_mask)
 BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
 BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
 
 static inline int pte_none(pte_t pte)
 {
-       return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
+       return !pte_val(pte);
 }
 
 #define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
@@ -160,7 +159,7 @@ BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
 
 static inline int pmd_none(pmd_t pmd)
 {
-       return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
+       return !pmd_val(pmd);
 }
 
 #define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
index cdc91d9..83e85c2 100644 (file)
@@ -4,7 +4,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/log2.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -86,7 +86,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
        hp->handle_size = handle_size;
 }
 
-static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
+static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
 {
        unsigned int handle_size, alloc_size;
        struct mdesc_handle *hp;
@@ -97,7 +97,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
                       mdesc_size);
        alloc_size = PAGE_ALIGN(handle_size);
 
-       paddr = lmb_alloc(alloc_size, PAGE_SIZE);
+       paddr = memblock_alloc(alloc_size, PAGE_SIZE);
 
        hp = NULL;
        if (paddr) {
@@ -107,7 +107,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
        return hp;
 }
 
-static void mdesc_lmb_free(struct mdesc_handle *hp)
+static void mdesc_memblock_free(struct mdesc_handle *hp)
 {
        unsigned int alloc_size;
        unsigned long start;
@@ -120,9 +120,9 @@ static void mdesc_lmb_free(struct mdesc_handle *hp)
        free_bootmem_late(start, alloc_size);
 }
 
-static struct mdesc_mem_ops lmb_mdesc_ops = {
-       .alloc = mdesc_lmb_alloc,
-       .free  = mdesc_lmb_free,
+static struct mdesc_mem_ops memblock_mdesc_ops = {
+       .alloc = mdesc_memblock_alloc,
+       .free  = mdesc_memblock_free,
 };
 
 static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
@@ -914,7 +914,7 @@ void __init sun4v_mdesc_init(void)
 
        printk("MDESC: Size is %lu bytes.\n", len);
 
-       hp = mdesc_alloc(len, &lmb_mdesc_ops);
+       hp = mdesc_alloc(len, &memblock_mdesc_ops);
        if (hp == NULL) {
                prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
                prom_halt();
index 0ec92c8..44faabc 100644 (file)
@@ -657,6 +657,7 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
                cpuc->current_idx[i] = idx;
 
                enc = perf_event_get_enc(cpuc->events[i]);
+               pcr &= ~mask_for_index(idx);
                pcr |= event_encoding(enc, idx);
        }
 out:
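
The added line above is the classic read-modify-write rule for packed control registers: clear the field belonging to the counter index before OR-ing in the new encoding, otherwise stale bits from the previously programmed event survive. A small self-contained illustration of the mistake and the fix (the field layout is invented, not the real SPARC PCR layout):

#include <assert.h>
#include <stdint.h>

/* Invented layout: each counter index owns an 8-bit field in the register. */
#define FIELD_SHIFT(idx)        ((idx) * 8)
#define FIELD_MASK(idx)         (0xffULL << FIELD_SHIFT(idx))

static uint64_t set_event(uint64_t pcr, int idx, uint64_t enc)
{
        pcr &= ~FIELD_MASK(idx);        /* the added line: drop the old encoding */
        pcr |= (enc << FIELD_SHIFT(idx)) & FIELD_MASK(idx);
        return pcr;
}

int main(void)
{
        uint64_t pcr = 0;

        pcr = set_event(pcr, 0, 0x3c);          /* program an event on counter 0 */
        pcr = set_event(pcr, 0, 0x05);          /* reprogram counter 0 */
        assert((pcr & FIELD_MASK(0)) == 0x05);  /* without the clear, bits of 0x3c remain */
        return 0;
}
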
index fb06ac2..466a327 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_device.h>
 
 #include <asm/prom.h>
@@ -34,7 +34,7 @@
 
 void * __init prom_early_alloc(unsigned long size)
 {
-       unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES);
+       unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
        void *ret;
 
        if (!paddr) {
index ab036a7..e11b461 100644 (file)
@@ -183,7 +183,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
                goto out_unlock;
        }
        
-       if (action && tmp)
+       if (tmp)
                tmp->next = action->next;
        else
                *actionp = action->next;
index 76d837f..c6dfdaa 100644 (file)
@@ -64,7 +64,7 @@ tl0_irq6:     TRAP_IRQ(smp_call_function_single_client, 6)
 tl0_irq6:      BTRAP(0x46)
 #endif
 tl0_irq7:      TRAP_IRQ(deferred_pcr_work_irq, 7)
-#ifdef CONFIG_KGDB
+#if defined(CONFIG_KGDB) && defined(CONFIG_SMP)
 tl0_irq8:      TRAP_IRQ(smp_kgdb_capture_client, 8)
 #else
 tl0_irq8:      BTRAP(0x48)
index b2831dc..f043451 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/cache.h>
 #include <linux/sort.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/gfp.h>
 
@@ -726,7 +726,7 @@ static void __init find_ramdisk(unsigned long phys_base)
                initrd_start = ramdisk_image;
                initrd_end = ramdisk_image + sparc_ramdisk_size;
 
-               lmb_reserve(initrd_start, sparc_ramdisk_size);
+               memblock_reserve(initrd_start, sparc_ramdisk_size);
 
                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
@@ -822,7 +822,7 @@ static void __init allocate_node_data(int nid)
        struct pglist_data *p;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-       paddr = lmb_alloc_nid(sizeof(struct pglist_data),
+       paddr = memblock_alloc_nid(sizeof(struct pglist_data),
                              SMP_CACHE_BYTES, nid, nid_range);
        if (!paddr) {
                prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
@@ -843,7 +843,7 @@ static void __init allocate_node_data(int nid)
        if (p->node_spanned_pages) {
                num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-               paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
+               paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
                                      nid_range);
                if (!paddr) {
                        prom_printf("Cannot allocate bootmap for nid[%d]\n",
@@ -974,11 +974,11 @@ static void __init add_node_ranges(void)
 {
        int i;
 
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               unsigned long size = lmb_size_bytes(&lmb.memory, i);
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               unsigned long size = memblock_size_bytes(&memblock.memory, i);
                unsigned long start, end;
 
-               start = lmb.memory.region[i].base;
+               start = memblock.memory.region[i].base;
                end = start + size;
                while (start < end) {
                        unsigned long this_end;
@@ -1010,7 +1010,7 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
        if (!count)
                return -ENOENT;
 
-       paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
+       paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;
@@ -1051,7 +1051,7 @@ static int __init grab_mblocks(struct mdesc_handle *md)
        if (!count)
                return -ENOENT;
 
-       paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
+       paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;
@@ -1279,8 +1279,8 @@ static int bootmem_init_numa(void)
 
 static void __init bootmem_init_nonnuma(void)
 {
-       unsigned long top_of_ram = lmb_end_of_DRAM();
-       unsigned long total_ram = lmb_phys_mem_size();
+       unsigned long top_of_ram = memblock_end_of_DRAM();
+       unsigned long total_ram = memblock_phys_mem_size();
        unsigned int i;
 
        numadbg("bootmem_init_nonnuma()\n");
@@ -1292,15 +1292,15 @@ static void __init bootmem_init_nonnuma(void)
 
        init_node_masks_nonnuma();
 
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               unsigned long size = lmb_size_bytes(&lmb.memory, i);
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               unsigned long size = memblock_size_bytes(&memblock.memory, i);
                unsigned long start_pfn, end_pfn;
 
                if (!size)
                        continue;
 
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
 
@@ -1338,9 +1338,9 @@ static void __init trim_reserved_in_node(int nid)
 
        numadbg("  trim_reserved_in_node(%d)\n", nid);
 
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long start = lmb.reserved.region[i].base;
-               unsigned long size = lmb_size_bytes(&lmb.reserved, i);
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               unsigned long start = memblock.reserved.region[i].base;
+               unsigned long size = memblock_size_bytes(&memblock.reserved, i);
                unsigned long end = start + size;
 
                reserve_range_in_node(nid, start, end);
@@ -1384,7 +1384,7 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
        unsigned long end_pfn;
        int nid;
 
-       end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn = end_pfn;
        min_low_pfn = (phys_base >> PAGE_SHIFT);
 
@@ -1734,7 +1734,7 @@ void __init paging_init(void)
                sun4v_ktsb_init();
        }
 
-       lmb_init();
+       memblock_init();
 
        /* Find available physical memory...
         *
@@ -1752,17 +1752,17 @@ void __init paging_init(void)
        phys_base = 0xffffffffffffffffUL;
        for (i = 0; i < pavail_ents; i++) {
                phys_base = min(phys_base, pavail[i].phys_addr);
-               lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
+               memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
        }
 
-       lmb_reserve(kern_base, kern_size);
+       memblock_reserve(kern_base, kern_size);
 
        find_ramdisk(phys_base);
 
-       lmb_enforce_memory_limit(cmdline_memory_size);
+       memblock_enforce_memory_limit(cmdline_memory_size);
 
-       lmb_analyze();
-       lmb_dump_all();
+       memblock_analyze();
+       memblock_dump_all();
 
        set_bit(0, mmu_context_bmap);
 
@@ -1816,8 +1816,8 @@ void __init paging_init(void)
         */
        for_each_possible_cpu(i) {
                /* XXX Use node local allocations... XXX */
-               softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
-               hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+               softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+               hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 
        /* Setup bootmem... */
index f5f75a5..b0b43aa 100644 (file)
@@ -2215,8 +2215,6 @@ void __init ld_mmu_srmmu(void)
        BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
 
-       BTFIXUPSET_SETHI(none_mask, 0xF0000000);
-
        BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
 
index cf38846..4289f90 100644 (file)
@@ -2087,9 +2087,6 @@ void __init ld_mmu_sun4c(void)
 
        BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
 
-       /* The 2.4.18 code does not set this on sun4c, how does it work? XXX */
-       /* BTFIXUPSET_SETHI(none_mask, 0x00000000); */  /* Defaults to zero? */
-
        BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM);
 #if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */
        BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
index 519b543..baa579c 100644 (file)
@@ -142,6 +142,7 @@ struct x86_cpuinit_ops {
  * @set_wallclock:             set time back to HW clock
  * @is_untracked_pat_range     exclude from PAT logic
  * @nmi_init                   enable NMI on cpus
+ * @i8042_detect               pre-detect if i8042 controller exists
  */
 struct x86_platform_ops {
        unsigned long (*calibrate_tsc)(void);
@@ -150,6 +151,7 @@ struct x86_platform_ops {
        void (*iommu_shutdown)(void);
        bool (*is_untracked_pat_range)(u64 start, u64 end);
        void (*nmi_init)(void);
+       int (*i8042_detect)(void);
 };
 
 extern struct x86_init_ops x86_init;
index 2e837f5..fb7a5f0 100644 (file)
@@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
                percpu_entry->states[cx->index].eax = cx->address;
                percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
        }
+
+       /*
+        * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
+        * then we should skip checking BM_STS for this C-state.
+        * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
+        */
+       if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
+               cx->bm_sts_skip = 1;
+
        return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
index 82e5086..fcc3c61 100644 (file)
@@ -157,9 +157,14 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
                if (strncmp(str, "s4_nohwsig", 10) == 0)
                        acpi_no_s4_hw_signature();
-               if (strncmp(str, "s4_nonvs", 8) == 0)
-                       acpi_s4_no_nvs();
+               if (strncmp(str, "s4_nonvs", 8) == 0) {
+                       pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, "
+                                       "please use acpi_sleep=nonvs instead");
+                       acpi_nvs_nosave();
+               }
 #endif
+               if (strncmp(str, "nonvs", 5) == 0)
+                       acpi_nvs_nosave();
                if (strncmp(str, "old_ordering", 12) == 0)
                        acpi_old_suspend_ordering();
                str = strchr(str, ',');
index c02cc69..a96489e 100644 (file)
@@ -921,7 +921,7 @@ void disable_local_APIC(void)
        unsigned int value;
 
        /* APIC hasn't been mapped yet */
-       if (!apic_phys)
+       if (!x2apic_mode && !apic_phys)
                return;
 
        clear_local_APIC();
index ce7cde7..a36de5b 100644 (file)
@@ -368,22 +368,16 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
                return -ENODEV;
 
        out_obj = output.pointer;
-       if (out_obj->type != ACPI_TYPE_BUFFER) {
-               ret = -ENODEV;
-               goto out_free;
-       }
+       if (out_obj->type != ACPI_TYPE_BUFFER)
+               return -ENODEV;
 
        errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
-       if (errors) {
-               ret = -ENODEV;
-               goto out_free;
-       }
+       if (errors)
+               return -ENODEV;
 
        supported = *((u32 *)(out_obj->buffer.pointer + 4));
-       if (!(supported & 0x1)) {
-               ret = -ENODEV;
-               goto out_free;
-       }
+       if (!(supported & 0x1))
+               return -ENODEV;
 
 out_free:
        kfree(output.pointer);
@@ -397,13 +391,17 @@ static int __init pcc_cpufreq_probe(void)
        struct pcc_memory_resource *mem_resource;
        struct pcc_register_resource *reg_resource;
        union acpi_object *out_obj, *member;
-       acpi_handle handle, osc_handle;
+       acpi_handle handle, osc_handle, pcch_handle;
        int ret = 0;
 
        status = acpi_get_handle(NULL, "\\_SB", &handle);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
+       status = acpi_get_handle(handle, "PCCH", &pcch_handle);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
        status = acpi_get_handle(handle, "_OSC", &osc_handle);
        if (ACPI_SUCCESS(status)) {
                ret = pcc_cpufreq_do_osc(&osc_handle);
@@ -543,13 +541,13 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        if (!pcch_virt_addr) {
                result = -1;
-               goto pcch_null;
+               goto out;
        }
 
        result = pcc_get_offset(cpu);
        if (result) {
                dprintk("init: PCCP evaluation failed\n");
-               goto free;
+               goto out;
        }
 
        policy->max = policy->cpuinfo.max_freq =
@@ -558,14 +556,15 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
                ioread32(&pcch_hdr->minimum_frequency) * 1000;
        policy->cur = pcc_get_freq(cpu);
 
+       if (!policy->cur) {
+               dprintk("init: Unable to get current CPU frequency\n");
+               result = -EINVAL;
+               goto out;
+       }
+
        dprintk("init: policy->max is %d, policy->min is %d\n",
                policy->max, policy->min);
-
-       return 0;
-free:
-       pcc_clear_mapping();
-       free_percpu(pcc_cpu_info);
-pcch_null:
+out:
        return result;
 }
 
index 7ec2123..3e90cce 100644 (file)
@@ -1023,13 +1023,12 @@ static int get_transition_latency(struct powernow_k8_data *data)
        }
        if (max_latency == 0) {
                /*
-                * Fam 11h always returns 0 as transition latency.
-                * This is intended and means "very fast". While cpufreq core
-                * and governors currently can handle that gracefully, better
-                * set it to 1 to avoid problems in the future.
-                * For all others it's a BIOS bug.
+                * Fam 11h and later may return 0 as transition latency. This
+                * is intended and means "very fast". While cpufreq core and
+                * governors currently can handle that gracefully, better set it
+                * to 1 to avoid problems in the future.
                 */
-               if (boot_cpu_data.x86 != 0x11)
+               if (boot_cpu_data.x86 < 0x11)
                        printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
                                "latency\n");
                max_latency = 1;
index ebdb85c..e5cc7e8 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -191,6 +192,21 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
+/*
+ * Force the read back of the CMP register in hpet_next_event()
+ * to work around the problem that the CMP register write seems to be
+ * delayed. See hpet_next_event() for details.
+ *
+ * We do this on all SMBUS incarnations for now until we have more
+ * information about the affected chipsets.
+ */
+static void __init ati_hpet_bugs(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+       hpet_readback_cmp = 1;
+#endif
+}
+
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -220,6 +236,8 @@ static struct chipset early_qrk[] __initdata = {
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
        { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
+       { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+         PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
        {}
 };
 
index 490ae2b..649ed17 100644 (file)
@@ -571,8 +571,8 @@ auditsys:
         * masked off.
         */
 sysret_audit:
-       movq %rax,%rsi          /* second arg, syscall return value */
-       cmpq $0,%rax            /* is it < 0? */
+       movq RAX-ARGOFFSET(%rsp),%rsi   /* second arg, syscall return value */
+       cmpq $0,%rsi            /* is it < 0? */
        setl %al                /* 1 if so, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
        inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
index a198b7c..ba390d7 100644 (file)
@@ -964,7 +964,7 @@ fs_initcall(hpet_late_init);
 
 void hpet_disable(void)
 {
-       if (is_hpet_capable()) {
+       if (is_hpet_capable() && hpet_virt_address) {
                unsigned int cfg = hpet_readl(HPET_CFG);
 
                if (hpet_legacy_int_enabled) {
index 7c9f02c..cafa7c8 100644 (file)
@@ -276,16 +276,6 @@ static struct sys_device device_i8259A = {
        .cls    = &i8259_sysdev_class,
 };
 
-static int __init i8259A_init_sysfs(void)
-{
-       int error = sysdev_class_register(&i8259_sysdev_class);
-       if (!error)
-               error = sysdev_register(&device_i8259A);
-       return error;
-}
-
-device_initcall(i8259A_init_sysfs);
-
 static void mask_8259A(void)
 {
        unsigned long flags;
@@ -407,3 +397,18 @@ struct legacy_pic default_legacy_pic = {
 };
 
 struct legacy_pic *legacy_pic = &default_legacy_pic;
+
+static int __init i8259A_init_sysfs(void)
+{
+       int error;
+
+       if (legacy_pic != &default_legacy_pic)
+               return 0;
+
+       error = sysdev_class_register(&i8259_sysdev_class);
+       if (!error)
+               error = sysdev_register(&device_i8259A);
+       return error;
+}
+
+device_initcall(i8259A_init_sysfs);
index 4f4af75..01ab17a 100644 (file)
@@ -572,7 +572,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
        return NOTIFY_STOP;
 }
 
-#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 int kgdb_ll_trap(int cmd, const char *str,
                 struct pt_regs *regs, long err, int trap, int sig)
 {
@@ -590,7 +589,6 @@ int kgdb_ll_trap(int cmd, const char *str,
 
        return __kgdb_notify(&args, cmd);
 }
-#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
 static int
 kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
@@ -625,6 +623,12 @@ int kgdb_arch_init(void)
        return register_die_notifier(&kgdb_notifier);
 }
 
+static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
+               struct perf_sample_data *data, struct pt_regs *regs)
+{
+       kgdb_ll_trap(DIE_DEBUG, "debug", regs, 0, 0, SIGTRAP);
+}
+
 void kgdb_arch_late(void)
 {
        int i, cpu;
@@ -655,6 +659,7 @@ void kgdb_arch_late(void)
                for_each_online_cpu(cpu) {
                        pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
                        pevent[0]->hw.sample_period = 1;
+                       pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
                        if (pevent[0]->destroy != NULL) {
                                pevent[0]->destroy = NULL;
                                release_bp_slot(*pevent);
index 345a4b1..675879b 100644 (file)
@@ -640,8 +640,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        /* Skip cs, ip, orig_ax and gs. */      \
        "       subl $16, %esp\n"       \
        "       pushl %fs\n"            \
-       "       pushl %ds\n"            \
        "       pushl %es\n"            \
+       "       pushl %ds\n"            \
        "       pushl %eax\n"           \
        "       pushl %ebp\n"           \
        "       pushl %edi\n"           \
index e796448..5915e0b 100644 (file)
@@ -216,6 +216,12 @@ static void __init mrst_setup_boot_clock(void)
                setup_boot_APIC_clock();
 };
 
+/* MID systems don't have i8042 controller */
+static int mrst_i8042_detect(void)
+{
+       return 0;
+}
+
 /*
  * Moorestown specific x86_init function overrides and early setup
  * calls.
@@ -233,6 +239,7 @@ void __init x86_mrst_early_setup(void)
        x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
 
        x86_platform.calibrate_tsc = mrst_calibrate_tsc;
+       x86_platform.i8042_detect = mrst_i8042_detect;
        x86_init.pci.init = pci_mrst_init;
        x86_init.pci.fixup_irqs = x86_init_noop;
 
index e72d3fc..939b9e9 100644 (file)
@@ -498,15 +498,10 @@ void force_hpet_resume(void)
  * See erratum #27 (Misinterpreted MSI Requests May Result in
  * Corrupted LPC DMA Data) in AMD Publication #46837,
  * "SB700 Family Product Errata", Rev. 1.0, March 2010.
- *
- * Also force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
  */
 static void force_disable_hpet_msi(struct pci_dev *unused)
 {
        hpet_msi_disable = 1;
-       hpet_readback_cmp = 1;
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
index de3b63a..a60df9a 100644 (file)
@@ -238,6 +238,15 @@ void __init setup_per_cpu_areas(void)
 #ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
+               /*
+                * Ensure that the boot cpu numa_node is correct when the boot
+                * cpu is on a node that doesn't have memory installed.
+                * Also cpu_up() will call cpu_to_node() for APs when
+                * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
+                * up later with c_init aka intel_init/amd_init.
+                * So set them all (boot cpu and all APs).
+                */
+               set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
 #endif
 #endif
                /*
@@ -257,14 +266,6 @@ void __init setup_per_cpu_areas(void)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
-       /*
-        * make sure boot cpu numa_node is right, when boot cpu is on the
-        * node that doesn't have mem installed
-        */
-       set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
-#endif
-
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
 
index 61a1e8c..cd6da6b 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/module.h>
 
 #include <asm/bios_ebda.h>
 #include <asm/paravirt.h>
@@ -85,6 +86,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 };
 
 static void default_nmi_init(void) { };
+static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
        .calibrate_tsc                  = native_calibrate_tsc,
@@ -92,5 +94,8 @@ struct x86_platform_ops x86_platform = {
        .set_wallclock                  = mach_set_rtc_mmss,
        .iommu_shutdown                 = iommu_shutdown_noop,
        .is_untracked_pat_range         = is_ISA_range,
-       .nmi_init                       = default_nmi_init
+       .nmi_init                       = default_nmi_init,
+       .i8042_detect                   = default_i8042_detect
 };
+
+EXPORT_SYMBOL_GPL(x86_platform);
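
The new i8042_detect hook follows the usual x86_platform_ops pattern: a default implementation sits in the shared ops structure, Moorestown (above) overrides the function pointer during its early setup, and generic code simply calls through the pointer. A stripped-down userspace sketch of the pattern, not the kernel's actual wiring (struct and function names are illustrative):

#include <stdio.h>

/* Illustrative stand-in for struct x86_platform_ops. */
struct platform_ops {
        int (*i8042_detect)(void);
};

static int default_i8042_detect(void)  { return 1; }   /* assume the controller exists */
static int mrst_i8042_detect(void)     { return 0; }   /* MID boards have no i8042 */

static struct platform_ops platform = {
        .i8042_detect = default_i8042_detect,
};

/* Early platform setup swaps in its own implementation... */
static void mrst_early_setup(void)
{
        platform.i8042_detect = mrst_i8042_detect;
}

/* ...and generic code later just calls through the pointer. */
int main(void)
{
        mrst_early_setup();
        printf("probe i8042: %s\n", platform.i8042_detect() ? "yes" : "skip");
        return 0;
}
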
index a6f695d..b1ed0a1 100644 (file)
@@ -1879,6 +1879,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        pgprintk("hfn old %lx new %lx\n",
                                 spte_to_pfn(*sptep), pfn);
                        rmap_remove(vcpu->kvm, sptep);
+                       __set_spte(sptep, shadow_trap_nonpresent_pte);
+                       kvm_flush_remote_tlbs(vcpu->kvm);
                } else
                        was_rmapped = 1;
        }
@@ -2924,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
        return kvm_mmu_zap_page(kvm, page) + 1;
 }
 
-static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        struct kvm *kvm;
        struct kvm *kvm_freed = NULL;
index 89d66ca..2331bdc 100644 (file)
@@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        /* advance table_gfn when emulating 1gb pages with 4k */
                        if (delta == 0)
                                table_gfn += PT_INDEX(addr, level);
+                       access &= gw->pte_access;
                } else {
                        direct = 0;
                        table_gfn = gw->table_gfn[level - 2];
index 859a01a..ee03679 100644 (file)
@@ -1744,18 +1744,15 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }
-       vcpu->arch.efer |= EFER_LMA;
-       vmx_set_efer(vcpu, vcpu->arch.efer);
+       vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.efer &= ~EFER_LMA;
-
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_IA32E_MODE);
-       vmx_set_efer(vcpu, vcpu->arch.efer);
+       vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
 #endif
index 05d571f..7fa89c3 100644 (file)
@@ -1562,7 +1562,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
 
        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-       entries = vmalloc(size);
+       entries = kmalloc(size, GFP_KERNEL);
        if (!entries)
                goto out;
 
@@ -1581,7 +1581,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
        r = n;
 
 out_free:
-       vfree(entries);
+       kfree(entries);
 out:
        return r;
 }
index 6fdb3ec..5525309 100644 (file)
@@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass)
                                        idx, r, disabled, pass);
                                if (pci_claim_resource(dev, idx) < 0) {
                                        /* We'll assign a new address later */
+                                       dev->fw_addr[idx] = r->start;
                                        r->end -= r->start;
                                        r->start = 0;
                                }
index 7ef3a27..cb29191 100644 (file)
@@ -66,8 +66,9 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
                                          devfn, pos, 4, &pcie_cap))
                        return 0;
 
-               if (pcie_cap == 0xffffffff)
-                       return 0;
+               if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
+                       PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
+                       break;
 
                if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
                        raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
@@ -76,7 +77,7 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
                                return pos;
                }
 
-               pos = pcie_cap >> 20;
+               pos = PCI_EXT_CAP_NEXT(pcie_cap);
        }
 
        return 0;
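
The fixed loop above is the standard walk of the PCIe extended capability list: read the 32-bit header at the current offset, stop on an ID of 0x0000 or 0xffff, and follow the next pointer in bits 31:20 (what PCI_EXT_CAP_NEXT() extracts) instead of shifting the whole header. A userspace sketch of the same walk over a device's config space in sysfs, assuming the caller passes a path such as /sys/bus/pci/devices/0000:00:00.0/config (extended space needs root and a PCIe device):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PCI_EXT_CAP_ID(hdr)     ((hdr) & 0x0000ffff)
#define PCI_EXT_CAP_NEXT(hdr)   (((hdr) >> 20) & 0xffc)

int main(int argc, char **argv)
{
        uint32_t hdr;
        int pos = 0x100;                /* extended capabilities start here */
        int fd;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;

        while (pos) {
                if (pread(fd, &hdr, 4, pos) != 4)
                        break;
                /* 0x0000 or 0xffff means "no capability here" - stop walking */
                if (PCI_EXT_CAP_ID(hdr) == 0x0000 || PCI_EXT_CAP_ID(hdr) == 0xffff)
                        break;
                printf("cap 0x%04x at 0x%03x\n", PCI_EXT_CAP_ID(hdr), pos);
                pos = PCI_EXT_CAP_NEXT(hdr);    /* bits 31:20, dword-aligned */
        }
        close(fd);
        return 0;
}
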
index 6f53453..d4ff5e8 100644 (file)
@@ -106,6 +106,14 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
  */
 static int have_vcpu_info_placement = 1;
 
+static void clamp_max_cpus(void)
+{
+#ifdef CONFIG_SMP
+       if (setup_max_cpus > MAX_VIRT_CPUS)
+               setup_max_cpus = MAX_VIRT_CPUS;
+#endif
+}
+
 static void xen_vcpu_setup(int cpu)
 {
        struct vcpu_register_vcpu_info info;
@@ -113,13 +121,17 @@ static void xen_vcpu_setup(int cpu)
        struct vcpu_info *vcpup;
 
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
-       per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
-       if (!have_vcpu_info_placement)
-               return;         /* already tested, not available */
+       if (cpu < MAX_VIRT_CPUS)
+               per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
-       vcpup = &per_cpu(xen_vcpu_info, cpu);
+       if (!have_vcpu_info_placement) {
+               if (cpu >= MAX_VIRT_CPUS)
+                       clamp_max_cpus();
+               return;
+       }
 
+       vcpup = &per_cpu(xen_vcpu_info, cpu);
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);
 
@@ -134,6 +146,7 @@ static void xen_vcpu_setup(int cpu)
        if (err) {
                printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
                have_vcpu_info_placement = 0;
+               clamp_max_cpus();
        } else {
                /* This cpu is using the registered vcpu info, even if
                   later ones fail to. */
@@ -740,7 +753,6 @@ static void set_xen_basic_apic_ops(void)
 
 #endif
 
-
 static void xen_clts(void)
 {
        struct multicall_space mcs;
@@ -1033,6 +1045,23 @@ static void xen_crash_shutdown(struct pt_regs *regs)
        xen_reboot(SHUTDOWN_crash);
 }
 
+static int
+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+       xen_reboot(SHUTDOWN_crash);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block xen_panic_block = {
+       .notifier_call= xen_panic_event,
+};
+
+int xen_panic_handler_init(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
+       return 0;
+}
+
 static const struct machine_ops __initdata xen_machine_ops = {
        .restart = xen_restart,
        .halt = xen_machine_halt,
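
xen_panic_handler_init() in the hunk above is a plain use of the panic notifier chain: register a notifier_block whose callback runs when the kernel panics, here to ask the hypervisor to record a crash. A minimal kernel-module sketch of that registration against the same chain (module name, message and license boilerplate are illustrative):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_panic_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
        pr_emerg("my driver: panic notifier ran\n");
        return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
        .notifier_call = my_panic_event,
};

static int __init my_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);
        return 0;
}

static void __exit my_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_block);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
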
index ad0047f..328b003 100644 (file)
@@ -20,6 +20,7 @@
 #include <xen/page.h>
 #include <xen/interface/callback.h>
 #include <xen/interface/physdev.h>
+#include <xen/interface/memory.h>
 #include <xen/features.h>
 
 #include "xen-ops.h"
@@ -32,6 +33,73 @@ extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);
 
+static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
+                                             phys_addr_t end_addr)
+{
+       struct xen_memory_reservation reservation = {
+               .address_bits = 0,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+       unsigned long start, end;
+       unsigned long len = 0;
+       unsigned long pfn;
+       int ret;
+
+       start = PFN_UP(start_addr);
+       end = PFN_DOWN(end_addr);
+
+       if (end <= start)
+               return 0;
+
+       printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
+              start, end);
+       for(pfn = start; pfn < end; pfn++) {
+               unsigned long mfn = pfn_to_mfn(pfn);
+
+               /* Make sure pfn exists to start with */
+               if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+                       continue;
+
+               set_xen_guest_handle(reservation.extent_start, &mfn);
+               reservation.nr_extents = 1;
+
+               ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                          &reservation);
+               WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
+                    start, end, ret);
+               if (ret == 1) {
+                       set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+                       len++;
+               }
+       }
+       printk(KERN_CONT "%ld pages freed\n", len);
+
+       return len;
+}
+
+static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
+                                                    const struct e820map *e820)
+{
+       phys_addr_t max_addr = PFN_PHYS(max_pfn);
+       phys_addr_t last_end = 0;
+       unsigned long released = 0;
+       int i;
+
+       for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
+               phys_addr_t end = e820->map[i].addr;
+               end = min(max_addr, end);
+
+               released += xen_release_chunk(last_end, end);
+               last_end = e820->map[i].addr + e820->map[i].size;
+       }
+
+       if (last_end < max_addr)
+               released += xen_release_chunk(last_end, max_addr);
+
+       printk(KERN_INFO "released %ld pages of unused memory\n", released);
+       return released;
+}
 
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
@@ -67,6 +135,8 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+       xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
        return "Xen";
 }
 
@@ -156,6 +226,8 @@ void __init xen_arch_setup(void)
        struct physdev_set_iopl set_iopl;
        int rc;
 
+       xen_panic_handler_init();
+
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
 
index a29693f..25f232b 100644 (file)
@@ -394,6 +394,8 @@ static void stop_self(void *v)
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */
 
+       set_cpu_online(cpu, false);
+
        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
 }
index 2aab4a2..1a5353a 100644 (file)
@@ -156,45 +156,6 @@ static void do_stolen_accounting(void)
        account_idle_ticks(ticks);
 }
 
-/*
- * Xen sched_clock implementation.  Returns the number of unstolen
- * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
- * states.
- */
-static unsigned long long xen_sched_clock(void)
-{
-       struct vcpu_runstate_info state;
-       cycle_t now;
-       u64 ret;
-       s64 offset;
-
-       /*
-        * Ideally sched_clock should be called on a per-cpu basis
-        * anyway, so preempt should already be disabled, but that's
-        * not current practice at the moment.
-        */
-       preempt_disable();
-
-       now = xen_clocksource_read();
-
-       get_runstate_snapshot(&state);
-
-       WARN_ON(state.state != RUNSTATE_running);
-
-       offset = now - state.state_entry_time;
-       if (offset < 0)
-               offset = 0;
-
-       ret = state.time[RUNSTATE_blocked] +
-               state.time[RUNSTATE_running] +
-               offset;
-
-       preempt_enable();
-
-       return ret;
-}
-
-
 /* Get the TSC speed from Xen */
 static unsigned long xen_tsc_khz(void)
 {
@@ -475,7 +436,7 @@ void xen_timer_resume(void)
 }
 
 static const struct pv_time_ops xen_time_ops __initdata = {
-       .sched_clock = xen_sched_clock,
+       .sched_clock = xen_clocksource_read,
 };
 
 static __init void xen_time_init(void)
index ed77694..7c8ab86 100644 (file)
@@ -102,4 +102,6 @@ void xen_sysret32(void);
 void xen_sysret64(void);
 void xen_adjust_exception_frame(void);
 
+extern int xen_panic_handler_init(void);
+
 #endif /* XEN_OPS_H */
index 98a6610..a854df2 100644 (file)
@@ -165,7 +165,7 @@ static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
 
        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
-               ablkcipher_walk_done(req, walk, -ENOMEM);
+               return ablkcipher_walk_done(req, walk, -ENOMEM);
 
        base = p + 1;
 
index d97b8dc..18b3f14 100644 (file)
@@ -70,6 +70,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 acpi_status acpi_enable(void)
 {
        acpi_status status;
+       int retry;
 
        ACPI_FUNCTION_TRACE(acpi_enable);
 
@@ -98,16 +99,18 @@ acpi_status acpi_enable(void)
 
        /* Sanity check that transition succeeded */
 
-       if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
-               ACPI_ERROR((AE_INFO,
-                           "Hardware did not enter ACPI mode"));
-               return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+       for (retry = 0; retry < 30000; ++retry) {
+               if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+                       if (retry != 0)
+                               ACPI_WARNING((AE_INFO,
+                               "Platform took > %d00 usec to enter ACPI mode", retry));
+                       return_ACPI_STATUS(AE_OK);
+               }
+               acpi_os_stall(100);     /* 100 usec */
        }
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-                         "Transition to ACPI mode successful\n"));
-
-       return_ACPI_STATUS(AE_OK);
+       ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode"));
+       return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enable)
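The reworked check above no longer fails on the first read: it polls acpi_hw_get_mode() up to 30000 times with a 100-microsecond stall between reads, giving the platform roughly three seconds to report ACPI mode before AE_NO_HARDWARE_RESPONSE is returned. The same retry-budget idiom in isolation (hypothetical helpers, sketch only):

        /* Poll check() every 100 us until it succeeds or max_retries stalls elapse.
         * Returns the number of stalls taken, or -1 on timeout. */
        static int poll_with_stall(int (*check)(void), void (*stall_100us)(void),
                                   int max_retries)
        {
                int retry;

                for (retry = 0; retry < max_retries; ++retry) {
                        if (check())
                                return retry;
                        stall_100us();
                }
                return -1;
        }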
index 3026e3f..dc58402 100644 (file)
@@ -868,9 +868,15 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
 static void acpi_battery_notify(struct acpi_device *device, u32 event)
 {
        struct acpi_battery *battery = acpi_driver_data(device);
+#ifdef CONFIG_ACPI_SYSFS_POWER
+       struct device *old;
+#endif
 
        if (!battery)
                return;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+       old = battery->bat.dev;
+#endif
        acpi_battery_update(battery);
        acpi_bus_generate_proc_event(device, event,
                                     acpi_battery_present(battery));
@@ -879,7 +885,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
                                        acpi_battery_present(battery));
 #ifdef CONFIG_ACPI_SYSFS_POWER
        /* acpi_battery_update could remove power_supply object */
-       if (battery->bat.dev)
+       if (old && battery->bat.dev)
                power_supply_changed(&battery->bat);
 #endif
 }
index 01381be..2bb28b9 100644 (file)
@@ -214,7 +214,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
        .ident = "Sony VGN-SR290J",
        .matches = {
                     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
                },
        },
        {
index 5128435..e9699aa 100644 (file)
@@ -223,7 +223,7 @@ static bool processor_physically_present(acpi_handle handle)
        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);
 
-       if (cpuid == -1)
+       if ((cpuid == -1) && (num_possible_cpus() > 1))
                return false;
 
        return true;
index b1b3856..e9a8026 100644 (file)
@@ -76,14 +76,19 @@ static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
 module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
+static int bm_check_disable __read_mostly;
+module_param(bm_check_disable, uint, 0000);
 
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+#ifdef CONFIG_ACPI_PROCFS
 static u64 us_to_pm_timer_ticks(s64 t)
 {
        return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
 }
+#endif
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -763,6 +768,9 @@ static int acpi_idle_bm_check(void)
 {
        u32 bm_status = 0;
 
+       if (bm_check_disable)
+               return 0;
+
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status)
                acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
@@ -947,7 +955,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        if (acpi_idle_suspend)
                return(acpi_idle_enter_c1(dev, state));
 
-       if (acpi_idle_bm_check()) {
+       if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
                if (dev->safe_state) {
                        dev->last_state = dev->safe_state;
                        return dev->safe_state->enter(dev, dev->safe_state);
index 5b7c52e..2862c78 100644 (file)
@@ -81,6 +81,20 @@ static int acpi_sleep_prepare(u32 acpi_state)
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 
+/*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * and to restore them during the subsequent resume.  Windows does that also for
+ * suspend to RAM.  However, it is known that this mechanism does not work on
+ * all machines, so we allow the user to disable it with the help of the
+ * 'acpi_sleep=nonvs' kernel command line option.
+ */
+static bool nvs_nosave;
+
+void __init acpi_nvs_nosave(void)
+{
+       nvs_nosave = true;
+}
+
 /*
  * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
  * user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -197,8 +211,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
        u32 acpi_state = acpi_suspend_states[pm_state];
        int error = 0;
 
-       error = suspend_nvs_alloc();
-
+       error = nvs_nosave ? 0 : suspend_nvs_alloc();
        if (error)
                return error;
 
@@ -388,20 +401,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATION
-/*
- * The ACPI specification wants us to save NVS memory regions during hibernation
- * and to restore them during the subsequent resume.  However, it is not certain
- * if this mechanism is going to work on all machines, so we allow the user to
- * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
- * option.
- */
-static bool s4_no_nvs;
-
-void __init acpi_s4_no_nvs(void)
-{
-       s4_no_nvs = true;
-}
-
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
 static bool nosigcheck;
@@ -415,7 +414,7 @@ static int acpi_hibernation_begin(void)
 {
        int error;
 
-       error = s4_no_nvs ? 0 : suspend_nvs_alloc();
+       error = nvs_nosave ? 0 : suspend_nvs_alloc();
        if (!error) {
                acpi_target_sleep_state = ACPI_STATE_S4;
                acpi_sleep_tts_switch(acpi_target_sleep_state);
@@ -510,7 +509,7 @@ static int acpi_hibernation_begin_old(void)
        error = acpi_sleep_prepare(ACPI_STATE_S4);
 
        if (!error) {
-               if (!s4_no_nvs)
+               if (!nvs_nosave)
                        error = suspend_nvs_alloc();
                if (!error)
                        acpi_target_sleep_state = ACPI_STATE_S4;
index 9630fbd..9b9d3bd 100644 (file)
@@ -673,7 +673,7 @@ static struct kobject *get_device_parent(struct device *dev,
                 */
                if (parent == NULL)
                        parent_kobj = virtual_device_parent(dev);
-               else if (parent->class)
+               else if (parent->class && !dev->class->ns_type)
                        return &parent->kobj;
                else
                        parent_kobj = &parent->kobj;
index 9344216..a754715 100644 (file)
@@ -1216,17 +1216,20 @@ static int intel_i915_get_gtt_size(void)
 
                /* G33's GTT size defined in gmch_ctrl */
                pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-               switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-               case G33_PGETBL_SIZE_1M:
+               switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+               case I830_GMCH_GMS_STOLEN_512:
+                       size = 512;
+                       break;
+               case I830_GMCH_GMS_STOLEN_1024:
                        size = 1024;
                        break;
-               case G33_PGETBL_SIZE_2M:
-                       size = 2048;
+               case I830_GMCH_GMS_STOLEN_8192:
+                       size = 8*1024;
                        break;
                default:
                        dev_info(&agp_bridge->dev->dev,
                                 "unknown page table size 0x%x, assuming 512KB\n",
-                               (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+                               (gmch_ctrl & I830_GMCH_GMS_MASK));
                        size = 512;
                }
        } else {
index 5d64e3a..878ac0c 100644 (file)
@@ -493,7 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
                 sysrq_key_table[i] = op_p;
 }
 
-static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
 {
        struct sysrq_key_op *op_p;
        int orig_log_level;
index 24314a9..1030f84 100644 (file)
@@ -623,7 +623,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 
 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 {
-       return tpm_pm_resume(&dev->dev);
+       struct tpm_chip *chip = pnp_get_drvdata(dev);
+       int ret;
+
+       ret = tpm_pm_resume(&dev->dev);
+       if (!ret)
+               tpm_continue_selftest(chip);
+
+       return ret;
 }
 
 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
index d7be69f..b7dab32 100644 (file)
@@ -194,6 +194,6 @@ err_timer:
 
 module_init(cs5535_mfgpt_init);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
 MODULE_LICENSE("GPL");
index 063b218..938b74e 100644 (file)
@@ -1077,6 +1077,7 @@ err_out_unregister:
 
 err_unlock_policy:
        unlock_policy_rwsem_write(cpu);
+       free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
        free_cpumask_var(policy->cpus);
 err_free_policy:
@@ -1762,17 +1763,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
                        dprintk("governor switch\n");
 
                        /* end old governor */
-                       if (data->governor) {
-                               /*
-                                * Need to release the rwsem around governor
-                                * stop due to lock dependency between
-                                * cancel_delayed_work_sync and the read lock
-                                * taken in the delayed work handler.
-                                */
-                               unlock_policy_rwsem_write(data->cpu);
+                       if (data->governor)
                                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-                               lock_policy_rwsem_write(data->cpu);
-                       }
 
                        /* start new governor */
                        data->governor = policy->governor;
index 637c105..bd78acf 100644 (file)
@@ -1183,10 +1183,14 @@ static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
                                /* Copy part of this segment */
                                ignore = skip - offset;
                                len = miter.length - ignore;
+                               if (boffset + len > buflen)
+                                       len = buflen - boffset;
                                memcpy(buf + boffset, miter.addr + ignore, len);
                        } else {
-                               /* Copy all of this segment */
+                               /* Copy all of this segment (up to buflen) */
                                len = miter.length;
+                               if (boffset + len > buflen)
+                                       len = buflen - boffset;
                                memcpy(buf + boffset, miter.addr, len);
                        }
                        boffset += len;
index aedef79..0d2f9db 100644 (file)
@@ -209,7 +209,7 @@ config EDAC_I5100
 
 config EDAC_MPC85XX
        tristate "Freescale MPC83xx / MPC85xx"
-       depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx)
+       depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx)
        help
          Support for error detection and correction on the Freescale
          MPC8349, MPC8560, MPC8540, MPC8548
index cc9357d..e0187d1 100644 (file)
@@ -1300,7 +1300,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
                if (devno == 0)
                        return -ENODEV;
 
-               i7core_printk(KERN_ERR,
+               i7core_printk(KERN_INFO,
                        "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
                        dev_descr->dev, dev_descr->func,
                        PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
index 52ca09b..1052340 100644 (file)
@@ -336,6 +336,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
 
 static struct of_platform_driver mpc85xx_pci_err_driver = {
        .probe = mpc85xx_pci_err_probe,
@@ -650,6 +651,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
        { .compatible = "fsl,p2020-l2-cache-controller", },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
 
 static struct of_platform_driver mpc85xx_l2_err_driver = {
        .probe = mpc85xx_l2_err_probe,
@@ -1120,11 +1122,13 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
        { .compatible = "fsl,mpc8555-memory-controller", },
        { .compatible = "fsl,mpc8560-memory-controller", },
        { .compatible = "fsl,mpc8568-memory-controller", },
+       { .compatible = "fsl,mpc8569-memory-controller", },
        { .compatible = "fsl,mpc8572-memory-controller", },
        { .compatible = "fsl,mpc8349-memory-controller", },
        { .compatible = "fsl,p2020-memory-controller", },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
 
 static struct of_platform_driver mpc85xx_mc_err_driver = {
        .probe = mpc85xx_mc_err_probe,
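The three MODULE_DEVICE_TABLE(of, ...) lines added above export the OF match tables into the module's alias section, so udev/modprobe can autoload the EDAC driver when a matching device-tree node is present instead of requiring a manual load. Generic shape of the pattern (hypothetical driver and compatible string):

        #include <linux/module.h>
        #include <linux/of.h>

        static const struct of_device_id demo_of_match[] = {
                { .compatible = "vendor,demo-device", },
                { /* sentinel */ }
        };
        MODULE_DEVICE_TABLE(of, demo_of_match);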
index f73a155..e23c068 100644 (file)
@@ -352,6 +352,6 @@ static void __exit cs5535_gpio_exit(void)
 module_init(cs5535_gpio_init);
 module_exit(cs5535_gpio_exit);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
 MODULE_LICENSE("GPL");
index 3ca3654..4e51fe3 100644 (file)
@@ -893,10 +893,12 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
 void gpio_unexport(unsigned gpio)
 {
        struct gpio_desc        *desc;
-       int                     status = -EINVAL;
+       int                     status = 0;
 
-       if (!gpio_is_valid(gpio))
+       if (!gpio_is_valid(gpio)) {
+               status = -EINVAL;
                goto done;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -911,7 +913,6 @@ void gpio_unexport(unsigned gpio)
                        clear_bit(FLAG_EXPORT, &desc->flags);
                        put_device(dev);
                        device_unregister(dev);
-                       status = 0;
                } else
                        status = -ENODEV;
        }
index c198186..f87bf10 100644 (file)
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
                mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
                                    false);
                mode->hdisplay = 1366;
-               mode->vsync_start = mode->vsync_start - 1;
-               mode->vsync_end = mode->vsync_end - 1;
+               mode->hsync_start = mode->hsync_start - 1;
+               mode->hsync_end = mode->hsync_end - 1;
                return mode;
        }
 
index aee83fa..9214119 100644 (file)
@@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
+               case FBC_MULTIPLE_PIPES:
+                       seq_printf(m, "multiple pipes are enabled");
+                       break;
                default:
                        seq_printf(m, "unknown reason");
                }
index f00c5ae..2305a12 100644 (file)
@@ -1300,7 +1300,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        drm_mm_put_block(dev_priv->compressed_fb);
-       if (!IS_GM45(dev))
+       if (dev_priv->compressed_llb)
                drm_mm_put_block(dev_priv->compressed_llb);
 }
 
index d147ab2..2e1744d 100644 (file)
@@ -215,6 +215,7 @@ enum no_fbc_reason {
        FBC_MODE_TOO_LARGE, /* mode too large for compression */
        FBC_BAD_PLANE, /* fbc not supported on plane */
        FBC_NOT_TILED, /* buffer not tiled */
+       FBC_MULTIPLE_PIPES, /* more than one pipe active */
 };
 
 enum intel_pch {
@@ -222,6 +223,8 @@ enum intel_pch {
        PCH_CPT,        /* Cougarpoint PCH */
 };
 
+#define QUIRK_PIPEA_FORCE (1<<0)
+
 struct intel_fbdev;
 
 typedef struct drm_i915_private {
@@ -337,6 +340,8 @@ typedef struct drm_i915_private {
        /* PCH chipset type */
        enum intel_pch pch_type;
 
+       unsigned long quirks;
+
        /* Register state */
        bool modeset_on_lid;
        u8 saveLBB;
index 0743858..5aa747f 100644 (file)
@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
                page = read_cache_page_gfp(mapping, i,
                                           GFP_HIGHUSER |
                                           __GFP_COLD |
+                                          __GFP_RECLAIMABLE |
                                           gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
@@ -3646,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
        return ret;
 }
 
+
 int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file_priv,
@@ -3793,7 +3795,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                unsigned long long total_size = 0;
                                int num_fences = 0;
                                for (i = 0; i < args->buffer_count; i++) {
-                                       obj_priv = object_list[i]->driver_private;
+                                       obj_priv = to_intel_bo(object_list[i]);
 
                                        total_size += object_list[i]->size;
                                        num_fences +=
@@ -4741,6 +4743,16 @@ i915_gem_load(struct drm_device *dev)
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
 
+       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+       if (IS_GEN3(dev)) {
+               u32 tmp = I915_READ(MI_ARB_STATE);
+               if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
+                       /* arb state is a masked write, so set bit + bit in mask */
+                       tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
+                       I915_WRITE(MI_ARB_STATE, tmp);
+               }
+       }
+
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;
@@ -4977,7 +4989,7 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
-i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        drm_i915_private_t *dev_priv, *next_dev;
        struct drm_i915_gem_object *obj_priv, *next_obj;
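The extra struct shrinker * argument here comes from the VM shrinker API change being merged alongside: the callback now receives the shrinker it was registered with, so it can recover its owning context (e.g. via container_of). A minimal registration sketch under the new signature (stub body, not the i915 logic):

        #include <linux/mm.h>

        static int demo_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
        {
                if (nr_to_scan == 0)
                        return 0;       /* asked only for a count of freeable objects */
                return -1;              /* -1: cannot shrink right now */
        }

        static struct shrinker demo_shrinker = {
                .shrink = demo_shrink,
                .seeks  = DEFAULT_SEEKS,
        };

        /* register_shrinker(&demo_shrinker) at init time,
         * unregister_shrinker(&demo_shrinker) on teardown. */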
index 150400f..cf41c67 100644 (file)
 #define LM_BURST_LENGTH     0x00000700
 #define LM_FIFO_WATERMARK   0x0000001F
 #define MI_ARB_STATE   0x020e4 /* 915+ only */
+#define   MI_ARB_MASK_SHIFT      16    /* shift for enable bits */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ *   fetches. This is not turned on by default
+ */
+#define   MI_ARB_RENDER_TLB_LOW_PRIORITY       (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define   MI_ARB_ISOCH_WAIT_GTT                        (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define   MI_ARB_BLOCK_GRANT_MASK              (3 << 12)
+#define   MI_ARB_BLOCK_GRANT_8                 (0 << 12)       /* for 3 display planes */
+#define   MI_ARB_BLOCK_GRANT_4                 (1 << 12)       /* for 2 display planes */
+#define   MI_ARB_BLOCK_GRANT_2                 (2 << 12)       /* for 1 display plane */
+#define   MI_ARB_BLOCK_GRANT_0                 (3 << 12)       /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define   MI_ARB_C3_LP_WRITE_ENABLE            (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2TLB fetches.
+ */
+#define   MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE      (1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define   MI_ARB_DUAL_DATA_PHASE_DISABLE               (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define   MI_ARB_CACHE_SNOOP_DISABLE           (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define   MI_ARB_TIME_SLICE_MASK               (7 << 5)
+#define   MI_ARB_TIME_SLICE_1                  (0 << 5)
+#define   MI_ARB_TIME_SLICE_2                  (1 << 5)
+#define   MI_ARB_TIME_SLICE_4                  (2 << 5)
+#define   MI_ARB_TIME_SLICE_6                  (3 << 5)
+#define   MI_ARB_TIME_SLICE_8                  (4 << 5)
+#define   MI_ARB_TIME_SLICE_10                 (5 << 5)
+#define   MI_ARB_TIME_SLICE_14                 (6 << 5)
+#define   MI_ARB_TIME_SLICE_16                 (7 << 5)
+
+/* Low priority grace period page size */
+#define   MI_ARB_LOW_PRIORITY_GRACE_4KB                (0 << 4)        /* default */
+#define   MI_ARB_LOW_PRIORITY_GRACE_8KB                (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define   MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE  (1 << 2)
+
+/* Set display plane priority */
+#define   MI_ARB_DISPLAY_PRIORITY_A_B          (0 << 0)        /* display A > display B */
+#define   MI_ARB_DISPLAY_PRIORITY_B_A          (1 << 0)        /* display B > display A */
+
 #define CACHE_MODE_0   0x02120 /* 915+ only */
 #define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 
 #define PCH_PP_STATUS          0xc7200
 #define PCH_PP_CONTROL         0xc7204
+#define  PANEL_UNLOCK_REGS     (0xabcd << 16)
 #define  EDP_FORCE_VDD         (1 << 3)
 #define  EDP_BLC_ENABLE                (1 << 2)
 #define  PANEL_POWER_RESET     (1 << 1)
index 68dcf36..5e21b31 100644 (file)
@@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
        intel_clock_t clock;
        int max_n;
        bool found;
-       /* approximately equals target * 0.00488 */
-       int err_most = (target >> 8) + (target >> 10);
+       /* approximately equals target * 0.00585 */
+       int err_most = (target >> 8) + (target >> 9);
        found = false;
 
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -1180,8 +1180,12 @@ static void intel_update_fbc(struct drm_crtc *crtc,
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj_priv;
+       struct drm_crtc *tmp_crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane;
+       int crtcs_enabled = 0;
+
+       DRM_DEBUG_KMS("\n");
 
        if (!i915_powersave)
                return;
@@ -1199,10 +1203,21 @@ static void intel_update_fbc(struct drm_crtc *crtc,
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
+        *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
+       list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+               if (tmp_crtc->enabled)
+                       crtcs_enabled++;
+       }
+       DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+       if (crtcs_enabled > 1) {
+               DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+               goto out_disable;
+       }
        if (intel_fb->obj->size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                                "compression\n");
@@ -1255,7 +1270,7 @@ out_disable:
        }
 }
 
-static int
+int
 intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -2255,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        intel_wait_for_vblank(dev);
                }
 
+               /* Don't disable pipe A or pipe A PLLs if needed */
+               if (pipeconf_reg == PIPEACONF &&
+                   (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+                       goto skip_pipe_off;
+
                /* Next, disable display pipes */
                temp = I915_READ(pipeconf_reg);
                if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -2270,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
                        I915_READ(dpll_reg);
                }
-
+       skip_pipe_off:
                /* Wait for the clocks to turn off. */
                udelay(150);
                break;
@@ -2356,8 +2376,6 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
                if (mode->clock * 3 > 27000 * 4)
                        return MODE_CLOCK_HIGH;
        }
-
-       drm_mode_set_crtcinfo(adjusted_mode, 0);
        return true;
 }
 
@@ -3736,6 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        if (dev_priv->lvds_dither) {
                                if (HAS_PCH_SPLIT(dev)) {
                                        pipeconf |= PIPE_ENABLE_DITHER;
+                                       pipeconf &= ~PIPE_DITHER_TYPE_MASK;
                                        pipeconf |= PIPE_DITHER_TYPE_ST01;
                                } else
                                        lvds |= LVDS_ENABLE_DITHER;
@@ -4412,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
                DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
                /* Unlock panel regs */
-               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+                          PANEL_UNLOCK_REGS);
 
                dpll &= ~DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
@@ -4455,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
                DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
                /* Unlock panel regs */
-               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+                          PANEL_UNLOCK_REGS);
 
                dpll |= DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
@@ -4695,7 +4716,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct drm_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       unsigned long flags;
+       unsigned long flags, offset;
        int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
        int ret, pipesrc;
        u32 flip_mask;
@@ -4762,19 +4783,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                while (I915_READ(ISR) & flip_mask)
                        ;
 
+       /* Offset into the new buffer for cases of shared fbs between CRTCs */
+       offset = obj_priv->gtt_offset;
+       offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+
        BEGIN_LP_RING(4);
        if (IS_I965G(dev)) {
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+               OUT_RING(offset | obj_priv->tiling_mode);
                pipesrc = I915_READ(pipesrc_reg); 
                OUT_RING(pipesrc & 0x0fff0fff);
        } else {
                OUT_RING(MI_DISPLAY_FLIP_I915 |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset);
+               OUT_RING(offset);
                OUT_RING(MI_NOOP);
        }
        ADVANCE_LP_RING();
@@ -5506,6 +5531,66 @@ static void intel_init_display(struct drm_device *dev)
        }
 }
 
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times.  This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force (struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+       DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+}
+
+struct intel_quirk {
+       int device;
+       int subsystem_vendor;
+       int subsystem_device;
+       void (*hook)(struct drm_device *dev);
+};
+
+struct intel_quirk intel_quirks[] = {
+       /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+       { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+       /* HP Mini needs pipe A force quirk (LP: #322104) */
+       { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+       /* Thinkpad R31 needs pipe A force quirk */
+       { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
+       /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+       { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+       /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
+       { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
+       /* ThinkPad X40 needs pipe A force quirk */
+
+       /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+       { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+       /* 855 & before need to leave pipe A & dpll A up */
+       { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+       { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+       struct pci_dev *d = dev->pdev;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+               struct intel_quirk *q = &intel_quirks[i];
+
+               if (d->device == q->device &&
+                   (d->subsystem_vendor == q->subsystem_vendor ||
+                    q->subsystem_vendor == PCI_ANY_ID) &&
+                   (d->subsystem_device == q->subsystem_device ||
+                    q->subsystem_device == PCI_ANY_ID))
+                       q->hook(dev);
+       }
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5518,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = (void *)&intel_mode_funcs;
 
+       intel_init_quirks(dev);
+
        intel_init_display(dev);
 
        if (IS_I965G(dev)) {
index 1aac59e..5dde80f 100644 (file)
@@ -717,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        }
 }
 
+static void ironlake_edp_panel_on (struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+       u32 pp, pp_status;
+
+       pp_status = I915_READ(PCH_PP_STATUS);
+       if (pp_status & PP_ON)
+               return;
+
+       pp = I915_READ(PCH_PP_CONTROL);
+       pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+       I915_WRITE(PCH_PP_CONTROL, pp);
+       do {
+               pp_status = I915_READ(PCH_PP_STATUS);
+       } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
+
+       if (time_after(jiffies, timeout))
+               DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+
+       pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+       I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
+static void ironlake_edp_panel_off (struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+       u32 pp, pp_status;
+
+       pp = I915_READ(PCH_PP_CONTROL);
+       pp &= ~POWER_TARGET_ON;
+       I915_WRITE(PCH_PP_CONTROL, pp);
+       do {
+               pp_status = I915_READ(PCH_PP_STATUS);
+       } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
+
+       if (time_after(jiffies, timeout))
+               DRM_DEBUG_KMS("panel off wait timed out\n");
+
+       /* Make sure VDD is enabled so DP AUX will work */
+       pp |= EDP_FORCE_VDD;
+       I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
 static void ironlake_edp_backlight_on (struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -751,14 +796,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        if (mode != DRM_MODE_DPMS_ON) {
                if (dp_reg & DP_PORT_EN) {
                        intel_dp_link_down(intel_encoder, dp_priv->DP);
-                       if (IS_eDP(intel_encoder))
+                       if (IS_eDP(intel_encoder)) {
                                ironlake_edp_backlight_off(dev);
+                               ironlake_edp_panel_off(dev);
+                       }
                }
        } else {
                if (!(dp_reg & DP_PORT_EN)) {
                        intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
-                       if (IS_eDP(intel_encoder))
+                       if (IS_eDP(intel_encoder)) {
+                               ironlake_edp_panel_on(dev);
                                ironlake_edp_backlight_on(dev);
+                       }
                }
        }
        dp_priv->dpms_mode = mode;
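Both new panel-power helpers above wait on PCH_PP_STATUS with a jiffies deadline: compute the deadline once with msecs_to_jiffies(5000), poll, and test time_after() so the wait survives jiffies wraparound. The idiom condensed (hypothetical readiness predicate, sketch only):

        #include <linux/delay.h>
        #include <linux/errno.h>
        #include <linux/jiffies.h>

        static int wait_for_condition(int (*ready)(void), unsigned int timeout_ms)
        {
                unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

                while (!ready()) {
                        if (time_after(jiffies, deadline))
                                return ready() ? 0 : -ETIMEDOUT; /* one last look after timing out */
                        udelay(10);
                }
                return 0;
        }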
index 72206f3..2f7970b 100644 (file)
@@ -215,6 +215,9 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+                                     struct drm_gem_object *obj);
+
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd *mode_cmd,
index c3c5052..3e18c9e 100644 (file)
@@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_pin(fbo, 64*1024);
+       ret = intel_pin_and_fence_fb_obj(dev, fbo);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
@@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
        drm_framebuffer_cleanup(&ifb->base);
        if (ifb->obj)
-               drm_gem_object_unreference_unlocked(ifb->obj);
+               drm_gem_object_unreference(ifb->obj);
 
        return 0;
 }
index 31df55f..0eab8df 100644 (file)
@@ -599,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
        return 0;
 }
 
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+       DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+       return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+       {
+               .callback = intel_no_modeset_on_lid_dmi_callback,
+               .ident = "Toshiba Tecra A11",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+               },
+       },
+
+       { }     /* terminating entry */
+};
+
 /*
  * Lid events. Note the use of 'modeset_on_lid':
  *  - we set it on lid close, and reset it on open
@@ -622,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         */
        if (connector)
                connector->status = connector->funcs->detect(connector);
+       /* Don't force modeset on machines where it causes a GPU lockup */
+       if (dmi_check_system(intel_no_modeset_on_lid))
+               return NOTIFY_OK;
        if (!acpi_lid_open()) {
                dev_priv->modeset_on_lid = 1;
                return NOTIFY_OK;
index fc924b6..e492919 100644 (file)
@@ -203,36 +203,26 @@ struct methods {
        const bool rw;
 };
 
-static struct methods nv04_methods[] = {
-       { "PROM", load_vbios_prom, false },
-       { "PRAMIN", load_vbios_pramin, true },
-       { "PCIROM", load_vbios_pci, true },
-};
-
-static struct methods nv50_methods[] = {
-       { "ACPI", load_vbios_acpi, true },
+static struct methods shadow_methods[] = {
        { "PRAMIN", load_vbios_pramin, true },
        { "PROM", load_vbios_prom, false },
        { "PCIROM", load_vbios_pci, true },
+       { "ACPI", load_vbios_acpi, true },
 };
 
-#define METHODCNT 3
-
 static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct methods *methods;
-       int i;
+       const int nr_methods = ARRAY_SIZE(shadow_methods);
+       struct methods *methods = shadow_methods;
        int testscore = 3;
-       int scores[METHODCNT];
+       int scores[nr_methods], i;
 
        if (nouveau_vbios) {
-               methods = nv04_methods;
-               for (i = 0; i < METHODCNT; i++)
+               for (i = 0; i < nr_methods; i++)
                        if (!strcasecmp(nouveau_vbios, methods[i].desc))
                                break;
 
-               if (i < METHODCNT) {
+               if (i < nr_methods) {
                        NV_INFO(dev, "Attempting to use BIOS image from %s\n",
                                methods[i].desc);
 
@@ -244,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
                NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
        }
 
-       if (dev_priv->card_type < NV_50)
-               methods = nv04_methods;
-       else
-               methods = nv50_methods;
-
-       for (i = 0; i < METHODCNT; i++) {
+       for (i = 0; i < nr_methods; i++) {
                NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
                         methods[i].desc);
                data[0] = data[1] = 0;  /* avoid reuse of previous image */
@@ -260,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
        }
 
        while (--testscore > 0) {
-               for (i = 0; i < METHODCNT; i++) {
+               for (i = 0; i < nr_methods; i++) {
                        if (scores[i] == testscore) {
                                NV_TRACE(dev, "Using BIOS image from %s\n",
                                         methods[i].desc);
index c9a4a0d..257ea13 100644 (file)
@@ -387,7 +387,8 @@ int nouveau_fbcon_init(struct drm_device *dev)
        dev_priv->nfbdev = nfbdev;
        nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
-       ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+       ret = drm_fb_helper_init(dev, &nfbdev->helper,
+                                nv_two_heads(dev) ? 2 : 1, 4);
        if (ret) {
                kfree(nfbdev);
                return ret;
index 010963d..345a75a 100644 (file)
@@ -333,7 +333,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
        header = radeon_get_ib_value(p, h_idx);
        crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
        reg = CP_PACKET0_GET_REG(header);
-       mutex_lock(&p->rdev->ddev->mode_config.mutex);
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -368,7 +367,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
                }
        }
 out:
-       mutex_unlock(&p->rdev->ddev->mode_config.mutex);
        return r;
 }
 
index 3970e62..a89a15a 100644 (file)
@@ -1230,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
        header = radeon_get_ib_value(p, h_idx);
        crtc_id = radeon_get_ib_value(p, h_idx + 5);
        reg = CP_PACKET0_GET_REG(header);
-       mutex_lock(&p->rdev->ddev->mode_config.mutex);
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1264,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
                ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
        }
 out:
-       mutex_unlock(&p->rdev->ddev->mode_config.mutex);
        return r;
 }
 
@@ -2354,6 +2352,7 @@ void r100_mc_init(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_IGP)
                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
+       rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
index 7e81db5..19a7ef7 100644 (file)
@@ -481,6 +481,7 @@ void r300_mc_init(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_IGP)
                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
+       rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
@@ -1176,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p)
        int r;
 
        track = kzalloc(sizeof(*track), GFP_KERNEL);
+       if (track == NULL)
+               return -ENOMEM;
        r100_cs_track_clear(p->rdev, track);
        p->track = track;
        do {
index 34330df..694af7c 100644 (file)
@@ -125,6 +125,7 @@ void r520_mc_init(struct radeon_device *rdev)
        r520_vram_get_type(rdev);
        r100_vram_init_sizes(rdev);
        radeon_vram_location(rdev, &rdev->mc, 0);
+       rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
index 3d6645c..e100f69 100644 (file)
@@ -1179,6 +1179,7 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
                if (rdev->flags & RADEON_IS_IGP)
                        base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
                radeon_vram_location(rdev, &rdev->mc, base);
+               rdev->mc.gtt_base_align = 0;
                radeon_gtt_location(rdev, mc);
        }
 }
index f4fb88e..ca5c29f 100644 (file)
@@ -538,9 +538,12 @@ int
 r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       int ret;
        DRM_DEBUG("\n");
 
-       r600_nomm_get_vb(dev);
+       ret = r600_nomm_get_vb(dev);
+       if (ret)
+               return ret;
 
        dev_priv->blit_vb->file_priv = file_priv;
 
index c39c1bc..144c32d 100644 (file)
@@ -585,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
        header = radeon_get_ib_value(p, h_idx);
        crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
        reg = CP_PACKET0_GET_REG(header);
-       mutex_lock(&p->rdev->ddev->mode_config.mutex);
+
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -620,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
                ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
        }
 out:
-       mutex_unlock(&p->rdev->ddev->mode_config.mutex);
        return r;
 }
 
index ab61aaa..2f94dc6 100644 (file)
@@ -351,6 +351,7 @@ struct radeon_mc {
        int                     vram_mtrr;
        bool                    vram_is_ddr;
        bool                    igp_sideport_enabled;
+       u64                     gtt_base_align;
 };
 
 bool radeon_combios_sideport_present(struct radeon_device *rdev);
index 99bd8a9..10673ae 100644 (file)
@@ -280,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
+       /* ASUS HD 3600 board lists the DVI port as HDMI */
+       if ((dev->pdev->device == 0x9598) &&
+           (dev->pdev->subsystem_vendor == 0x1043) &&
+           (dev->pdev->subsystem_device == 0x01e4)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+                       *connector_type = DRM_MODE_CONNECTOR_DVII;
+               }
+       }
+
        /* ASUS HD 3450 board lists the DVI port as HDMI */
        if ((dev->pdev->device == 0x95C5) &&
            (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -1029,8 +1038,15 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
                                      data_offset);
                switch (crev) {
                case 1:
-                       if (igp_info->info.ucMemoryType & 0xf0)
-                               return true;
+                       /* AMD IGPS */
+                       if ((rdev->family == CHIP_RS690) ||
+                           (rdev->family == CHIP_RS740)) {
+                               if (igp_info->info.ulBootUpMemoryClock)
+                                       return true;
+                       } else {
+                               if (igp_info->info.ucMemoryType & 0xf0)
+                                       return true;
+                       }
                        break;
                case 2:
                        if (igp_info->info_2.ucMemoryType & 0x0f)
index d1c1d8d..2417d7b 100644 (file)
@@ -3050,6 +3050,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
            rdev->pdev->subsystem_device == 0x308b)
                return;
 
+       /* quirk for rs4xx HP dv5000 laptop to make it resume
+        * - it hangs on resume inside the dynclk 1 table.
+        */
+       if (rdev->family == CHIP_RS480 &&
+           rdev->pdev->subsystem_vendor == 0x103c &&
+           rdev->pdev->subsystem_device == 0x30a4)
+               return;
+
        /* DYN CLK 1 */
        table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
        if (table)
index f58f8bd..adccbc2 100644 (file)
@@ -771,14 +771,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
                        } else
                                ret = connector_status_connected;
 
-                       /* multiple connectors on the same encoder with the same ddc line
-                        * This tends to be HDMI and DVI on the same encoder with the
-                        * same ddc line.  If the edid says HDMI, consider the HDMI port
-                        * connected and the DVI port disconnected.  If the edid doesn't
-                        * say HDMI, vice versa.
+                       /* This gets complicated.  We have boards with VGA + HDMI with a
+                        * shared DDC line and we have boards with DVI-D + HDMI with a shared
+                        * DDC line.  The latter is more complex because with DVI<->HDMI adapters
+                        * you don't really know what's connected to which port as both are digital.
                         */
                        if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
                                struct drm_device *dev = connector->dev;
+                               struct radeon_device *rdev = dev->dev_private;
                                struct drm_connector *list_connector;
                                struct radeon_connector *list_radeon_connector;
                                list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
@@ -788,15 +788,10 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
                                        if (list_radeon_connector->shared_ddc &&
                                            (list_radeon_connector->ddc_bus->rec.i2c_id ==
                                             radeon_connector->ddc_bus->rec.i2c_id)) {
-                                               if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
-                                                       if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
-                                                               kfree(radeon_connector->edid);
-                                                               radeon_connector->edid = NULL;
-                                                               ret = connector_status_disconnected;
-                                                       }
-                                               } else {
-                                                       if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
-                                                           (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+                                               /* cases where both connectors are digital */
+                                               if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+                                                       /* hpd is our only option in this case */
+                                                       if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
                                                                kfree(radeon_connector->edid);
                                                                radeon_connector->edid = NULL;
                                                                ret = connector_status_disconnected;
index 5f31731..dd279da 100644 (file)
@@ -226,20 +226,20 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
        u64 size_af, size_bf;
 
-       size_af = 0xFFFFFFFF - mc->vram_end;
-       size_bf = mc->vram_start;
+       size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+       size_bf = mc->vram_start & ~mc->gtt_base_align;
        if (size_bf > size_af) {
                if (mc->gtt_size > size_bf) {
                        dev_warn(rdev->dev, "limiting GTT\n");
                        mc->gtt_size = size_bf;
                }
-               mc->gtt_start = mc->vram_start - mc->gtt_size;
+               mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
        } else {
                if (mc->gtt_size > size_af) {
                        dev_warn(rdev->dev, "limiting GTT\n");
                        mc->gtt_size = size_af;
                }
-               mc->gtt_start = mc->vram_end + 1;
+               mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
        }
        mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
        dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
index 6a70c0d..ab389f8 100644 (file)
@@ -128,7 +128,8 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                for (i = 0, found = 0; i < rdev->num_crtc; i++) {
                        crtc = (struct drm_crtc *)minfo->crtcs[i];
                        if (crtc && crtc->base.id == value) {
-                               value = i;
+                               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+                               value = radeon_crtc->crtc_id;
                                found = 1;
                                break;
                        }
index bad77f4..5688a0c 100644 (file)
@@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
                udelay(panel_pwr_delay * 1000);
                WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
                WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+               udelay(panel_pwr_delay * 1000);
                break;
        }
 
index f2ed27c..0320403 100644 (file)
@@ -642,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
        }
        flicker_removal = (tmp + 500) / 1000;
 
-       if (flicker_removal < 2)
-               flicker_removal = 2;
+       if (flicker_removal < 3)
+               flicker_removal = 3;
        for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
                if (flicker_removal == SLOPE_limit[i])
                        break;
index 115d26b..3fa6984 100644 (file)
@@ -333,6 +333,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (cp == PM_PROFILE_AUTO) ? "auto" :
                        (cp == PM_PROFILE_LOW) ? "low" :
+                       (cp == PM_PROFILE_MID) ? "mid" :
                        (cp == PM_PROFILE_HIGH) ? "high" : "default");
 }
 
index 9e4240b..f454c9a 100644 (file)
@@ -57,7 +57,9 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                /* FIXME: RS400 & RS480 seems to have issue with GART size
-                * if 4G of system memory (needs more testing) */
+                * if 4G of system memory (needs more testing)
+                */
+               /* XXX is this still an issue with proper alignment? */
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
        }
@@ -263,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev)
        r100_vram_init_sizes(rdev);
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
+       rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 }
index 7bb4c3e..6dc15ea 100644 (file)
@@ -698,6 +698,7 @@ void rs600_mc_init(struct radeon_device *rdev)
        base = G_000004_MC_FB_START(base) << 16;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        radeon_vram_location(rdev, &rdev->mc, base);
+       rdev->mc.gtt_base_align = 0;
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 }
index f4f0a61..ce4ecbe 100644 (file)
@@ -162,6 +162,7 @@ void rs690_mc_init(struct radeon_device *rdev)
        rs690_pm_info(rdev);
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        radeon_vram_location(rdev, &rdev->mc, base);
+       rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 }
index 7d9a7b0..0c9c169 100644 (file)
@@ -195,6 +195,7 @@ void rv515_mc_init(struct radeon_device *rdev)
        rv515_vram_get_type(rdev);
        r100_vram_init_sizes(rdev);
        radeon_vram_location(rdev, &rdev->mc, 0);
+       rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
index b1d67dc..ca90479 100644 (file)
 #include <linux/slab.h>
 
 #include <asm/atomic.h>
-#include <asm/agp.h>
 
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_page_alloc.h"
 
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
 
 #define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               16
@@ -392,7 +394,7 @@ static int ttm_pool_get_num_unused_pages(void)
 /**
  * Callback for mm to request pool to reduce number of page held.
  */
-static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
 {
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
index f1d6261..437ac78 100644 (file)
@@ -972,6 +972,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        ret = copy_from_user(rects, user_rects, rects_size);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to get rects.\n");
+               ret = -EFAULT;
                goto out_free;
        }
 
index fbd62ab..0ffaf2c 100644 (file)
@@ -89,7 +89,6 @@ struct gc_pad {
 struct gc {
        struct pardevice *pd;
        struct gc_pad pads[GC_MAX_DEVICES];
-       struct input_dev *dev[GC_MAX_DEVICES];
        struct timer_list timer;
        int pad_count[GC_MAX];
        int used;
@@ -387,7 +386,7 @@ static void gc_nes_process_packet(struct gc *gc)
        for (i = 0; i < GC_MAX_DEVICES; i++) {
 
                pad = &gc->pads[i];
-               dev = gc->dev[i];
+               dev = pad->dev;
                s = gc_status_bit[i];
 
                switch (pad->type) {
@@ -579,7 +578,7 @@ static void gc_psx_command(struct gc *gc, int b, unsigned char *data)
                read = parport_read_status(port) ^ 0x80;
 
                for (j = 0; j < GC_MAX_DEVICES; j++) {
-                       struct gc_pad *pad = &gc->pads[i];
+                       struct gc_pad *pad = &gc->pads[j];
 
                        if (pad->type == GC_PSX || pad->type == GC_DDR)
                                data[j] |= (read & gc_status_bit[j]) ? (1 << i) : 0;
index 0f9a478..1ba2514 100644 (file)
@@ -69,11 +69,11 @@ config KEYBOARD_ATARI
          module will be called atakbd.
 
 config KEYBOARD_ATKBD
-       tristate "AT keyboard" if EMBEDDED || !X86 || X86_MRST
+       tristate "AT keyboard" if EMBEDDED || !X86
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86 && !X86_MRST
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
@@ -124,7 +124,7 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
          right-hand column will be interpreted as the key shown in the
          left-hand column.
 
-config QT2160
+config KEYBOARD_QT2160
        tristate "Atmel AT42QT2160 Touch Sensor Chip"
        depends on I2C && EXPERIMENTAL
        help
index 7aa59e0..fb16b5e 100644 (file)
  */
 #define TWL4030_MAX_ROWS       8       /* TWL4030 hard limit */
 #define TWL4030_MAX_COLS       8
-#define TWL4030_ROW_SHIFT      3
-#define TWL4030_KEYMAP_SIZE    (TWL4030_MAX_ROWS * TWL4030_MAX_COLS)
+/*
+ * Note that we add space for an extra column so that we can handle
+ * row lines connected to the gnd (see twl4030_col_xlate()).
+ */
+#define TWL4030_ROW_SHIFT      4
+#define TWL4030_KEYMAP_SIZE    (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
 
 struct twl4030_keypad {
        unsigned short  keymap[TWL4030_KEYMAP_SIZE];
@@ -182,7 +186,7 @@ static int twl4030_read_kp_matrix_state(struct twl4030_keypad *kp, u16 *state)
        return ret;
 }
 
-static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
+static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
 {
        int i;
        u16 check = 0;
@@ -191,12 +195,12 @@ static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
                u16 col = key_state[i];
 
                if ((col & check) && hweight16(col) > 1)
-                       return 1;
+                       return true;
 
                check |= col;
        }
 
-       return 0;
+       return false;
 }
 
 static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
@@ -225,7 +229,8 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
                if (!changed)
                        continue;
 
-               for (col = 0; col < kp->n_cols; col++) {
+               /* Extra column handles "all gnd" rows */
+               for (col = 0; col < kp->n_cols + 1; col++) {
                        int code;
 
                        if (!(changed & (1 << col)))
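The widened TWL4030_ROW_SHIFT above turns the keymap into a rows-by-(1 << shift) table, which is what leaves room for the extra "all ground" column scanned by the second hunk. A small sketch of the implied indexing, with illustrative names and values:

#include <stdio.h>

#define MAX_ROWS	8
#define ROW_SHIFT	4			/* up to 16 columns per row      */
#define KEYMAP_SIZE	(MAX_ROWS << ROW_SHIFT)

int main(void)
{
	unsigned short keymap[KEYMAP_SIZE] = { 0 };
	int row = 3, col = 8;			/* col 8: the extra "gnd" column */

	keymap[(row << ROW_SHIFT) + col] = 42;	/* arbitrary key code            */
	printf("keymap holds %d entries, code at (%d,%d) = %u\n",
	       KEYMAP_SIZE, row, col, keymap[(row << ROW_SHIFT) + col]);
	return 0;
}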
index 4ef764c..ee2bf6b 100644 (file)
@@ -258,7 +258,7 @@ static struct platform_driver w90p910_keypad_driver = {
        .probe          = w90p910_keypad_probe,
        .remove         = __devexit_p(w90p910_keypad_remove),
        .driver         = {
-               .name   = "nuc900-keypad",
+               .name   = "nuc900-kpi",
                .owner  = THIS_MODULE,
        },
 };
index eeb58c1..c714ca2 100644 (file)
@@ -17,7 +17,7 @@ config MOUSE_PS2
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86 && !X86_MRST
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you have a PS/2 mouse connected to your system. This
index 40cea33..705589d 100644 (file)
@@ -141,8 +141,13 @@ static int synaptics_capability(struct psmouse *psmouse)
        priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2];
        priv->ext_cap = priv->ext_cap_0c = 0;
 
-       if (!SYN_CAP_VALID(priv->capabilities))
+       /*
+        * Older firmware had the submodel ID fixed to 0x47
+        */
+       if (SYN_ID_FULL(priv->identity) < 0x705 &&
+           SYN_CAP_SUBMODEL_ID(priv->capabilities) != 0x47) {
                return -1;
+       }
 
        /*
         * Unless capExtended is set the rest of the flags should be ignored
@@ -206,6 +211,7 @@ static int synaptics_resolution(struct psmouse *psmouse)
        unsigned char max[3];
 
        if (SYN_ID_MAJOR(priv->identity) < 4)
+               return 0;
 
        if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) {
                if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) {
index 7d4d5e1..b6aa7d2 100644 (file)
@@ -47,7 +47,7 @@
 #define SYN_CAP_FOUR_BUTTON(c)         ((c) & (1 << 3))
 #define SYN_CAP_MULTIFINGER(c)         ((c) & (1 << 1))
 #define SYN_CAP_PALMDETECT(c)          ((c) & (1 << 0))
-#define SYN_CAP_VALID(c)               ((((c) & 0x00ff00) >> 8) == 0x47)
+#define SYN_CAP_SUBMODEL_ID(c)         (((c) & 0x00ff00) >> 8)
 #define SYN_EXT_CAP_REQUESTS(c)                (((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)    (((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)         (((ec) & 0xff0000) >> 16)
@@ -66,6 +66,7 @@
 #define SYN_ID_MODEL(i)                        (((i) >> 4) & 0x0f)
 #define SYN_ID_MAJOR(i)                        ((i) & 0x0f)
 #define SYN_ID_MINOR(i)                        (((i) >> 16) & 0xff)
+#define SYN_ID_FULL(i)                 ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i))
 #define SYN_ID_IS_SYNAPTICS(i)         ((((i) >> 8) & 0xff) == 0x47)
 
 /* synaptics special commands */
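SYN_ID_FULL packs the major firmware version into the high byte and the minor into the low byte, so the new SYN_ID_FULL(priv->identity) < 0x705 test in synaptics.c reads as "firmware older than 7.5". A tiny illustration of that comparison; the identity value is fabricated:

#include <stdio.h>

#define ID_MAJOR(i)	((i) & 0x0f)
#define ID_MINOR(i)	(((i) >> 16) & 0xff)
#define ID_FULL(i)	((ID_MAJOR(i) << 8) | ID_MINOR(i))

int main(void)
{
	unsigned int identity = 0x040006;	/* made up: major 6, minor 4 */

	printf("firmware %u.%u -> 0x%03x, older than 7.5? %s\n",
	       ID_MAJOR(identity), ID_MINOR(identity), ID_FULL(identity),
	       ID_FULL(identity) < 0x705 ? "yes" : "no");
	return 0;
}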
index 256b9e9..3bfe8fa 100644 (file)
@@ -22,7 +22,7 @@ config SERIO_I8042
        tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
        default y
        depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-                  (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !X86_MRST
+                  (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
        help
          i8042 is the chip over which the standard AT keyboard and PS/2
          mouse are connected to the computer. If you use these devices,
index 6168469..ed7ad74 100644 (file)
@@ -7,6 +7,10 @@
  * the Free Software Foundation.
  */
 
+#ifdef CONFIG_X86
+#include <asm/x86_init.h>
+#endif
+
 /*
  * Names.
  */
@@ -165,6 +169,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
                },
        },
+       {
+               /* Gigabyte Spring Peak - defines wrong chassis type */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+               },
+       },
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -840,6 +851,12 @@ static int __init i8042_platform_init(void)
 {
        int retval;
 
+#ifdef CONFIG_X86
+       /* Just return if pre-detection shows no i8042 controller exists */
+       if (!x86_platform.i8042_detect())
+               return -ENODEV;
+#endif
+
 /*
  * On ix86 platforms touching the i8042 data register region can do really
  * bad things. Because of this the region is always reserved on ix86 boxes.
index cc18265..7a45d68 100644 (file)
@@ -233,7 +233,7 @@ static int __devinit w90x900ts_probe(struct platform_device *pdev)
        w90p910_ts->state = TS_IDLE;
        spin_lock_init(&w90p910_ts->lock);
        setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
-                   (unsigned long)&w90p910_ts);
+                   (unsigned long)w90p910_ts);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
index 9bec24d..2d44b33 100644 (file)
@@ -366,6 +366,6 @@ static int __init cs5535_mfgpt_init(void)
 
 module_init(cs5535_mfgpt_init);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
 MODULE_LICENSE("GPL");
index 4917af9..2ed435b 100644 (file)
@@ -539,9 +539,13 @@ static int mmci_get_cd(struct mmc_host *mmc)
        if (host->gpio_cd == -ENOSYS)
                status = host->plat->status(mmc_dev(host->mmc));
        else
-               status = gpio_get_value(host->gpio_cd);
+               status = !gpio_get_value(host->gpio_cd);
 
-       return !status;
+       /*
+        * Use positive logic throughout - status is zero for no card,
+        * non-zero for card inserted.
+        */
+       return status;
 }
 
 static const struct mmc_host_ops mmci_ops = {
index af21792..ad30f07 100644 (file)
@@ -365,6 +365,26 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 
 static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 {
+       struct sdhci_host *host =  platform_get_drvdata(pdev);
+       struct sdhci_s3c *sc = sdhci_priv(host);
+       int ptr;
+
+       sdhci_remove_host(host, 1);
+
+       for (ptr = 0; ptr < 3; ptr++) {
+               clk_disable(sc->clk_bus[ptr]);
+               clk_put(sc->clk_bus[ptr]);
+       }
+       clk_disable(sc->clk_io);
+       clk_put(sc->clk_io);
+
+       iounmap(host->ioaddr);
+       release_resource(sc->ioarea);
+       kfree(sc->ioarea);
+
+       sdhci_free_host(host);
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
index 8bd2368..bb0872a 100644 (file)
@@ -1062,6 +1062,10 @@ struct bnx2x {
 
        /* used to synchronize stats collecting */
        int                     stats_state;
+
+       /* used to synchronize statistics handling across concurrent threads */
+       spinlock_t              stats_lock;
+
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
        int                     executer_idx;
index 57ff5b3..46167c0 100644 (file)
@@ -57,8 +57,8 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION     "1.52.53-1"
-#define DRV_MODULE_RELDATE     "2010/18/04"
+#define DRV_MODULE_VERSION     "1.52.53-2"
+#define DRV_MODULE_RELDATE     "2010/21/07"
 #define BNX2X_BC_VER           0x040200
 
 #include <linux/firmware.h>
@@ -3789,6 +3789,8 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
                struct eth_query_ramrod_data ramrod_data = {0};
                int i, rc;
 
+               spin_lock_bh(&bp->stats_lock);
+
                ramrod_data.drv_counter = bp->stats_counter++;
                ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
                for_each_queue(bp, i)
@@ -3802,6 +3804,8 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
                        bp->spq_left++;
                        bp->stats_pending = 1;
                }
+
+               spin_unlock_bh(&bp->stats_lock);
        }
 }
 
@@ -4367,6 +4371,14 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
        struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        int i;
+       u16 cur_stats_counter;
+
+       /* Make sure we use the value of the counter
+        * used for sending the last stats ramrod.
+        */
+       spin_lock_bh(&bp->stats_lock);
+       cur_stats_counter = bp->stats_counter - 1;
+       spin_unlock_bh(&bp->stats_lock);
 
        memcpy(&(fstats->total_bytes_received_hi),
               &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
@@ -4394,25 +4406,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
                u32 diff;
 
                /* are storm stats valid? */
-               if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
-                                                       bp->stats_counter) {
+               if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
                        DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
                           "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
-                          i, xclient->stats_counter, bp->stats_counter);
+                          i, xclient->stats_counter, cur_stats_counter + 1);
                        return -1;
                }
-               if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
-                                                       bp->stats_counter) {
+               if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
                        DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
                           "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
-                          i, tclient->stats_counter, bp->stats_counter);
+                          i, tclient->stats_counter, cur_stats_counter + 1);
                        return -2;
                }
-               if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
-                                                       bp->stats_counter) {
+               if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
                        DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
                           "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
-                          i, uclient->stats_counter, bp->stats_counter);
+                          i, uclient->stats_counter, cur_stats_counter + 1);
                        return -4;
                }
 
@@ -4849,16 +4858,18 @@ static const struct {
 
 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
-       enum bnx2x_stats_state state = bp->stats_state;
+       enum bnx2x_stats_state state;
 
        if (unlikely(bp->panic))
                return;
 
-       bnx2x_stats_stm[state][event].action(bp);
+       /* Protect a state change flow */
+       spin_lock_bh(&bp->stats_lock);
+       state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+       spin_unlock_bh(&bp->stats_lock);
 
-       /* Make sure the state has been "changed" */
-       smp_wmb();
+       bnx2x_stats_stm[state][event].action(bp);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -9908,6 +9919,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
+       spin_lock_init(&bp->stats_lock);
 #ifdef BCM_CNIC
        mutex_init(&bp->cnic_mutex);
 #endif
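The bnx2x changes above post and read the statistics counter only while holding stats_lock, and the update path snapshots "counter of the last posted ramrod" once before comparing each storm's counter against it. A userspace analogue of that snapshot pattern, with a pthread mutex standing in for the kernel spinlock (illustrative only, not the driver's code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t stats_counter;

/* Post a stats request: hand out the current counter and bump it, atomically. */
static uint16_t stats_post(void)
{
	pthread_mutex_lock(&stats_lock);
	uint16_t sent = stats_counter++;
	pthread_mutex_unlock(&stats_lock);
	return sent;
}

/* Consume an update: take one snapshot of "last posted", then compare. */
static int stats_update(uint16_t reported)
{
	pthread_mutex_lock(&stats_lock);
	uint16_t cur = stats_counter - 1;	/* counter of the last posted request */
	pthread_mutex_unlock(&stats_lock);

	return reported == cur ? 0 : -1;	/* -1: stale statistics, ignore them  */
}

int main(void)
{
	uint16_t sent = stats_post();

	printf("update carrying %u -> %d\n", sent, stats_update(sent));
	printf("update carrying %u -> %d\n", (uint16_t)(sent - 1),
	       stats_update((uint16_t)(sent - 1)));
	return 0;
}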
index df48307..8d7dfd2 100644 (file)
@@ -822,7 +822,7 @@ static int rlb_initialize(struct bonding *bond)
 
        /*initialize packet type*/
        pk_type->type = cpu_to_be16(ETH_P_ARP);
-       pk_type->dev = NULL;
+       pk_type->dev = bond->dev;
        pk_type->func = rlb_arp_recv;
 
        /* register to receive ARPs */
index 1d973db..d7de376 100644 (file)
@@ -1022,7 +1022,7 @@ static const struct net_device_ops lance_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
 };
 
-static int __init dec_lance_probe(struct device *bdev, const int type)
+static int __devinit dec_lance_probe(struct device *bdev, const int type)
 {
        static unsigned version_printed;
        static const char fmt[] = "declance%d";
@@ -1326,7 +1326,7 @@ static void __exit dec_lance_platform_remove(void)
 }
 
 #ifdef CONFIG_TC
-static int __init dec_lance_tc_probe(struct device *dev);
+static int __devinit dec_lance_tc_probe(struct device *dev);
 static int __exit dec_lance_tc_remove(struct device *dev);
 
 static const struct tc_device_id dec_lance_tc_table[] = {
@@ -1345,7 +1345,7 @@ static struct tc_driver dec_lance_tc_driver = {
        },
 };
 
-static int __init dec_lance_tc_probe(struct device *dev)
+static int __devinit dec_lance_tc_probe(struct device *dev)
 {
         int status = dec_lance_probe(dev, PMAD_LANCE);
         if (!status)
index 7acb3ed..2602852 100644 (file)
@@ -677,7 +677,7 @@ static int ibmveth_close(struct net_device *netdev)
        if (!adapter->pool_config)
                netif_stop_queue(netdev);
 
-       free_irq(netdev->irq, netdev);
+       h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
@@ -689,6 +689,8 @@ static int ibmveth_close(struct net_device *netdev)
                                     lpar_rc);
        }
 
+       free_irq(netdev->irq, netdev);
+
        adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
 
        ibmveth_cleanup(adapter);
index 3881918..cea37e0 100644 (file)
@@ -1722,6 +1722,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;
 
+       /* Catch broken hardware that put the wrong VF device ID in
+        * the PCIe SR-IOV capability.
+        */
+       if (pdev->is_virtfn) {
+               WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+                    pci_name(pdev), pdev->vendor, pdev->device);
+               return -EINVAL;
+       }
+
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
index 7b5d976..74d9b6d 100644 (file)
@@ -6492,6 +6492,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 #endif
        u32 part_num, eec;
 
+       /* Catch broken hardware that put the wrong VF device ID in
+        * the PCIe SR-IOV capability.
+        */
+       if (pdev->is_virtfn) {
+               WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+                    pci_name(pdev), pdev->vendor, pdev->device);
+               return -EINVAL;
+       }
+
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
index 87e8d4c..f15fe2c 100644 (file)
@@ -499,7 +499,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-static void macvlan_setup(struct net_device *dev)
+void macvlan_common_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
@@ -508,6 +508,12 @@ static void macvlan_setup(struct net_device *dev)
        dev->destructor         = free_netdev;
        dev->header_ops         = &macvlan_hard_header_ops,
        dev->ethtool_ops        = &macvlan_ethtool_ops;
+}
+EXPORT_SYMBOL_GPL(macvlan_common_setup);
+
+static void macvlan_setup(struct net_device *dev)
+{
+       macvlan_common_setup(dev);
        dev->tx_queue_len       = 0;
 }
 
@@ -705,7 +711,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
        /* common fields */
        ops->priv_size          = sizeof(struct macvlan_dev);
        ops->get_tx_queues      = macvlan_get_tx_queues;
-       ops->setup              = macvlan_setup;
        ops->validate           = macvlan_validate;
        ops->maxtype            = IFLA_MACVLAN_MAX;
        ops->policy             = macvlan_policy;
@@ -719,6 +724,7 @@ EXPORT_SYMBOL_GPL(macvlan_link_register);
 
 static struct rtnl_link_ops macvlan_link_ops = {
        .kind           = "macvlan",
+       .setup          = macvlan_setup,
        .newlink        = macvlan_newlink,
        .dellink        = macvlan_dellink,
 };
index a8a94e2..ff02b83 100644 (file)
@@ -180,11 +180,18 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
        if (!q)
-               return -ENOLINK;
+               goto drop;
+
+       if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
+               goto drop;
 
        skb_queue_tail(&q->sk.sk_receive_queue, skb);
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
-       return 0;
+       return NET_RX_SUCCESS;
+
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
 }
 
 /*
@@ -235,8 +242,15 @@ static void macvtap_dellink(struct net_device *dev,
        macvlan_dellink(dev, head);
 }
 
+static void macvtap_setup(struct net_device *dev)
+{
+       macvlan_common_setup(dev);
+       dev->tx_queue_len = TUN_READQ_SIZE;
+}
+
 static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .kind           = "macvtap",
+       .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
        .dellink        = macvtap_dellink,
 };
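macvtap_forward above now drops (and frees) the packet instead of returning -ENOLINK, and also drops once the per-queue backlog reaches dev->tx_queue_len. The same bounded-queue discipline in plain C, with made-up types standing in for the kernel's skb and socket queue:

#include <stdio.h>
#include <stdlib.h>

#define QUEUE_LIMIT 4				/* stand-in for dev->tx_queue_len */

struct queue {
	void *pkts[QUEUE_LIMIT];
	int len;
};

enum rx_result { RX_SUCCESS, RX_DROP };

/* Enqueue a packet, or free and drop it once the backlog is full. */
static enum rx_result forward(struct queue *q, void *pkt)
{
	if (!q || q->len >= QUEUE_LIMIT) {
		free(pkt);			/* never leak a dropped packet */
		return RX_DROP;
	}
	q->pkts[q->len++] = pkt;
	return RX_SUCCESS;
}

int main(void)
{
	struct queue q = { .len = 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("packet %d: %s\n", i,
		       forward(&q, malloc(64)) == RX_SUCCESS ? "queued" : "dropped");

	while (q.len > 0)			/* drain whatever was queued */
		free(q.pkts[--q.len]);
	return 0;
}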
index 5b3dfb4..33525bf 100644 (file)
@@ -1168,6 +1168,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
        int interrupts, nr_serviced = 0, i;
        struct ei_device *ei_local;
        int handled = 0;
+       unsigned long flags;
 
        e8390_base = dev->base_addr;
        ei_local = netdev_priv(dev);
@@ -1176,7 +1177,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
         *      Protect the irq test too.
         */
         
-       spin_lock(&ei_local->page_lock);
+       spin_lock_irqsave(&ei_local->page_lock, flags);
 
        if (ei_local->irqlock) 
        {
@@ -1188,7 +1189,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
                           dev->name, inb_p(e8390_base + EN0_ISR),
                           inb_p(e8390_base + EN0_IMR));
 #endif
-               spin_unlock(&ei_local->page_lock);
+               spin_unlock_irqrestore(&ei_local->page_lock, flags);
                return IRQ_NONE;
        }
     
@@ -1261,7 +1262,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
        ei_local->irqlock = 0;
        outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 
-       spin_unlock(&ei_local->page_lock);
+       spin_unlock_irqrestore(&ei_local->page_lock, flags);
        return IRQ_RETVAL(handled);
 }
 
index 96b6cfb..cdc6a5c 100644 (file)
@@ -1316,7 +1316,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
                { 0x7c800000, 0x28000000,       RTL_GIGA_MAC_VER_26 },
 
                /* 8168C family. */
-               { 0x7cf00000, 0x3ca00000,       RTL_GIGA_MAC_VER_24 },
+               { 0x7cf00000, 0x3cb00000,       RTL_GIGA_MAC_VER_24 },
                { 0x7cf00000, 0x3c900000,       RTL_GIGA_MAC_VER_23 },
                { 0x7cf00000, 0x3c800000,       RTL_GIGA_MAC_VER_18 },
                { 0x7c800000, 0x3c800000,       RTL_GIGA_MAC_VER_24 },
index 5e52c75..7f3a53d 100644 (file)
@@ -65,7 +65,7 @@ static int debug_level = ERR_DBG;
 
 /* DEBUG message print. */
 #define DBG_PRINT(dbg_level, fmt, args...) do {                        \
-       if (dbg_level >= debug_level)                           \
+       if (dbg_level <= debug_level)                           \
                pr_info(fmt, ##args);                           \
        } while (0)
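The s2io fix above inverts the comparison so a message is printed when its level is at or below the configured debug_level, i.e. lower numeric values are more severe and always make it through. A minimal gate of the same shape, simplified to single-string messages and invented level names:

#include <stdio.h>

enum { ERR_DBG = 0, INFO_DBG = 2 };	/* illustrative severity levels */

static int debug_level = ERR_DBG;	/* default: errors only         */

#define DBG_PRINT(lvl, msg) do {			\
		if ((lvl) <= debug_level)		\
			puts(msg);			\
	} while (0)

int main(void)
{
	DBG_PRINT(ERR_DBG,  "error: shown at any setting");
	DBG_PRINT(INFO_DBG, "info: suppressed while debug_level < 2");

	debug_level = INFO_DBG;
	DBG_PRINT(INFO_DBG, "info: now visible");
	return 0;
}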
 
index 6ad6fe7..6304259 100644 (file)
@@ -736,8 +736,18 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                        else if (sinfo->gso_type & SKB_GSO_UDP)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
-                       else
-                               BUG();
+                       else {
+                               printk(KERN_ERR "tun: unexpected GSO type: "
+                                      "0x%x, gso_size %d, hdr_len %d\n",
+                                      sinfo->gso_type, gso.gso_size,
+                                      gso.hdr_len);
+                               print_hex_dump(KERN_ERR, "tun: ",
+                                              DUMP_PREFIX_NONE,
+                                              16, 1, skb->head,
+                                              min((int)gso.hdr_len, 64), true);
+                               WARN_ON_ONCE(1);
+                               return -EINVAL;
+                       }
                        if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                                gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
                } else
index 2d7c96d..eb80243 100644 (file)
@@ -152,6 +152,7 @@ enum {
        /* Device IDs */
        USB_DEVICE_ID_I6050 = 0x0186,
        USB_DEVICE_ID_I6050_2 = 0x0188,
+       USB_DEVICE_ID_I6250 = 0x0187,
 };
 
 
index 0d5081d..d3365ac 100644 (file)
@@ -491,6 +491,7 @@ int i2400mu_probe(struct usb_interface *iface,
        switch (id->idProduct) {
        case USB_DEVICE_ID_I6050:
        case USB_DEVICE_ID_I6050_2:
+       case USB_DEVICE_ID_I6250:
                i2400mu->i6050 = 1;
                break;
        default:
@@ -739,6 +740,7 @@ static
 struct usb_device_id i2400mu_id_table[] = {
        { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
        { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+       { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
        { USB_DEVICE(0x8086, 0x0181) },
        { USB_DEVICE(0x8086, 0x1403) },
        { USB_DEVICE(0x8086, 0x1405) },
index 77b3591..23c15aa 100644 (file)
@@ -730,13 +730,17 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
 
        /* RX */
        if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
-               goto err;
+               goto err_rx;
 
        /* Register Read */
        if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
-               goto err;
+               goto err_reg;
 
        return 0;
+err_reg:
+       ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+err_rx:
+       ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
 err:
        return -ENOMEM;
 }
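The ath9k fix gives each allocation stage its own error label so a failure part-way through unwinds only what was already set up, falling through the labels in reverse order. The same staged-cleanup idiom as a standalone sketch; the three malloc calls merely stand in for the TX, RX and register-read URB allocations:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	void *tx, *rx, *reg;

	tx = malloc(128);			/* stage 1 */
	if (!tx)
		goto err;

	rx = malloc(128);			/* stage 2 */
	if (!rx)
		goto err_rx;

	reg = malloc(32);			/* stage 3 */
	if (!reg)
		goto err_reg;

	free(reg);				/* demo only: tear everything down */
	free(rx);
	free(tx);
	return 0;

err_reg:					/* unwind in reverse setup order   */
	free(rx);
err_rx:
	free(tx);
err:
	return -1;
}

int main(void)
{
	printf("setup() -> %d\n", setup());
	return 0;
}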
index ca6065b..e3e5291 100644 (file)
@@ -844,9 +844,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        int dma_type;
 
        if (edma)
-               dma_type = DMA_FROM_DEVICE;
-       else
                dma_type = DMA_BIDIRECTIONAL;
+       else
+               dma_type = DMA_FROM_DEVICE;
 
        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
        spin_lock_bh(&sc->rx.rxbuflock);
index d24dc7d..972a9c3 100644 (file)
@@ -330,6 +330,7 @@ static int prism2_pci_probe(struct pci_dev *pdev,
 
         dev->irq = pdev->irq;
         hw_priv->mem_start = mem;
+       dev->base_addr = (unsigned long) mem;
 
        prism2_pci_cor_sreset(local);
 
index c2a453a..dc43ebd 100644 (file)
@@ -97,6 +97,17 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
        spin_lock_irqsave(&priv->sta_lock, flags);
        memset(priv->stations, 0, sizeof(priv->stations));
        priv->num_stations = 0;
+
+       /*
+        * Remove all key information that is not stored as part of station
+        * information since mac80211 may not have had a
+        * chance to remove all the keys. When device is reconfigured by
+        * mac80211 after an error all keys will be reconfigured.
+        */
+       priv->ucode_key_table = 0;
+       priv->key_mapping_key = 0;
+       memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
+
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
index 3ae468c..f20d3ee 100644 (file)
@@ -853,6 +853,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                    BIT(NL80211_IFTYPE_MESH_POINT) |
                    BIT(NL80211_IFTYPE_WDS);
 
+       /*
+        * Initialize configuration work.
+        */
+       INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
+
        /*
         * Let the driver probe the device to detect the capabilities.
         */
@@ -862,11 +867,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                goto exit;
        }
 
-       /*
-        * Initialize configuration work.
-        */
-       INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
-
        /*
         * Allocate queue array.
         */
index 92379e2..2aaa131 100644 (file)
@@ -156,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
                                             pcibios_align_resource, dev);
        }
 
+       if (ret < 0 && dev->fw_addr[resno]) {
+               struct resource *root, *conflict;
+               resource_size_t start, end;
+
+               /*
+                * If we failed to assign anything, let's try the address
+                * where firmware left it.  That at least has a chance of
+                * working, which is better than just leaving it disabled.
+                */
+
+               if (res->flags & IORESOURCE_IO)
+                       root = &ioport_resource;
+               else
+                       root = &iomem_resource;
+
+               start = res->start;
+               end = res->end;
+               res->start = dev->fw_addr[resno];
+               res->end = res->start + size - 1;
+               dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+                        resno, res);
+               conflict = request_resource_conflict(root, res);
+               if (conflict) {
+                       dev_info(&dev->dev,
+                                "BAR %d: %pR conflicts with %s %pR\n", resno,
+                                res, conflict->name, conflict);
+                       res->start = start;
+                       res->end = end;
+               } else
+                       ret = 0;
+       }
+
        if (!ret) {
                res->flags &= ~IORESOURCE_STARTALIGN;
                dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
index 29f91fa..a4cd9ad 100644 (file)
@@ -857,8 +857,10 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev)
 {
        pcmcia_release_configuration(p_dev);
        pcmcia_release_io(p_dev, &p_dev->io);
-       if (p_dev->_irq)
+       if (p_dev->_irq) {
                free_irq(p_dev->irq, p_dev->priv);
+               p_dev->_irq = 0;
+       }
        if (p_dev->win)
                pcmcia_release_window(p_dev, p_dev->win);
 }
index df4532e..f370476 100644 (file)
@@ -178,7 +178,6 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
                               unsigned long val,
                               struct cpufreq_freqs *freqs)
 {
-#warning "it's not clear if this is right since the core CPU (N) clock has no effect on the memory (L) clock"
        switch (val) {
        case CPUFREQ_PRECHANGE:
                if (freqs->new > freqs->old) {
@@ -186,7 +185,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
                               "pre-updating\n",
                               freqs->new / 1000, (freqs->new / 100) % 10,
                               freqs->old / 1000, (freqs->old / 100) % 10);
-                       pxa2xx_pcmcia_set_mcxx(skt, freqs->new);
+                       pxa2xx_pcmcia_set_timing(skt);
                }
                break;
 
@@ -196,7 +195,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
                               "post-updating\n",
                               freqs->new / 1000, (freqs->new / 100) % 10,
                               freqs->old / 1000, (freqs->old / 100) % 10);
-                       pxa2xx_pcmcia_set_mcxx(skt, freqs->new);
+                       pxa2xx_pcmcia_set_timing(skt);
                }
                break;
        }
index 40658e3..bb2f1fb 100644 (file)
@@ -489,7 +489,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
                mutex_unlock(&ipclock);
                return -ENODEV;
        }
-       ipc_command(cmd << 12 | sub);
+       ipc_command(sub << 12 | cmd);
        err = busy_loop();
        mutex_unlock(&ipclock);
        return err;
@@ -501,9 +501,9 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
  *     @cmd: command
  *     @sub: sub type
  *     @in: input data
- *     @inlen: input length
+ *     @inlen: input length in dwords
  *     @out: output data
- *     @outlein: output length
+ *     @outlen: output length in dwords
  *
  *     Issue a command to the SCU which involves data transfers. Do the
  *     data copies under the lock but leave it for the caller to interpret
@@ -524,7 +524,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
        for (i = 0; i < inlen; i++)
                ipc_data_writel(*in++, 4 * i);
 
-       ipc_command((cmd << 12) | sub | (inlen << 18));
+       ipc_command((sub << 12) | cmd | (inlen << 18));
        err = busy_loop();
 
        for (i = 0; i < outlen; i++)
@@ -556,6 +556,10 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
        u32 cmd = 0;
 
        mutex_lock(&ipclock);
+       if (ipcdev.pdev == NULL) {
+               mutex_unlock(&ipclock);
+               return -ENODEV;
+       }
        cmd = (addr >> 24) & 0xFF;
        if (cmd == IPC_I2C_READ) {
                writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
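The SCU IPC fix swaps the field positions: the command id now occupies the low bits, the sub-command sits at bit 12, and the dword count at bit 18. A tiny sketch of composing such a word, with the layout assumed from the shifts in the patch and arbitrary example values rather than real SCU opcodes:

#include <stdio.h>

/* Compose an IPC command word: cmd in the low bits, sub-command at bit 12,
 * input length (in dwords) at bit 18 -- the layout used by the patch above. */
static unsigned int ipc_word(unsigned int cmd, unsigned int sub, unsigned int inlen)
{
	return (sub << 12) | cmd | (inlen << 18);
}

int main(void)
{
	printf("cmd=0x0e sub=0x02 inlen=3 -> 0x%06x\n", ipc_word(0x0e, 0x02, 3));
	return 0;
}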
index d762a0c..84d3c43 100644 (file)
 struct ds278x_info;
 
 struct ds278x_battery_ops {
-       int     (*get_current)(struct ds278x_info *info, int *current_uA);
-       int     (*get_voltage)(struct ds278x_info *info, int *voltage_uA);
-       int     (*get_capacity)(struct ds278x_info *info, int *capacity_uA);
-
+       int (*get_battery_current)(struct ds278x_info *info, int *current_uA);
+       int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uA);
+       int (*get_battery_capacity)(struct ds278x_info *info, int *capacity_uA);
 };
 
 #define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
@@ -163,7 +162,7 @@ static int ds2782_get_capacity(struct ds278x_info *info, int *capacity)
        if (err)
                return err;
        *capacity = raw;
-       return raw;
+       return 0;
 }
 
 static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
@@ -213,11 +212,11 @@ static int ds278x_get_status(struct ds278x_info *info, int *status)
        int current_uA;
        int capacity;
 
-       err = info->ops->get_current(info, &current_uA);
+       err = info->ops->get_battery_current(info, &current_uA);
        if (err)
                return err;
 
-       err = info->ops->get_capacity(info, &capacity);
+       err = info->ops->get_battery_capacity(info, &capacity);
        if (err)
                return err;
 
@@ -246,15 +245,15 @@ static int ds278x_battery_get_property(struct power_supply *psy,
                break;
 
        case POWER_SUPPLY_PROP_CAPACITY:
-               ret = info->ops->get_capacity(info, &val->intval);
+               ret = info->ops->get_battery_capacity(info, &val->intval);
                break;
 
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               ret = info->ops->get_voltage(info, &val->intval);
+               ret = info->ops->get_battery_voltage(info, &val->intval);
                break;
 
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = info->ops->get_current(info, &val->intval);
+               ret = info->ops->get_battery_current(info, &val->intval);
                break;
 
        case POWER_SUPPLY_PROP_TEMP:
@@ -307,14 +306,14 @@ enum ds278x_num_id {
 
 static struct ds278x_battery_ops ds278x_ops[] = {
        [DS2782] = {
-               .get_current  = ds2782_get_current,
-               .get_voltage  = ds2782_get_voltage,
-               .get_capacity = ds2782_get_capacity,
+               .get_battery_current  = ds2782_get_current,
+               .get_battery_voltage  = ds2782_get_voltage,
+               .get_battery_capacity = ds2782_get_capacity,
        },
        [DS2786] = {
-               .get_current  = ds2786_get_current,
-               .get_voltage  = ds2786_get_voltage,
-               .get_capacity = ds2786_get_capacity,
+               .get_battery_current  = ds2786_get_current,
+               .get_battery_voltage  = ds2786_get_voltage,
+               .get_battery_capacity = ds2786_get_capacity,
        }
 };
 
index 7b14a67..1179099 100644 (file)
@@ -286,7 +286,7 @@ static int ab3100_list_voltage_regulator(struct regulator_dev *reg,
 {
        struct ab3100_regulator *abreg = reg->reg_data;
 
-       if (selector > abreg->voltages_len)
+       if (selector >= abreg->voltages_len)
                return -EINVAL;
        return abreg->typ_voltages[selector];
 }
@@ -318,7 +318,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
        regval &= 0xE0;
        regval >>= 5;
 
-       if (regval > abreg->voltages_len) {
+       if (regval >= abreg->voltages_len) {
                dev_err(&reg->dev,
                        "regulator register %02x contains an illegal voltage setting\n",
                        abreg->regreg);
index 14b4576..8152d65 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/tps6507x.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/mfd/tps6507x.h>
@@ -101,9 +102,12 @@ struct tps_info {
        unsigned max_uV;
        u8 table_len;
        const u16 *table;
+
+       /* Does the DCDC high or low register define the output voltage? */
+       bool defdcdc_default;
 };
 
-static const struct tps_info tps6507x_pmic_regs[] = {
+static struct tps_info tps6507x_pmic_regs[] = {
        {
                .name = "VDCDC1",
                .min_uV = 725000,
@@ -145,7 +149,7 @@ struct tps6507x_pmic {
        struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
        struct tps6507x_dev *mfd;
        struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
-       const struct tps_info *info[TPS6507X_NUM_REGULATOR];
+       struct tps_info *info[TPS6507X_NUM_REGULATOR];
        struct mutex io_lock;
 };
 static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
@@ -341,10 +345,16 @@ static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev)
                reg = TPS6507X_REG_DEFDCDC1;
                break;
        case TPS6507X_DCDC_2:
-               reg = TPS6507X_REG_DEFDCDC2_LOW;
+               if (tps->info[dcdc]->defdcdc_default)
+                       reg = TPS6507X_REG_DEFDCDC2_HIGH;
+               else
+                       reg = TPS6507X_REG_DEFDCDC2_LOW;
                break;
        case TPS6507X_DCDC_3:
-               reg = TPS6507X_REG_DEFDCDC3_LOW;
+               if (tps->info[dcdc]->defdcdc_default)
+                       reg = TPS6507X_REG_DEFDCDC3_HIGH;
+               else
+                       reg = TPS6507X_REG_DEFDCDC3_LOW;
                break;
        default:
                return -EINVAL;
@@ -370,10 +380,16 @@ static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
                reg = TPS6507X_REG_DEFDCDC1;
                break;
        case TPS6507X_DCDC_2:
-               reg = TPS6507X_REG_DEFDCDC2_LOW;
+               if (tps->info[dcdc]->defdcdc_default)
+                       reg = TPS6507X_REG_DEFDCDC2_HIGH;
+               else
+                       reg = TPS6507X_REG_DEFDCDC2_LOW;
                break;
        case TPS6507X_DCDC_3:
-               reg = TPS6507X_REG_DEFDCDC3_LOW;
+               if (tps->info[dcdc]->defdcdc_default)
+                       reg = TPS6507X_REG_DEFDCDC3_HIGH;
+               else
+                       reg = TPS6507X_REG_DEFDCDC3_LOW;
                break;
        default:
                return -EINVAL;
@@ -532,7 +548,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
 {
        struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
        static int desc_id;
-       const struct tps_info *info = &tps6507x_pmic_regs[0];
+       struct tps_info *info = &tps6507x_pmic_regs[0];
        struct regulator_init_data *init_data;
        struct regulator_dev *rdev;
        struct tps6507x_pmic *tps;
@@ -569,6 +585,12 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
        for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
                /* Register the regulators */
                tps->info[i] = info;
+               if (init_data->driver_data) {
+                       struct tps6507x_reg_platform_data *data =
+                                                       init_data->driver_data;
+                       tps->info[i]->defdcdc_default = data->defdcdc_default;
+               }
+
                tps->desc[i].name = info->name;
                tps->desc[i].id = desc_id++;
                tps->desc[i].n_voltages = num_voltages[i];
index 723cd1f..0e6ed7d 100644 (file)
@@ -1495,7 +1495,7 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
        if (ret != 0) {
                dev_err(wm8350->dev, "Failed to register regulator %d: %d\n",
                        reg, ret);
-               platform_device_del(pdev);
+               platform_device_put(pdev);
                wm8350->pmic.pdev[reg] = NULL;
        }
 
index 9718aaa..600b890 100644 (file)
@@ -168,7 +168,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
                return -EIO;
        }
 
-       err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG,
+       err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
                (data | RX8581_CTRL_STOP));
        if (err < 0) {
                dev_err(&client->dev, "Unable to write control register\n");
@@ -182,6 +182,20 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
                return -EIO;
        }
 
+       /* get VLF and clear it */
+       data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
+       if (data < 0) {
+               dev_err(&client->dev, "Unable to read flag register\n");
+               return -EIO;
+       }
+
+       err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG,
+               (data & ~(RX8581_FLAG_VLF)));
+       if (err != 0) {
+               dev_err(&client->dev, "Unable to write flag register\n");
+               return -EIO;
+       }
+
        /* Restart the clock */
        data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL);
        if (data < 0) {
@@ -189,8 +203,8 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
                return -EIO;
        }
 
-       err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG,
-               (data | ~(RX8581_CTRL_STOP)));
+       err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
+               (data & ~(RX8581_CTRL_STOP)));
        if (err != 0) {
                dev_err(&client->dev, "Unable to write control register\n");
                return -EIO;
index 34d51dd..bed7b46 100644 (file)
@@ -948,8 +948,10 @@ static ssize_t dasd_alias_show(struct device *dev,
        if (device->discipline && device->discipline->get_uid &&
            !device->discipline->get_uid(device, &uid)) {
                if (uid.type == UA_BASE_PAV_ALIAS ||
-                   uid.type == UA_HYPER_PAV_ALIAS)
+                   uid.type == UA_HYPER_PAV_ALIAS) {
+                       dasd_put_device(device);
                        return sprintf(buf, "1\n");
+               }
        }
        dasd_put_device(device);
 
index ce7cb87..407d0e9 100644 (file)
@@ -713,7 +713,7 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
        if (ret)
                goto out_free;
-       memcpy(desc, &chsc_resp->data, chsc_resp->length);
+       memcpy(desc, &chsc_resp->data, sizeof(*desc));
 out_free:
        kfree(chsc_resp);
        return ret;
index e3dbeda..fd068bc 100644 (file)
@@ -714,6 +714,14 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
        if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
                return ZFCP_ERP_FAILED;
 
+       if (mempool_resize(act->adapter->pool.status_read_data,
+                          act->adapter->stat_read_buf_num, GFP_KERNEL))
+               return ZFCP_ERP_FAILED;
+
+       if (mempool_resize(act->adapter->pool.status_read_req,
+                          act->adapter->stat_read_buf_num, GFP_KERNEL))
+               return ZFCP_ERP_FAILED;
+
        atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
        if (zfcp_status_read_refill(act->adapter))
                return ZFCP_ERP_FAILED;
index 9ac6a6e..71663fb 100644 (file)
@@ -496,7 +496,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 
        adapter->hydra_version = bottom->adapter_type;
        adapter->timer_ticks = bottom->timer_interval;
-       adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
+       adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+                                        (u16)FSF_STATUS_READS_RECOM);
 
        if (fc_host_permanent_port_name(shost) == -1)
                fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -719,11 +720,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
        zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
                           req->qtcb, sizeof(struct fsf_qtcb));
 
-       if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
-               zfcp_fsf_req_free(req);
-               return ERR_PTR(-EIO);
-       }
-
        return req;
 }
 
@@ -981,7 +977,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
        }
 
        /* use single, unchained SBAL if it can hold the request */
-       if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+       if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
                zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
                                                sg_req, sg_resp);
                return 0;
index 28117e1..6fa5e04 100644 (file)
@@ -251,7 +251,8 @@ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
        struct zfcp_qdio_queue *req_q = &qdio->req_q;
 
        spin_lock_bh(&qdio->req_q_lock);
-       if (atomic_read(&req_q->count))
+       if (atomic_read(&req_q->count) ||
+           !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return 1;
        spin_unlock_bh(&qdio->req_q_lock);
        return 0;
@@ -274,8 +275,13 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
        spin_unlock_bh(&qdio->req_q_lock);
        ret = wait_event_interruptible_timeout(qdio->req_q_wq,
                               zfcp_qdio_sbal_check(qdio), 5 * HZ);
+
+       if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+               return -EIO;
+
        if (ret > 0)
                return 0;
+
        if (!ret) {
                atomic_inc(&qdio->req_q_full);
                /* assume hanging outbound queue, try queue recovery */
@@ -375,6 +381,8 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
        atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
        spin_unlock_bh(&qdio->req_q_lock);
 
+       wake_up(&qdio->req_q_wq);
+
        qdio_shutdown(qdio->adapter->ccw_device,
                      QDIO_FLAG_CLEANUP_USING_CLEAR);
 
index d53e62a..aacbe14 100644 (file)
@@ -554,7 +554,7 @@ static int opiocgetnext(unsigned int cmd, void __user *argp)
 static int openprom_bsd_ioctl(struct file * file,
                              unsigned int cmd, unsigned long arg)
 {
-       DATA *data = (DATA *) file->private_data;
+       DATA *data = file->private_data;
        void __user *argp = (void __user *)arg;
        int err;
 
@@ -601,7 +601,7 @@ static int openprom_bsd_ioctl(struct file * file,
 static long openprom_ioctl(struct file * file,
                           unsigned int cmd, unsigned long arg)
 {
-       DATA *data = (DATA *) file->private_data;
+       DATA *data = file->private_data;
 
        switch (cmd) {
        case OPROMGETOPT:
index a864ccc..989b9a8 100644 (file)
@@ -277,6 +277,12 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                goto reg_crq_failed;
        }
 
+       queue->cur = 0;
+       spin_lock_init(&queue->lock);
+
+       tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
+                    (unsigned long)hostdata);
+
        if (request_irq(vdev->irq,
                        rpavscsi_handle_event,
                        0, "ibmvscsi", (void *)hostdata) != 0) {
@@ -291,15 +297,10 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                goto req_irq_failed;
        }
 
-       queue->cur = 0;
-       spin_lock_init(&queue->lock);
-
-       tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
-                    (unsigned long)hostdata);
-
        return retrc;
 
       req_irq_failed:
+       tasklet_kill(&hostdata->srp_task);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
index 82ea4a8..f820cff 100644 (file)
@@ -1129,20 +1129,22 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
 }
 
 /**
- * ipr_format_resource_path - Format the resource path for printing.
+ * ipr_format_res_path - Format the resource path for printing.
  * @res_path:  resource path
  * @buf:       buffer
  *
  * Return value:
  *     pointer to buffer
  **/
-static char *ipr_format_resource_path(u8 *res_path, char *buffer)
+static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
 {
        int i;
+       char *p = buffer;
 
-       sprintf(buffer, "%02X", res_path[0]);
-       for (i=1; res_path[i] != 0xff; i++)
-               sprintf(buffer, "%s-%02X", buffer, res_path[i]);
+       *p = '\0';
+       p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
+       for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
+               p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
 
        return buffer;
 }
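The rewritten helper above replaces sprintf-into-its-own-output-buffer (undefined behaviour) with bounded snprintf calls that advance a write pointer and are always told how much room is left. A standalone sketch of the same pattern; the path bytes are made up and the 0xff terminator mirrors the driver's convention:

#include <stdio.h>

/* Format bytes as "AA-BB-CC..." into buf, never writing past len bytes. */
static char *format_path(const unsigned char *path, int n, char *buf, int len)
{
	char *p = buf;
	int i;

	if (len > 0)
		*p = '\0';			/* valid result even if nothing is appended */
	for (i = 0; i < n && path[i] != 0xff && p < buf + len; i++)
		p += snprintf(p, (size_t)(buf + len - p),
			      i ? "-%02X" : "%02X", path[i]);
	return buf;
}

int main(void)
{
	unsigned char path[] = { 0x00, 0x02, 0x1a, 0xff };
	char buf[16];

	printf("resource path: %s\n",
	       format_path(path, (int)sizeof(path), buf, (int)sizeof(buf)));
	return 0;
}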
@@ -1187,7 +1189,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
 
                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
-                                   ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+                                   ipr_format_res_path(res->res_path, buffer,
+                                                       sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1573,7 +1576,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
                ipr_err_separator;
 
                ipr_err("Device %d : %s", i + 1,
-                        ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
+                        ipr_format_res_path(dev_entry->res_path, buffer,
+                                            sizeof(buffer)));
                ipr_log_ext_vpd(&dev_entry->vpd);
 
                ipr_err("-----New Device Information-----\n");
@@ -1919,13 +1923,14 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
 
                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
                                     path_active_desc[i].desc, path_state_desc[j].desc,
-                                    ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+                                    ipr_format_res_path(fabric->res_path, buffer,
+                                                        sizeof(buffer)));
                        return;
                }
        }
 
        ipr_err("Path state=%02X Resource Path=%s\n", path_state,
-               ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+               ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
 }
 
 static const struct {
@@ -2066,7 +2071,8 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
 
                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
                                     path_status_desc[j].desc, path_type_desc[i].desc,
-                                    ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+                                    ipr_format_res_path(cfg->res_path, buffer,
+                                                        sizeof(buffer)),
                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                        return;
@@ -2074,7 +2080,7 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
        }
        ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
                     "WWN=%08X%08X\n", cfg->type_status,
-                    ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+                    ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 }
@@ -2139,7 +2145,7 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
 
        ipr_err("RAID %s Array Configuration: %s\n",
                error->protection_level,
-               ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
+               ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
 
        ipr_err_separator;
 
@@ -2160,9 +2166,11 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
                ipr_err("Array Member %d:\n", i);
                ipr_log_ext_vpd(&array_entry->vpd);
                ipr_err("Current Location: %s",
-                        ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
+                        ipr_format_res_path(array_entry->res_path, buffer,
+                                            sizeof(buffer)));
                ipr_err("Expected Location: %s",
-                        ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
+                        ipr_format_res_path(array_entry->expected_res_path,
+                                            buffer, sizeof(buffer)));
 
                ipr_err_separator;
        }
@@ -4079,7 +4087,8 @@ static struct device_attribute ipr_adapter_handle_attr = {
 };
 
 /**
- * ipr_show_resource_path - Show the resource path for this device.
+ * ipr_show_resource_path - Show the resource path or the resource address for
+ *                         this device.
  * @dev:       device struct
  * @buf:       buffer
  *
@@ -4097,9 +4106,14 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        res = (struct ipr_resource_entry *)sdev->hostdata;
-       if (res)
+       if (res && ioa_cfg->sis64)
                len = snprintf(buf, PAGE_SIZE, "%s\n",
-                              ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+                              ipr_format_res_path(res->res_path, buffer,
+                                                  sizeof(buffer)));
+       else if (res)
+               len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
+                              res->bus, res->target, res->lun);
+
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return len;
 }
@@ -4351,7 +4365,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
                        scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
                if (ioa_cfg->sis64)
                        sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
-                                   ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+                                   ipr_format_res_path(res->res_path, buffer,
+                                                       sizeof(buffer)));
                return 0;
        }
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
index 9ecd225..b965f35 100644 (file)
@@ -1684,8 +1684,9 @@ struct ipr_ucode_image_header {
        if (ipr_is_device(hostrcb)) {                                   \
                if ((hostrcb)->ioa_cfg->sis64) {                        \
                        printk(KERN_ERR IPR_NAME ": %s: " fmt,          \
-                               ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \
-                                       &hostrcb->rp_buffer[0]),        \
+                               ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
+                                       hostrcb->rp_buffer,             \
+                                       sizeof(hostrcb->rp_buffer)),    \
                                __VA_ARGS__);                           \
                } else {                                                \
                        ipr_ra_err((hostrcb)->ioa_cfg,                  \
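The ipr hunks above replace ipr_format_resource_path(path, buf) with ipr_format_res_path(path, buf, sizeof(buf)), passing the destination size so the helper can bound its writes. A minimal userspace sketch of that bounded-formatting pattern follows; format_res_path and RES_PATH_BYTES are illustrative names, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define RES_PATH_BYTES 8                /* illustrative path length */

/* Render a byte path as "00-01-02-..." without ever writing past the
 * caller-supplied buffer; snprintf() bounds every append by the space left. */
static char *format_res_path(const uint8_t *path, char *buf, int len)
{
	int off = 0;

	buf[0] = '\0';
	for (int i = 0; i < RES_PATH_BYTES && off < len; i++)
		off += snprintf(buf + off, len - off, "%s%02X",
				i ? "-" : "", path[i]);
	return buf;
}

int main(void)
{
	uint8_t path[RES_PATH_BYTES] = { 0x00, 0x01, 0x02, 0xff };
	char buf[32];

	printf("Resource path: %s\n", format_res_path(path, buf, sizeof(buf)));
	return 0;
}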
index eed3c2d..a182def 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/io.h>
+#include <asm/ioctls.h>
 
 #include <asm/mach/serial_at91.h>
 #include <mach/board.h>
index ed7d958..544f2e2 100644 (file)
@@ -71,7 +71,9 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
 
        con->index = line;
        drv->cons = con;
-       add_preferred_console(con->name, line, NULL);
+
+       if (!console_set_on_cmdline)
+               add_preferred_console(con->name, line, NULL);
 
        return 1;
 }
index 234459c..ffbf455 100644 (file)
@@ -1500,20 +1500,25 @@ out_unmap:
 static int __devexit su_remove(struct of_device *op)
 {
        struct uart_sunsu_port *up = dev_get_drvdata(&op->dev);
+       bool kbdms = false;
 
        if (up->su_type == SU_PORT_MS ||
-           up->su_type == SU_PORT_KBD) {
+           up->su_type == SU_PORT_KBD)
+               kbdms = true;
+
+       if (kbdms) {
 #ifdef CONFIG_SERIO
                serio_unregister_port(&up->serio);
 #endif
-               kfree(up);
-       } else if (up->port.type != PORT_UNKNOWN) {
+       } else if (up->port.type != PORT_UNKNOWN)
                uart_remove_one_port(&sunsu_reg, &up->port);
-       }
 
        if (up->port.membase)
                of_iounmap(&op->resource[0], up->port.membase, up->reg_size);
 
+       if (kbdms)
+               kfree(up);
+
        dev_set_drvdata(&op->dev, NULL);
 
        return 0;
index ffa111a..97ab0a8 100644 (file)
@@ -66,28 +66,6 @@ struct mpc8xxx_spi_reg {
        __be32 receive;
 };
 
-/* SPI Parameter RAM */
-struct spi_pram {
-       __be16  rbase;  /* Rx Buffer descriptor base address */
-       __be16  tbase;  /* Tx Buffer descriptor base address */
-       u8      rfcr;   /* Rx function code */
-       u8      tfcr;   /* Tx function code */
-       __be16  mrblr;  /* Max receive buffer length */
-       __be32  rstate; /* Internal */
-       __be32  rdp;    /* Internal */
-       __be16  rbptr;  /* Internal */
-       __be16  rbc;    /* Internal */
-       __be32  rxtmp;  /* Internal */
-       __be32  tstate; /* Internal */
-       __be32  tdp;    /* Internal */
-       __be16  tbptr;  /* Internal */
-       __be16  tbc;    /* Internal */
-       __be32  txtmp;  /* Internal */
-       __be32  res;    /* Tx temp. */
-       __be16  rpbase; /* Relocation pointer (CPM1 only) */
-       __be16  res1;   /* Reserved */
-};
-
 /* SPI Controller mode register definitions */
 #define        SPMODE_LOOP             (1 << 30)
 #define        SPMODE_CI_INACTIVEHIGH  (1 << 29)
index 61d7550..162c95a 100644 (file)
@@ -1596,6 +1596,7 @@ static const struct usb_device_id acm_ids[] = {
        { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
        { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
        { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
+       { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
 
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
 
index 83e7bbb..70cccc7 100644 (file)
@@ -1982,6 +1982,8 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                    (portstatus & USB_PORT_STAT_ENABLE)) {
                        if (hub_is_wusb(hub))
                                udev->speed = USB_SPEED_WIRELESS;
+                       else if (portstatus & USB_PORT_STAT_SUPER_SPEED)
+                               udev->speed = USB_SPEED_SUPER;
                        else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
                                udev->speed = USB_SPEED_HIGH;
                        else if (portstatus & USB_PORT_STAT_LOW_SPEED)
index f22d03d..db99c08 100644 (file)
@@ -41,6 +41,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Philips PSC805 audio device */
        { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Artisman Watchdog Dongle */
+       { USB_DEVICE(0x04b4, 0x0526), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* Roland SC-8820 */
        { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -64,6 +68,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
        { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
+       /* Broadcom BCM92035DGROM BT dongle */
+       { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index d69eccf..2aaa0f7 100644 (file)
@@ -136,7 +136,7 @@ struct ffs_data {
         * handling setup requests immidiatelly user space may be so
         * slow that another setup will be sent to the gadget but this
         * time not to us but another function and then there could be
-        * a race.  Is taht the case? Or maybe we can use cdev->req
+        * a race.  Is that the case? Or maybe we can use cdev->req
         * after all, maybe we just need some spinlock for that? */
        struct usb_request              *ep0req;                /* P: mutex */
        struct completion               ep0req_completion;      /* P: mutex */
index 85b0d89..9807624 100644 (file)
@@ -2561,7 +2561,7 @@ static void pxa_udc_shutdown(struct platform_device *_dev)
                udc_disable(udc);
 }
 
-#ifdef CONFIG_CPU_PXA27x
+#ifdef CONFIG_PXA27x
 extern void pxa27x_clear_otgph(void);
 #else
 #define pxa27x_clear_otgph()   do {} while (0)
index e724a05..ea2b3c7 100644 (file)
@@ -735,6 +735,10 @@ static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
        else
                dev->ep0state = EP0_OUT_DATA_PHASE;
 
+       if (!dev->driver)
+               return;
+
+       /* deliver the request to the gadget driver */
        ret = dev->driver->setup(&dev->gadget, crq);
        if (ret < 0) {
                if (dev->req_config) {
index a18debd..4181638 100644 (file)
@@ -203,7 +203,7 @@ static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci)
        __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
 }
 
-#ifdef CONFIG_CPU_PXA27x
+#ifdef CONFIG_PXA27x
 extern void pxa27x_clear_otgph(void);
 #else
 #define pxa27x_clear_otgph()   do {} while (0)
index fd9e03a..2eb658d 100644 (file)
@@ -835,6 +835,27 @@ fail:
        return 0;
 }
 
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+               struct usb_device *udev)
+{
+       struct xhci_virt_device *virt_dev;
+       struct xhci_ep_ctx      *ep0_ctx;
+       struct xhci_ring        *ep_ring;
+
+       virt_dev = xhci->devs[udev->slot_id];
+       ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
+       ep_ring = virt_dev->eps[0].ring;
+       /*
+        * FIXME we don't keep track of the dequeue pointer very well after a
+        * Set TR dequeue pointer, so we're setting the dequeue pointer of the
+        * host to our enqueue pointer.  This should only be called after a
+        * configured device has reset, so all control transfers should have
+        * been completed or cancelled before the reset.
+        */
+       ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
+       ep0_ctx->deq |= ep_ring->cycle_state;
+}
+
 /* Setup an xHCI virtual device for a Set Address command */
 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 {
@@ -1002,7 +1023,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
        return EP_INTERVAL(interval);
 }
 
-/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
+/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
  * High speed endpoint descriptors can define "the number of additional
  * transaction opportunities per microframe", but that goes in the Max Burst
  * endpoint context field.
@@ -1010,7 +1031,8 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
 static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
-       if (udev->speed != USB_SPEED_SUPER)
+       if (udev->speed != USB_SPEED_SUPER ||
+                       !usb_endpoint_xfer_isoc(&ep->desc))
                return 0;
        return ep->ss_ep_comp.bmAttributes;
 }
index 94e6934..bfc99a9 100644 (file)
@@ -2380,16 +2380,19 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                u32 field3, u32 field4, bool command_must_succeed)
 {
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+       int ret;
+
        if (!command_must_succeed)
                reserved_trbs++;
 
-       if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
-               if (!in_interrupt())
-                       xhci_err(xhci, "ERR: No room for command on command ring\n");
+       ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
+                       reserved_trbs, GFP_ATOMIC);
+       if (ret < 0) {
+               xhci_err(xhci, "ERR: No room for command on command ring\n");
                if (command_must_succeed)
                        xhci_err(xhci, "ERR: Reserved TRB counting for "
                                        "unfailable commands failed.\n");
-               return -ENOMEM;
+               return ret;
        }
        queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
                        field4 | xhci->cmd_ring->cycle_state);
index 27345cd..3998f72 100644 (file)
@@ -2134,6 +2134,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        /* If this is a Set Address to an unconfigured device, setup ep 0 */
        if (!udev->config)
                xhci_setup_addressable_virt_dev(xhci, udev);
+       else
+               xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
        /* Otherwise, assume the core has the device configured how it wants */
        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
index 8b4b7d3..6c7e343 100644 (file)
@@ -1292,6 +1292,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+               struct usb_device *udev);
 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
 unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
 unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
index 30d9303..d25814c 100644 (file)
@@ -2436,7 +2436,8 @@ sisusb_open(struct inode *inode, struct file *file)
        }
 
        if (!sisusb->devinit) {
-               if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) {
+               if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
+                   sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
                        if (sisusb_init_gfxdevice(sisusb, 0)) {
                                mutex_unlock(&sisusb->lock);
                                dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
@@ -3166,7 +3167,7 @@ static int sisusb_probe(struct usb_interface *intf,
 
        sisusb->present = 1;
 
-       if (dev->speed == USB_SPEED_HIGH) {
+       if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
                int initscreen = 1;
 #ifdef INCL_SISUSB_CON
                if (sisusb_first_vc > 0 &&
index 05c077f..3c48e77 100644 (file)
@@ -29,19 +29,6 @@ static void tusb_source_power(struct musb *musb, int is_on);
 #define TUSB_REV_MAJOR(reg_val)                ((reg_val >> 4) & 0xf)
 #define TUSB_REV_MINOR(reg_val)                (reg_val & 0xf)
 
-#ifdef CONFIG_PM
-/* REVISIT: These should be only needed if somebody implements off idle */
-void musb_platform_save_context(struct musb *musb,
-                       struct musb_context_registers *musb_context)
-{
-}
-
-void musb_platform_restore_context(struct musb *musb,
-                       struct musb_context_registers *musb_context)
-{
-}
-#endif
-
 /*
  * Checks the revision. We need to use the DMA register as 3.0 does not
  * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
index da7e334..e298dc4 100644 (file)
@@ -691,6 +691,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
                .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+       { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -737,6 +738,14 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
        { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
        { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
+       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
index bbc159a..d01946d 100644 (file)
 #define TELLDUS_VID                    0x1781  /* Vendor ID */
 #define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
 
+/*
+ * RT Systems programming cables for various ham radios
+ */
+#define RTSYSTEMS_VID                  0x2100  /* Vendor ID */
+#define RTSYSTEMS_SERIAL_VX7_PID       0x9e52  /* Serial converter for VX-7 Radios using FT232RL */
+
 /*
  * Bayer Ascensia Contour blood glucose meter USB-converter cable.
  * http://winglucofacts.com/cables/
 #define MJSG_SR_RADIO_PID      0x9379
 #define MJSG_XM_RADIO_PID      0x937A
 #define MJSG_HD_RADIO_PID      0x937C
+
+/*
+ * Xverve Signalyzer tools (http://www.signalyzer.com/)
+ */
+#define XVERVE_SIGNALYZER_ST_PID       0xBCA0
+#define XVERVE_SIGNALYZER_SLITE_PID    0xBCA1
+#define XVERVE_SIGNALYZER_SH2_PID      0xBCA2
+#define XVERVE_SIGNALYZER_SH4_PID      0xBCA4
+
index e280ad8..5cd30e4 100644 (file)
@@ -206,6 +206,7 @@ static void option_instat_callback(struct urb *urb);
 #define AMOI_PRODUCT_H01                       0x0800
 #define AMOI_PRODUCT_H01A                      0x7002
 #define AMOI_PRODUCT_H02                       0x0802
+#define AMOI_PRODUCT_SKYPEPHONE_S2             0x0407
 
 #define DELL_VENDOR_ID                         0x413C
 
@@ -302,6 +303,7 @@ static void option_instat_callback(struct urb *urb);
 #define QISDA_PRODUCT_H21_4512                 0x4512
 #define QISDA_PRODUCT_H21_4523                 0x4523
 #define QISDA_PRODUCT_H20_4515                 0x4515
+#define QISDA_PRODUCT_H20_4518                 0x4518
 #define QISDA_PRODUCT_H20_4519                 0x4519
 
 /* TLAYTECH PRODUCTS */
@@ -516,6 +518,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
+       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },
 
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) },             /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) },             /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
@@ -852,6 +855,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
+       { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
index 93d72eb..cde67ca 100644 (file)
@@ -51,6 +51,8 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1f45, 0x0001)},   /* Unknown Gobi QDL device */
        {USB_DEVICE(0x413c, 0x8185)},   /* Dell Gobi 2000 QDL device (N0218, VU936) */
        {USB_DEVICE(0x413c, 0x8186)},   /* Dell Gobi 2000 Modem device (N0218, VU936) */
+       {USB_DEVICE(0x05c6, 0x9208)},   /* Generic Gobi 2000 QDL device */
+       {USB_DEVICE(0x05c6, 0x920b)},   /* Generic Gobi 2000 Modem device */
        {USB_DEVICE(0x05c6, 0x9224)},   /* Sony Gobi 2000 QDL device (N0279, VU730) */
        {USB_DEVICE(0x05c6, 0x9225)},   /* Sony Gobi 2000 Modem device (N0279, VU730) */
        {USB_DEVICE(0x05c6, 0x9244)},   /* Samsung Gobi 2000 QDL device (VL176) */
index ef0bdb0..d47b56e 100644 (file)
@@ -245,6 +245,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
        { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
        { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
+       { USB_DEVICE(0x1199, 0x0301) }, /* Sierra Wireless USB Dongle 250U */
        /* Sierra Wireless C597 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
        /* Sierra Wireless T598 */
index 4471642..64ec073 100644 (file)
@@ -139,9 +139,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
 
        /* fill the common fields in the URB */
        us->current_urb->context = &urb_done;
-       us->current_urb->actual_length = 0;
-       us->current_urb->error_count = 0;
-       us->current_urb->status = 0;
+       us->current_urb->transfer_flags = 0;
 
        /* we assume that if transfer_buffer isn't us->iobuf then it
         * hasn't been mapped for DMA.  Yes, this is clunky, but it's
index 57a593c..d219070 100644 (file)
@@ -177,8 +177,8 @@ static void handle_tx(struct vhost_net *net)
                        break;
                }
                if (err != len)
-                       pr_err("Truncated TX packet: "
-                              " len %d != %zd\n", err, len);
+                       pr_debug("Truncated TX packet: "
+                                " len %d != %zd\n", err, len);
                vhost_add_used_and_signal(&net->dev, vq, head, 0);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
@@ -275,8 +275,8 @@ static void handle_rx(struct vhost_net *net)
                }
                /* TODO: Should check and handle checksum. */
                if (err > len) {
-                       pr_err("Discarded truncated rx packet: "
-                              " len %d > %zd\n", err, len);
+                       pr_debug("Discarded truncated rx packet: "
+                                " len %d > %zd\n", err, len);
                        vhost_discard_vq_desc(vq);
                        continue;
                }
@@ -534,11 +534,16 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        rcu_assign_pointer(vq->private_data, sock);
        vhost_net_enable_vq(n, vq);
 done:
+       mutex_unlock(&vq->mutex);
+
        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }
 
+       mutex_unlock(&n->dev.mutex);
+       return 0;
+
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
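The vhost_net_set_backend() hunk above drops the per-queue mutex before the flush and fput of the old socket and releases the device mutex only on the way out, on both the success and the error paths. A small pthread sketch of that unlock ordering, with hypothetical names (dev_lock, vq_lock, set_backend are not from the driver):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;  /* outer */
static pthread_mutex_t vq_lock  = PTHREAD_MUTEX_INITIALIZER;  /* inner */

/* Swap in a new backend: mutate queue state under both locks, drop the
 * inner lock before the potentially slow cleanup of the old backend,
 * and drop the outer lock last. */
static int set_backend(int new_fd, int *cur_fd)
{
	int old_fd;

	pthread_mutex_lock(&dev_lock);
	if (new_fd < 0) {                       /* validation failure */
		pthread_mutex_unlock(&dev_lock);
		return -1;
	}

	pthread_mutex_lock(&vq_lock);
	old_fd = *cur_fd;
	*cur_fd = new_fd;
	pthread_mutex_unlock(&vq_lock);         /* queue state is consistent */

	if (old_fd >= 0)
		printf("flushing and closing old backend %d\n", old_fd);

	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	int cur = -1;

	set_backend(3, &cur);
	set_backend(4, &cur);
	return 0;
}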
index 515cf19..c4e1764 100644 (file)
@@ -2872,7 +2872,7 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
                }
 
 #if 0
-               /* Power down TV DAC, taht saves a significant amount of power,
+               /* Power down TV DAC, that saves a significant amount of power,
                 * we'll have something better once we actually have some TVOut
                 * support
                 */
index 40f6132..34b2fc4 100644 (file)
@@ -95,7 +95,7 @@ struct fb_bitfield rgb_bitfields[][4] =
        { { 8, 4, 0 },  { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } },
 };
 
-static struct fb_fix_screeninfo au1100fb_fix __initdata = {
+static struct fb_fix_screeninfo au1100fb_fix __devinitdata = {
        .id             = "AU1100 FB",
        .xpanstep       = 1,
        .ypanstep       = 1,
@@ -103,7 +103,7 @@ static struct fb_fix_screeninfo au1100fb_fix __initdata = {
        .accel          = FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo au1100fb_var __initdata = {
+static struct fb_var_screeninfo au1100fb_var __devinitdata = {
        .activate       = FB_ACTIVATE_NOW,
        .height         = -1,
        .width          = -1,
@@ -458,7 +458,7 @@ static struct fb_ops au1100fb_ops =
 
 /* AU1100 LCD controller device driver */
 
-static int __init au1100fb_drv_probe(struct platform_device *dev)
+static int __devinit au1100fb_drv_probe(struct platform_device *dev)
 {
        struct au1100fb_device *fbdev = NULL;
        struct resource *regs_res;
index 3a561df..0c1afd1 100644 (file)
@@ -388,6 +388,7 @@ cyber2000fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                pseudo_val |= convert_bitfield(red, &var->red);
                pseudo_val |= convert_bitfield(green, &var->green);
                pseudo_val |= convert_bitfield(blue, &var->blue);
+               ret = 0;
                break;
        }
 
@@ -436,6 +437,8 @@ static void cyber2000fb_write_ramdac_ctrl(struct cfb_info *cfb)
        cyber2000fb_writeb(i | 4, 0x3cf, cfb);
        cyber2000fb_writeb(val, 0x3c6, cfb);
        cyber2000fb_writeb(i, 0x3cf, cfb);
+       /* prevent card lock-up observed on x86 with CyberPro 2000 */
+       cyber2000fb_readb(0x3cf, cfb);
 }
 
 static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
index 7d8c55d..ca3355e 100644 (file)
@@ -91,10 +91,10 @@ static uint32_t pseudo_palette[16];
 static uint32_t gbe_cmap[256];
 static int gbe_turned_on; /* 0 turned off, 1 turned on */
 
-static char *mode_option __initdata = NULL;
+static char *mode_option __devinitdata = NULL;
 
 /* default CRT mode */
-static struct fb_var_screeninfo default_var_CRT __initdata = {
+static struct fb_var_screeninfo default_var_CRT __devinitdata = {
        /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
        .xres           = 640,
        .yres           = 480,
@@ -125,7 +125,7 @@ static struct fb_var_screeninfo default_var_CRT __initdata = {
 };
 
 /* default LCD mode */
-static struct fb_var_screeninfo default_var_LCD __initdata = {
+static struct fb_var_screeninfo default_var_LCD __devinitdata = {
        /* 1600x1024, 8 bpp */
        .xres           = 1600,
        .yres           = 1024,
@@ -157,7 +157,7 @@ static struct fb_var_screeninfo default_var_LCD __initdata = {
 
 /* default modedb mode */
 /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
-static struct fb_videomode default_mode_CRT __initdata = {
+static struct fb_videomode default_mode_CRT __devinitdata = {
        .refresh        = 60,
        .xres           = 640,
        .yres           = 480,
@@ -172,7 +172,7 @@ static struct fb_videomode default_mode_CRT __initdata = {
        .vmode          = FB_VMODE_NONINTERLACED,
 };
 /* 1600x1024 SGI flatpanel 1600sw */
-static struct fb_videomode default_mode_LCD __initdata = {
+static struct fb_videomode default_mode_LCD __devinitdata = {
        /* 1600x1024, 8 bpp */
        .xres           = 1600,
        .yres           = 1024,
@@ -186,8 +186,8 @@ static struct fb_videomode default_mode_LCD __initdata = {
        .vmode          = FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_videomode *default_mode __initdata = &default_mode_CRT;
-static struct fb_var_screeninfo *default_var __initdata = &default_var_CRT;
+static struct fb_videomode *default_mode __devinitdata = &default_mode_CRT;
+static struct fb_var_screeninfo *default_var __devinitdata = &default_var_CRT;
 
 static int flat_panel_enabled = 0;
 
@@ -1098,7 +1098,7 @@ static void gbefb_create_sysfs(struct device *dev)
  * Initialization
  */
 
-static int __init gbefb_setup(char *options)
+static int __devinit gbefb_setup(char *options)
 {
        char *this_opt;
 
index 0f361b6..0c69fa2 100644 (file)
@@ -44,7 +44,7 @@ struct pmagbafb_par {
 };
 
 
-static struct fb_var_screeninfo pmagbafb_defined __initdata = {
+static struct fb_var_screeninfo pmagbafb_defined __devinitdata = {
        .xres           = 1024,
        .yres           = 864,
        .xres_virtual   = 1024,
@@ -68,7 +68,7 @@ static struct fb_var_screeninfo pmagbafb_defined __initdata = {
        .vmode          = FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_fix_screeninfo pmagbafb_fix __initdata = {
+static struct fb_fix_screeninfo pmagbafb_fix __devinitdata = {
        .id             = "PMAG-BA",
        .smem_len       = (1024 * 1024),
        .type           = FB_TYPE_PACKED_PIXELS,
@@ -142,7 +142,7 @@ static void __init pmagbafb_erase_cursor(struct fb_info *info)
 }
 
 
-static int __init pmagbafb_probe(struct device *dev)
+static int __devinit pmagbafb_probe(struct device *dev)
 {
        struct tc_dev *tdev = to_tc_dev(dev);
        resource_size_t start, len;
index 2de0806..22fcb9a 100644 (file)
@@ -45,7 +45,7 @@ struct pmagbbfb_par {
 };
 
 
-static struct fb_var_screeninfo pmagbbfb_defined __initdata = {
+static struct fb_var_screeninfo pmagbbfb_defined __devinitdata = {
        .bits_per_pixel = 8,
        .red.length     = 8,
        .green.length   = 8,
@@ -58,7 +58,7 @@ static struct fb_var_screeninfo pmagbbfb_defined __initdata = {
        .vmode          = FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_fix_screeninfo pmagbbfb_fix __initdata = {
+static struct fb_fix_screeninfo pmagbbfb_fix __devinitdata = {
        .id             = "PMAGB-BA",
        .smem_len       = (2048 * 1024),
        .type           = FB_TYPE_PACKED_PIXELS,
@@ -148,7 +148,7 @@ static void __init pmagbbfb_erase_cursor(struct fb_info *info)
 /*
  * Set up screen parameters.
  */
-static void __init pmagbbfb_screen_setup(struct fb_info *info)
+static void __devinit pmagbbfb_screen_setup(struct fb_info *info)
 {
        struct pmagbbfb_par *par = info->par;
 
@@ -180,9 +180,9 @@ static void __init pmagbbfb_screen_setup(struct fb_info *info)
 /*
  * Determine oscillator configuration.
  */
-static void __init pmagbbfb_osc_setup(struct fb_info *info)
+static void __devinit pmagbbfb_osc_setup(struct fb_info *info)
 {
-       static unsigned int pmagbbfb_freqs[] __initdata = {
+       static unsigned int pmagbbfb_freqs[] __devinitdata = {
                130808, 119843, 104000, 92980, 74370, 72800,
                69197, 66000, 65000, 50350, 36000, 32000, 25175
        };
@@ -247,7 +247,7 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info)
 };
 
 
-static int __init pmagbbfb_probe(struct device *dev)
+static int __devinit pmagbbfb_probe(struct device *dev)
 {
        struct tc_dev *tdev = to_tc_dev(dev);
        resource_size_t start, len;
index afe7e21..1475ed6 100644 (file)
@@ -164,7 +164,8 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
                          gfp_t gfp)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
-       unsigned int i, avail, head, uninitialized_var(prev);
+       unsigned int i, avail, uninitialized_var(prev);
+       int head;
 
        START_USE(vq);
 
@@ -174,7 +175,7 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && (out + in) > 1 && vq->num_free) {
                head = vring_add_indirect(vq, sg, out, in, gfp);
-               if (head != vq->vring.num)
+               if (likely(head >= 0))
                        goto add_head;
        }
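The virtqueue_add_buf_gfp() hunk above makes head a signed int so a failure from vring_add_indirect() can be reported as a negative value instead of the old "index equals ring size" sentinel (the companion change to that helper is not shown here). A tiny sketch of the convention, with made-up names (alloc_desc, RING_SIZE):

#include <errno.h>
#include <stdio.h>

#define RING_SIZE 8

/* Return the allocated descriptor index, or a negative errno on failure,
 * so callers can test "head >= 0" instead of comparing against a sentinel. */
static int alloc_desc(int free_count)
{
	if (free_count <= 0)
		return -ENOSPC;
	return RING_SIZE - free_count;
}

int main(void)
{
	int head = alloc_desc(3);

	if (head >= 0)
		printf("using descriptor %d\n", head);
	else
		printf("allocation failed: %d\n", head);

	printf("full ring -> %d\n", alloc_desc(0));
	return 0;
}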
 
index 37e8894..29bac51 100644 (file)
@@ -755,10 +755,7 @@ int register_xenstore_notifier(struct notifier_block *nb)
 {
        int ret = 0;
 
-       if (xenstored_ready > 0)
-               ret = nb->notifier_call(nb, 0, NULL);
-       else
-               blocking_notifier_chain_register(&xenstore_chain, nb);
+       blocking_notifier_chain_register(&xenstore_chain, nb);
 
        return ret;
 }
index d61e3b2..36d961f 100644 (file)
@@ -146,7 +146,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
                while (rdir->head < rdir->tail) {
                        p9stat_init(&st);
                        err = p9stat_read(rdir->buf + rdir->head,
-                                               buflen - rdir->head, &st,
+                                               rdir->tail - rdir->head, &st,
                                                fid->clnt->proto_version);
                        if (err) {
                                P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
index 0d1d966..c3df14c 100644 (file)
@@ -2304,12 +2304,17 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
        return ret;
 }
 
+/*
+ * min slot controls the lowest index we're willing to push to the
+ * right.  We'll push up to and including min_slot, but no lower
+ */
 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      int data_size, int empty,
                                      struct extent_buffer *right,
-                                     int free_space, u32 left_nritems)
+                                     int free_space, u32 left_nritems,
+                                     u32 min_slot)
 {
        struct extent_buffer *left = path->nodes[0];
        struct extent_buffer *upper = path->nodes[1];
@@ -2327,7 +2332,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
        if (empty)
                nr = 0;
        else
-               nr = 1;
+               nr = max_t(u32, 1, min_slot);
 
        if (path->slots[0] >= left_nritems)
                push_space += data_size;
@@ -2469,10 +2474,14 @@ out_unlock:
  *
  * returns 1 if the push failed because the other node didn't have enough
  * room, 0 if everything worked out and < 0 if there were major errors.
+ *
+ * this will push starting from min_slot to the end of the leaf.  It won't
+ * push any slot lower than min_slot
  */
 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
-                          *root, struct btrfs_path *path, int data_size,
-                          int empty)
+                          *root, struct btrfs_path *path,
+                          int min_data_size, int data_size,
+                          int empty, u32 min_slot)
 {
        struct extent_buffer *left = path->nodes[0];
        struct extent_buffer *right;
@@ -2514,8 +2523,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
        if (left_nritems == 0)
                goto out_unlock;
 
-       return __push_leaf_right(trans, root, path, data_size, empty,
-                               right, free_space, left_nritems);
+       return __push_leaf_right(trans, root, path, min_data_size, empty,
+                               right, free_space, left_nritems, min_slot);
 out_unlock:
        btrfs_tree_unlock(right);
        free_extent_buffer(right);
@@ -2525,12 +2534,17 @@ out_unlock:
 /*
  * push some data in the path leaf to the left, trying to free up at
  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
+ *
+ * max_slot can put a limit on how far into the leaf we'll push items.  The
+ * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
+ * items
  */
 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path, int data_size,
                                     int empty, struct extent_buffer *left,
-                                    int free_space, int right_nritems)
+                                    int free_space, u32 right_nritems,
+                                    u32 max_slot)
 {
        struct btrfs_disk_key disk_key;
        struct extent_buffer *right = path->nodes[0];
@@ -2549,9 +2563,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
        slot = path->slots[1];
 
        if (empty)
-               nr = right_nritems;
+               nr = min(right_nritems, max_slot);
        else
-               nr = right_nritems - 1;
+               nr = min(right_nritems - 1, max_slot);
 
        for (i = 0; i < nr; i++) {
                item = btrfs_item_nr(right, i);
@@ -2712,10 +2726,14 @@ out:
 /*
  * push some data in the path leaf to the left, trying to free up at
  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
+ *
+ * max_slot can put a limit on how far into the leaf we'll push items.  The
+ * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
+ * items
  */
 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
-                         *root, struct btrfs_path *path, int data_size,
-                         int empty)
+                         *root, struct btrfs_path *path, int min_data_size,
+                         int data_size, int empty, u32 max_slot)
 {
        struct extent_buffer *right = path->nodes[0];
        struct extent_buffer *left;
@@ -2761,8 +2779,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
                goto out;
        }
 
-       return __push_leaf_left(trans, root, path, data_size,
-                              empty, left, free_space, right_nritems);
+       return __push_leaf_left(trans, root, path, min_data_size,
+                              empty, left, free_space, right_nritems,
+                              max_slot);
 out:
        btrfs_tree_unlock(left);
        free_extent_buffer(left);
@@ -2854,6 +2873,64 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/*
+ * double splits happen when we need to insert a big item in the middle
+ * of a leaf.  A double split can leave us with 3 mostly empty leaves:
+ * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
+ *          A                 B                 C
+ *
+ * We avoid this by trying to push the items on either side of our target
+ * into the adjacent leaves.  If all goes well we can avoid the double split
+ * completely.
+ */
+static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
+                                         struct btrfs_root *root,
+                                         struct btrfs_path *path,
+                                         int data_size)
+{
+       int ret;
+       int progress = 0;
+       int slot;
+       u32 nritems;
+
+       slot = path->slots[0];
+
+       /*
+        * try to push all the items after our slot into the
+        * right leaf
+        */
+       ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
+       if (ret < 0)
+               return ret;
+
+       if (ret == 0)
+               progress++;
+
+       nritems = btrfs_header_nritems(path->nodes[0]);
+       /*
+        * our goal is to get our slot at the start or end of a leaf.  If
+        * we've done so we're done
+        */
+       if (path->slots[0] == 0 || path->slots[0] == nritems)
+               return 0;
+
+       if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+               return 0;
+
+       /* try to push all the items before our slot into the next leaf */
+       slot = path->slots[0];
+       ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
+       if (ret < 0)
+               return ret;
+
+       if (ret == 0)
+               progress++;
+
+       if (progress)
+               return 0;
+       return 1;
+}
+
 /*
  * split the path's leaf in two, making sure there is at least data_size
  * available for the resulting leaf level of the path.
@@ -2876,6 +2953,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
        int wret;
        int split;
        int num_doubles = 0;
+       int tried_avoid_double = 0;
 
        l = path->nodes[0];
        slot = path->slots[0];
@@ -2884,12 +2962,14 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
                return -EOVERFLOW;
 
        /* first try to make some room by pushing left and right */
-       if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
-               wret = push_leaf_right(trans, root, path, data_size, 0);
+       if (data_size) {
+               wret = push_leaf_right(trans, root, path, data_size,
+                                      data_size, 0, 0);
                if (wret < 0)
                        return wret;
                if (wret) {
-                       wret = push_leaf_left(trans, root, path, data_size, 0);
+                       wret = push_leaf_left(trans, root, path, data_size,
+                                             data_size, 0, (u32)-1);
                        if (wret < 0)
                                return wret;
                }
@@ -2923,6 +3003,8 @@ again:
                                if (mid != nritems &&
                                    leaf_space_used(l, mid, nritems - mid) +
                                    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+                                       if (data_size && !tried_avoid_double)
+                                               goto push_for_double;
                                        split = 2;
                                }
                        }
@@ -2939,6 +3021,8 @@ again:
                                if (mid != nritems &&
                                    leaf_space_used(l, mid, nritems - mid) +
                                    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+                                       if (data_size && !tried_avoid_double)
+                                               goto push_for_double;
                                        split = 2 ;
                                }
                        }
@@ -3019,6 +3103,13 @@ again:
        }
 
        return ret;
+
+push_for_double:
+       push_for_double_split(trans, root, path, data_size);
+       tried_avoid_double = 1;
+       if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+               return 0;
+       goto again;
 }
 
 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
@@ -3915,13 +4006,15 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                        extent_buffer_get(leaf);
 
                        btrfs_set_path_blocking(path);
-                       wret = push_leaf_left(trans, root, path, 1, 1);
+                       wret = push_leaf_left(trans, root, path, 1, 1,
+                                             1, (u32)-1);
                        if (wret < 0 && wret != -ENOSPC)
                                ret = wret;
 
                        if (path->nodes[0] == leaf &&
                            btrfs_header_nritems(leaf)) {
-                               wret = push_leaf_right(trans, root, path, 1, 1);
+                               wret = push_leaf_right(trans, root, path, 1,
+                                                      1, 1, 0);
                                if (wret < 0 && wret != -ENOSPC)
                                        ret = wret;
                        }
index 4dbaf89..9254b3d 100644 (file)
@@ -1458,7 +1458,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
         */
 
        /* the destination must be opened for writing */
-       if (!(file->f_mode & FMODE_WRITE))
+       if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
                return -EINVAL;
 
        ret = mnt_want_write(file->f_path.mnt);
@@ -1511,7 +1511,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
        /* determine range to clone */
        ret = -EINVAL;
-       if (off >= src->i_size || off + len > src->i_size)
+       if (off + len > src->i_size || off + len < off)
                goto out_unlock;
        if (len == 0)
                olen = len = src->i_size - off;
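The clone-ioctl hunk above tightens the range check to "off + len > src->i_size || off + len < off", rejecting ranges whose 64-bit end wraps around as well as ranges past end of file. A standalone sketch of that check (range_ok is an illustrative name):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Validate an (offset, length) pair against a file size, guarding against
 * unsigned wrap-around in off + len as well as reads past end of file. */
static bool range_ok(uint64_t off, uint64_t len, uint64_t isize)
{
	if (off + len < off)            /* u64 overflow */
		return false;
	return off + len <= isize;      /* must stay within the file */
}

int main(void)
{
	printf("%d\n", range_ok(0, 4096, 8192));             /* 1: fits     */
	printf("%d\n", range_ok(4096, 8192, 8192));          /* 0: past EOF */
	printf("%d\n", range_ok(UINT64_MAX - 1, 16, 8192));  /* 0: wraps    */
	return 0;
}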
@@ -1578,6 +1578,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        u64 disko = 0, diskl = 0;
                        u64 datao = 0, datal = 0;
                        u8 comp;
+                       u64 endoff;
 
                        size = btrfs_item_size_nr(leaf, slot);
                        read_extent_buffer(leaf, buf,
@@ -1712,9 +1713,18 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        btrfs_release_path(root, path);
 
                        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-                       if (new_key.offset + datal > inode->i_size)
-                               btrfs_i_size_write(inode,
-                                                  new_key.offset + datal);
+
+                       /*
+                        * we round up to the block size at eof when
+                        * determining which extents to clone above,
+                        * but shouldn't round up the file size
+                        */
+                       endoff = new_key.offset + datal;
+                       if (endoff > off+olen)
+                               endoff = off+olen;
+                       if (endoff > inode->i_size)
+                               btrfs_i_size_write(inode, endoff);
+
                        BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
index 04b8280..bc87b9c 100644 (file)
@@ -2,7 +2,7 @@ config CEPH_FS
         tristate "Ceph distributed file system (EXPERIMENTAL)"
        depends on INET && EXPERIMENTAL
        select LIBCRC32C
-       select CONFIG_CRYPTO_AES
+       select CRYPTO_AES
        help
          Choose Y or M here to include support for mounting the
          experimental Ceph distributed file system.  Ceph is an extremely
index 3fe4904..6d44053 100644 (file)
@@ -613,6 +613,9 @@ static void ceph_x_destroy(struct ceph_auth_client *ac)
                remove_ticket_handler(ac, th);
        }
 
+       if (xi->auth_authorizer.buf)
+               ceph_buffer_put(xi->auth_authorizer.buf);
+
        kfree(ac->private);
        ac->private = NULL;
 }
index 74144d6..b81be9a 100644 (file)
@@ -627,7 +627,7 @@ retry:
        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
        spin_unlock(&inode->i_lock);
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
        return 0;
 }
 
@@ -1181,7 +1181,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        }
 
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
 
        return delayed;
 }
@@ -2153,7 +2153,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        else if (flushsnaps)
                ceph_flush_snaps(ci);
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
        if (put)
                iput(inode);
 }
@@ -2229,7 +2229,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                iput(inode);
        } else if (complete_capsnap) {
                ceph_flush_snaps(ci);
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
        }
        if (drop_capsnap)
                iput(inode);
@@ -2405,7 +2405,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
 
        if (check_caps == 1)
                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
@@ -2460,7 +2460,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                         struct ceph_inode_info,
                                         i_flushing_item)->vfs_inode);
                mdsc->num_cap_flushing--;
-               wake_up(&mdsc->cap_flushing_wq);
+               wake_up_all(&mdsc->cap_flushing_wq);
                dout(" inode %p now !flushing\n", inode);
 
                if (ci->i_dirty_caps == 0) {
@@ -2472,7 +2472,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                }
        }
        spin_unlock(&mdsc->cap_dirty_lock);
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
 
 out:
        spin_unlock(&inode->i_lock);
@@ -2984,6 +2984,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
                memcpy(*p, dentry->d_name.name, dentry->d_name.len);
                *p += dentry->d_name.len;
                rel->dname_seq = cpu_to_le32(di->lease_seq);
+               __ceph_mdsc_drop_dentry_lease(dentry);
        }
        spin_unlock(&dentry->d_lock);
        return ret;
index f857193..f94ed3c 100644 (file)
@@ -266,6 +266,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_opt(client, NOASYNCREADDIR) &&
+           ceph_snap(inode) != CEPH_SNAPDIR &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                err = __dcache_readdir(filp, dirent, filldir);
@@ -1013,18 +1014,22 @@ out_touch:
 
 /*
  * When a dentry is released, clear the dir I_COMPLETE if it was part
- * of the current dir gen.
+ * of the current dir gen or if this is in the snapshot namespace.
  */
 static void ceph_dentry_release(struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct inode *parent_inode = dentry->d_parent->d_inode;
+       u64 snapid = ceph_snap(parent_inode);
 
-       if (parent_inode) {
+       dout("dentry_release %p parent %p\n", dentry, parent_inode);
+
+       if (parent_inode && snapid != CEPH_SNAPDIR) {
                struct ceph_inode_info *ci = ceph_inode(parent_inode);
 
                spin_lock(&parent_inode->i_lock);
-               if (ci->i_shared_gen == di->lease_shared_gen) {
+               if (ci->i_shared_gen == di->lease_shared_gen ||
+                   snapid <= CEPH_MAXSNAP) {
                        dout(" clearing %p complete (d_release)\n",
                             parent_inode);
                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
@@ -1241,7 +1246,9 @@ struct dentry_operations ceph_dentry_ops = {
 
 struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
+       .d_release = ceph_dentry_release,
 };
 
 struct dentry_operations ceph_snap_dentry_ops = {
+       .d_release = ceph_dentry_release,
 };
index 6251a15..7c08698 100644 (file)
@@ -265,7 +265,7 @@ int ceph_release(struct inode *inode, struct file *file)
        kmem_cache_free(ceph_file_cachep, cf);
 
        /* wake up anyone waiting for caps on this inode */
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
        return 0;
 }
 
index 8f9b9fe..389f9db 100644 (file)
@@ -1199,8 +1199,10 @@ retry_lookup:
                                goto out;
                        }
                        err = ceph_init_dentry(dn);
-                       if (err < 0)
+                       if (err < 0) {
+                               dput(dn);
                                goto out;
+                       }
                } else if (dn->d_inode &&
                           (ceph_ino(dn->d_inode) != vino.ino ||
                            ceph_snap(dn->d_inode) != vino.snap)) {
@@ -1499,7 +1501,7 @@ retry:
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
 }
 
 
index 3ab79f6..dd440bd 100644 (file)
@@ -868,7 +868,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
        if (arg) {
                spin_lock(&inode->i_lock);
                ci->i_wanted_max_size = 0;
@@ -1514,6 +1514,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        ceph_encode_filepath(&p, end, ino1, path1);
        ceph_encode_filepath(&p, end, ino2, path2);
 
+       /* make note of release offset, in case we need to replay */
+       req->r_request_release_offset = p - msg->front.iov_base;
+
        /* cap releases */
        releases = 0;
        if (req->r_inode_drop)
@@ -1561,7 +1564,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
        if (req->r_callback)
                req->r_callback(mdsc, req);
        else
-               complete(&req->r_completion);
+               complete_all(&req->r_completion);
 }
 
 /*
@@ -1580,6 +1583,32 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
             req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
 
+       if (req->r_got_unsafe) {
+               /*
+                * Replay.  Do not regenerate message (and rebuild
+                * paths, etc.); just use the original message.
+                * Rebuilding paths will break for renames because
+                * d_move mangles the src name.
+                */
+               msg = req->r_request;
+               rhead = msg->front.iov_base;
+
+               flags = le32_to_cpu(rhead->flags);
+               flags |= CEPH_MDS_FLAG_REPLAY;
+               rhead->flags = cpu_to_le32(flags);
+
+               if (req->r_target_inode)
+                       rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
+
+               rhead->num_retry = req->r_attempts - 1;
+
+               /* remove cap/dentry releases from message */
+               rhead->num_releases = 0;
+               msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
+               msg->front.iov_len = req->r_request_release_offset;
+               return 0;
+       }
+
        if (req->r_request) {
                ceph_msg_put(req->r_request);
                req->r_request = NULL;
@@ -1601,13 +1630,9 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        rhead->flags = cpu_to_le32(flags);
        rhead->num_fwd = req->r_num_fwd;
        rhead->num_retry = req->r_attempts - 1;
+       rhead->ino = 0;
 
        dout(" r_locked_dir = %p\n", req->r_locked_dir);
-
-       if (req->r_target_inode && req->r_got_unsafe)
-               rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
-       else
-               rhead->ino = 0;
        return 0;
 }
 
@@ -1907,7 +1932,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        if (head->safe) {
                req->r_got_safe = true;
                __unregister_request(mdsc, req);
-               complete(&req->r_safe_completion);
+               complete_all(&req->r_safe_completion);
 
                if (req->r_got_unsafe) {
                        /*
@@ -1922,7 +1947,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 
                        /* last unsafe request during umount? */
                        if (mdsc->stopping && !__get_oldest_req(mdsc))
-                               complete(&mdsc->safe_umount_waiters);
+                               complete_all(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
@@ -2101,7 +2126,7 @@ static void handle_session(struct ceph_mds_session *session,
                        pr_info("mds%d reconnect denied\n", session->s_mds);
                remove_session_caps(session);
                wake = 1; /* for good measure */
-               complete(&mdsc->session_close_waiters);
+               complete_all(&mdsc->session_close_waiters);
                kick_requests(mdsc, mds);
                break;
 
index b292fa4..952410c 100644 (file)
@@ -188,6 +188,7 @@ struct ceph_mds_request {
        int r_old_inode_drop, r_old_inode_unless;
 
        struct ceph_msg  *r_request;  /* original request */
+       int r_request_release_offset;
        struct ceph_msg  *r_reply;
        struct ceph_mds_reply_info_parsed r_reply_info;
        int r_err;
index 9ad43a3..15167b2 100644 (file)
@@ -43,7 +43,8 @@ static void ceph_fault(struct ceph_connection *con);
  * nicely render a sockaddr as a string.
  */
 #define MAX_ADDR_STR 20
-static char addr_str[MAX_ADDR_STR][40];
+#define MAX_ADDR_STR_LEN 60
+static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
 static DEFINE_SPINLOCK(addr_str_lock);
 static int last_addr_str;
 
@@ -52,7 +53,6 @@ const char *pr_addr(const struct sockaddr_storage *ss)
        int i;
        char *s;
        struct sockaddr_in *in4 = (void *)ss;
-       unsigned char *quad = (void *)&in4->sin_addr.s_addr;
        struct sockaddr_in6 *in6 = (void *)ss;
 
        spin_lock(&addr_str_lock);
@@ -64,25 +64,13 @@ const char *pr_addr(const struct sockaddr_storage *ss)
 
        switch (ss->ss_family) {
        case AF_INET:
-               sprintf(s, "%u.%u.%u.%u:%u",
-                       (unsigned int)quad[0],
-                       (unsigned int)quad[1],
-                       (unsigned int)quad[2],
-                       (unsigned int)quad[3],
-                       (unsigned int)ntohs(in4->sin_port));
+               snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
+                        (unsigned int)ntohs(in4->sin_port));
                break;
 
        case AF_INET6:
-               sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
-                       in6->sin6_addr.s6_addr16[0],
-                       in6->sin6_addr.s6_addr16[1],
-                       in6->sin6_addr.s6_addr16[2],
-                       in6->sin6_addr.s6_addr16[3],
-                       in6->sin6_addr.s6_addr16[4],
-                       in6->sin6_addr.s6_addr16[5],
-                       in6->sin6_addr.s6_addr16[6],
-                       in6->sin6_addr.s6_addr16[7],
-                       (unsigned int)ntohs(in6->sin6_port));
+               snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
+                        (unsigned int)ntohs(in6->sin6_port));
                break;
 
        default:
@@ -215,12 +203,13 @@ static void set_sock_callbacks(struct socket *sock,
  */
 static struct socket *ceph_tcp_connect(struct ceph_connection *con)
 {
-       struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
+       struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
        int ret;
 
        BUG_ON(con->sock);
-       ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+       ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
+                              IPPROTO_TCP, &sock);
        if (ret)
                return ERR_PTR(ret);
        con->sock = sock;
@@ -234,7 +223,8 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
 
        dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
 
-       ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
+       ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
+                                O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     pr_addr(&con->peer_addr.in_addr),
@@ -1009,19 +999,32 @@ int ceph_parse_ips(const char *c, const char *end,
                struct sockaddr_in *in4 = (void *)ss;
                struct sockaddr_in6 *in6 = (void *)ss;
                int port;
+               char delim = ',';
+
+               if (*p == '[') {
+                       delim = ']';
+                       p++;
+               }
 
                memset(ss, 0, sizeof(*ss));
                if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
-                            ',', &ipend)) {
+                            delim, &ipend))
                        ss->ss_family = AF_INET;
-               else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
-                                   ',', &ipend)) {
+               else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
+                                 delim, &ipend))
                        ss->ss_family = AF_INET6;
-               } else {
+               else
                        goto bad;
-               }
                p = ipend;
 
+               if (delim == ']') {
+                       if (*p != ']') {
+                               dout("missing matching ']'\n");
+                               goto bad;
+                       }
+                       p++;
+               }
+
                /* port? */
                if (p < end && *p == ':') {
                        port = 0;
@@ -1055,7 +1058,7 @@ int ceph_parse_ips(const char *c, const char *end,
        return 0;
 
 bad:
-       pr_err("parse_ips bad ip '%s'\n", c);
+       pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return -EINVAL;
 }
 
@@ -2015,20 +2018,20 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
 {
        mutex_lock(&con->mutex);
        if (!list_empty(&msg->list_head)) {
-               dout("con_revoke %p msg %p\n", con, msg);
+               dout("con_revoke %p msg %p - was on queue\n", con, msg);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
                msg->hdr.seq = 0;
-               if (con->out_msg == msg) {
-                       ceph_msg_put(con->out_msg);
-                       con->out_msg = NULL;
-               }
+       }
+       if (con->out_msg == msg) {
+               dout("con_revoke %p msg %p - was sending\n", con, msg);
+               con->out_msg = NULL;
                if (con->out_kvec_is_msg) {
                        con->out_skip = con->out_kvec_bytes;
                        con->out_kvec_is_msg = false;
                }
-       } else {
-               dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
+               ceph_msg_put(msg);
+               msg->hdr.seq = 0;
        }
        mutex_unlock(&con->mutex);
 }
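
The messenger.c hunks above do two related things: they replace the hand-rolled sprintf() address formatting with bounded snprintf() calls using the kernel's %pI4/%pI6c specifiers, and they stop hard-coding AF_INET when creating the socket, taking the family from the parsed peer address instead. The following is a rough user-space sketch of those two ideas, not the kernel API and not part of the patch; format_addr() and the buffer size are illustrative names only.

/* User-space sketch: always bound the formatting buffer, and take the
 * socket family from the parsed address instead of hard-coding AF_INET.
 * format_addr() is a hypothetical helper, not from the patch.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#define MAX_ADDR_STR_LEN 60        /* enough for "[IPv6 address]:port" */

static const char *format_addr(const struct sockaddr_storage *ss,
                               char *buf, size_t len)
{
        char ip[INET6_ADDRSTRLEN];

        if (ss->ss_family == AF_INET) {
                const struct sockaddr_in *in4 = (const void *)ss;

                inet_ntop(AF_INET, &in4->sin_addr, ip, sizeof(ip));
                snprintf(buf, len, "%s:%u", ip, ntohs(in4->sin_port));
        } else {
                const struct sockaddr_in6 *in6 = (const void *)ss;

                inet_ntop(AF_INET6, &in6->sin6_addr, ip, sizeof(ip));
                snprintf(buf, len, "[%s]:%u", ip, ntohs(in6->sin6_port));
        }
        return buf;
}

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in6 *in6 = (void *)&ss;
        char buf[MAX_ADDR_STR_LEN];
        int fd;

        memset(&ss, 0, sizeof(ss));
        in6->sin6_family = AF_INET6;
        in6->sin6_port = htons(6789);
        inet_pton(AF_INET6, "::1", &in6->sin6_addr);

        /* family comes from the parsed address, never a hard-coded AF_INET */
        fd = socket(ss.ss_family, SOCK_STREAM, 0);
        printf("would connect to %s via fd %d\n",
               format_addr(&ss, buf, sizeof(buf)), fd);
        return 0;
}
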
index cc115ea..54fe01c 100644 (file)
@@ -345,7 +345,7 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 
 out:
        mutex_unlock(&monc->mutex);
-       wake_up(&client->auth_wq);
+       wake_up_all(&client->auth_wq);
 }
 
 /*
@@ -462,7 +462,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
        }
        mutex_unlock(&monc->mutex);
        if (req) {
-               complete(&req->completion);
+               complete_all(&req->completion);
                put_generic_request(req);
        }
        return;
@@ -718,7 +718,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
                                     monc->m_auth->front_max);
        if (ret < 0) {
                monc->client->auth_err = ret;
-               wake_up(&monc->client->auth_wq);
+               wake_up_all(&monc->client->auth_wq);
        } else if (ret > 0) {
                __send_prepared_auth_request(monc, ret);
        } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
index 92b7251..e385223 100644 (file)
@@ -862,12 +862,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        if (req->r_callback)
                req->r_callback(req, msg);
        else
-               complete(&req->r_completion);
+               complete_all(&req->r_completion);
 
        if (flags & CEPH_OSD_FLAG_ONDISK) {
                if (req->r_safe_callback)
                        req->r_safe_callback(req, msg);
-               complete(&req->r_safe_completion);  /* fsync waiter */
+               complete_all(&req->r_safe_completion);  /* fsync waiter */
        }
 
 done:
@@ -1083,7 +1083,7 @@ done:
        if (newmap)
                kick_requests(osdc, NULL);
        up_read(&osdc->map_sem);
-       wake_up(&osdc->client->auth_wq);
+       wake_up_all(&osdc->client->auth_wq);
        return;
 
 bad:
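
The ceph hunks above (mds_client.c, mon_client.c, osd_client.c) switch complete() to complete_all() and wake_up() to wake_up_all() because more than one task can be sleeping on the same completion or wait queue, and waking only one of them leaves the rest stuck. A user-space analogy, not the kernel API and not part of the patch: pthread_cond_broadcast() plays the role of complete_all()/wake_up_all(), while pthread_cond_signal() would behave like the old single-waiter wake-up.

/* User-space sketch: broadcast vs. single wake-up.  Build with -pthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reply_cond = PTHREAD_COND_INITIALIZER;
static int reply_arrived;

static void *waiter(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!reply_arrived)          /* several waiters may block here */
                pthread_cond_wait(&reply_cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("waiter %ld released\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[3];
        long i;

        for (i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, waiter, (void *)i);

        pthread_mutex_lock(&lock);
        reply_arrived = 1;
        /* A single signal would release only one waiter; broadcasting
         * releases them all, which is what the ceph reply paths need. */
        pthread_cond_broadcast(&reply_cond);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        return 0;
}
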
index 50ce64e..416d46a 100644 (file)
@@ -568,6 +568,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
+                       kfree(pi);
                        goto bad;
                }
                __decode_pool(p, pi);
@@ -830,12 +831,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                /* remove any? */
                while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
                                                node)->pgid, pgid) <= 0) {
-                       struct rb_node *cur = rbp;
+                       struct ceph_pg_mapping *cur =
+                               rb_entry(rbp, struct ceph_pg_mapping, node);
+
                        rbp = rb_next(rbp);
-                       dout(" removed pg_temp %llx\n",
-                            *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
-                                              node)->pgid);
-                       rb_erase(cur, &map->pg_temp);
+                       dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
+                       rb_erase(&cur->node, &map->pg_temp);
+                       kfree(cur);
                }
 
                if (pglen) {
@@ -851,19 +853,22 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                        for (j = 0; j < pglen; j++)
                                pg->osds[j] = ceph_decode_32(p);
                        err = __insert_pg_mapping(pg, &map->pg_temp);
-                       if (err)
+                       if (err) {
+                               kfree(pg);
                                goto bad;
+                       }
                        dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                             pglen);
                }
        }
        while (rbp) {
-               struct rb_node *cur = rbp;
+               struct ceph_pg_mapping *cur =
+                       rb_entry(rbp, struct ceph_pg_mapping, node);
+
                rbp = rb_next(rbp);
-               dout(" removed pg_temp %llx\n",
-                    *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
-                                      node)->pgid);
-               rb_erase(cur, &map->pg_temp);
+               dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
+               rb_erase(&cur->node, &map->pg_temp);
+               kfree(cur);
        }
 
        /* ignore the rest */
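
The osdmap.c hunks fix a leak in the pg_temp removal loops: rb_erase() only unlinks the node, so the enclosing struct ceph_pg_mapping was never freed. The rewrite fetches the container with rb_entry(), advances the iterator with rb_next() before erasing, and then kfree()s the container. Below is a small stand-alone C sketch of the same erase-while-iterating discipline on a singly linked list; the types and names are illustrative, not the kernel rbtree API.

/* Sketch: remember the next node before unlinking, then free the current
 * container, so iteration neither leaks nor touches freed memory.
 */
#include <stdio.h>
#include <stdlib.h>

struct mapping {
        unsigned long long pgid;
        struct mapping *next;
};

static void remove_all(struct mapping **head)
{
        struct mapping *cur = *head;

        while (cur) {
                struct mapping *next = cur->next;   /* advance first */

                printf("removed pg_temp %llx\n", cur->pgid);
                free(cur);                          /* then free the container */
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct mapping *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct mapping *m = malloc(sizeof(*m));

                m->pgid = i;
                m->next = head;
                head = m;
        }
        remove_all(&head);
        return 0;
}
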
index 484e52b..2cb1a70 100644 (file)
@@ -923,7 +923,7 @@ init_cifs(void)
                goto out_unregister_filesystem;
 #endif
 #ifdef CONFIG_CIFS_DFS_UPCALL
-       rc = register_key_type(&key_type_dns_resolver);
+       rc = cifs_init_dns_resolver();
        if (rc)
                goto out_unregister_key_type;
 #endif
@@ -935,7 +935,7 @@ init_cifs(void)
 
  out_unregister_resolver_key:
 #ifdef CONFIG_CIFS_DFS_UPCALL
-       unregister_key_type(&key_type_dns_resolver);
+       cifs_exit_dns_resolver();
  out_unregister_key_type:
 #endif
 #ifdef CONFIG_CIFS_UPCALL
@@ -961,7 +961,7 @@ exit_cifs(void)
        cifs_proc_clean();
 #ifdef CONFIG_CIFS_DFS_UPCALL
        cifs_dfs_release_automount_timer();
-       unregister_key_type(&key_type_dns_resolver);
+       cifs_exit_dns_resolver();
 #endif
 #ifdef CONFIG_CIFS_UPCALL
        unregister_key_type(&cifs_spnego_key_type);
index 4db2c5e..853a968 100644 (file)
  */
 
 #include <linux/slab.h>
+#include <linux/keyctl.h>
+#include <linux/key-type.h>
 #include <keys/user-type.h>
 #include "dns_resolve.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifs_debug.h"
 
+static const struct cred *dns_resolver_cache;
+
 /* Checks if supplied name is IP address
  * returns:
  *             1 - name is IP
@@ -94,6 +98,7 @@ struct key_type key_type_dns_resolver = {
 int
 dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
 {
+       const struct cred *saved_cred;
        int rc = -EAGAIN;
        struct key *rkey = ERR_PTR(-EAGAIN);
        char *name;
@@ -133,8 +138,15 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
                goto skip_upcall;
        }
 
+       saved_cred = override_creds(dns_resolver_cache);
        rkey = request_key(&key_type_dns_resolver, name, "");
+       revert_creds(saved_cred);
        if (!IS_ERR(rkey)) {
+               if (!(rkey->perm & KEY_USR_VIEW)) {
+                       down_read(&rkey->sem);
+                       rkey->perm |= KEY_USR_VIEW;
+                       up_read(&rkey->sem);
+               }
                len = rkey->type_data.x[0];
                data = rkey->payload.data;
        } else {
@@ -165,4 +177,61 @@ out:
        return rc;
 }
 
+int __init cifs_init_dns_resolver(void)
+{
+       struct cred *cred;
+       struct key *keyring;
+       int ret;
+
+       printk(KERN_NOTICE "Registering the %s key type\n",
+              key_type_dns_resolver.name);
+
+       /* create an override credential set with a special thread keyring in
+        * which DNS requests are cached
+        *
+        * this is used to prevent malicious redirections from being installed
+        * with add_key().
+        */
+       cred = prepare_kernel_cred(NULL);
+       if (!cred)
+               return -ENOMEM;
+
+       keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
+                           (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+                           KEY_USR_VIEW | KEY_USR_READ,
+                           KEY_ALLOC_NOT_IN_QUOTA);
+       if (IS_ERR(keyring)) {
+               ret = PTR_ERR(keyring);
+               goto failed_put_cred;
+       }
+
+       ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
+       if (ret < 0)
+               goto failed_put_key;
+
+       ret = register_key_type(&key_type_dns_resolver);
+       if (ret < 0)
+               goto failed_put_key;
+
+       /* instruct request_key() to use this special keyring as a cache for
+        * the results it looks up */
+       cred->thread_keyring = keyring;
+       cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+       dns_resolver_cache = cred;
+       return 0;
+
+failed_put_key:
+       key_put(keyring);
+failed_put_cred:
+       put_cred(cred);
+       return ret;
+}
 
+void cifs_exit_dns_resolver(void)
+{
+       key_revoke(dns_resolver_cache->thread_keyring);
+       unregister_key_type(&key_type_dns_resolver);
+       put_cred(dns_resolver_cache);
+       printk(KERN_NOTICE "Unregistered %s key type\n",
+              key_type_dns_resolver.name);
+}
index 966e928..5d7f291 100644 (file)
@@ -24,8 +24,8 @@
 #define _DNS_RESOLVE_H
 
 #ifdef __KERNEL__
-#include <linux/key-type.h>
-extern struct key_type key_type_dns_resolver;
+extern int __init cifs_init_dns_resolver(void);
+extern void cifs_exit_dns_resolver(void);
 extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
 #endif /* KERNEL */
 
index c8c78ba..86d4db1 100644 (file)
@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
index 2d8dbce..46c4dd8 100644 (file)
@@ -31,9 +31,9 @@ static struct mutex ecryptfs_msg_ctx_lists_mux;
 
 static struct hlist_head *ecryptfs_daemon_hash;
 struct mutex ecryptfs_daemon_hash_mux;
-static int ecryptfs_hash_buckets;
+static int ecryptfs_hash_bits;
 #define ecryptfs_uid_hash(uid) \
-        hash_long((unsigned long)uid, ecryptfs_hash_buckets)
+        hash_long((unsigned long)uid, ecryptfs_hash_bits)
 
 static u32 ecryptfs_msg_counter;
 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -486,18 +486,19 @@ int ecryptfs_init_messaging(void)
        }
        mutex_init(&ecryptfs_daemon_hash_mux);
        mutex_lock(&ecryptfs_daemon_hash_mux);
-       ecryptfs_hash_buckets = 1;
-       while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
-               ecryptfs_hash_buckets++;
+       ecryptfs_hash_bits = 1;
+       while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
+               ecryptfs_hash_bits++;
        ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
-                                       * ecryptfs_hash_buckets), GFP_KERNEL);
+                                       * (1 << ecryptfs_hash_bits)),
+                                      GFP_KERNEL);
        if (!ecryptfs_daemon_hash) {
                rc = -ENOMEM;
                printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
                mutex_unlock(&ecryptfs_daemon_hash_mux);
                goto out;
        }
-       for (i = 0; i < ecryptfs_hash_buckets; i++)
+       for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
                INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
@@ -554,7 +555,7 @@ void ecryptfs_release_messaging(void)
                int i;
 
                mutex_lock(&ecryptfs_daemon_hash_mux);
-               for (i = 0; i < ecryptfs_hash_buckets; i++) {
+               for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
                        int rc;
 
                        hlist_for_each_entry(daemon, elem,
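
The ecryptfs change renames ecryptfs_hash_buckets to ecryptfs_hash_bits: hash_long(value, bits) folds into that many bits, so the table needs 1 << bits buckets, while the old code was sizing and walking the table with the bit count itself. A stand-alone check of the arithmetic; hash_bits() below is a simple Fibonacci-hashing stand-in, not the kernel's hash_long().

/* Stand-alone illustration: a hash that folds into `bits` bits needs a
 * table of 1 << bits buckets, not `bits` buckets.
 */
#include <stdint.h>
#include <stdio.h>

/* Simple multiplicative (Fibonacci-style) stand-in for hash_long(). */
static unsigned int hash_bits(uint64_t val, unsigned int bits)
{
        return (unsigned int)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

int main(void)
{
        unsigned int users = 100;       /* stand-in for ecryptfs_number_of_users */
        unsigned int bits = 1;
        unsigned int buckets;

        while (users >> bits)           /* same sizing loop as the patch */
                bits++;
        buckets = 1u << bits;           /* allocate 1 << bits buckets */

        printf("users=%u -> bits=%u -> buckets=%u\n", users, bits, buckets);
        printf("uid 1000 -> bucket %u of %u\n", hash_bits(1000, bits), buckets);
        return 0;
}
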
index 4a48c0f..84da64b 100644 (file)
@@ -1041,6 +1041,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 
        if (gfs2_is_stuffed(ip)) {
                u64 dsize = size + sizeof(struct gfs2_inode);
+               ip->i_disksize = size;
                ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
index 8295c5b..6b48d7c 100644 (file)
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
        unsigned totlen = be16_to_cpu(dent->de_rec_len);
 
        if (gfs2_dirent_sentinel(dent))
-               actual = GFS2_DIRENT_SIZE(0);
+               actual = 0;
        if (totlen - actual >= required)
                return 1;
        return 0;
@@ -1231,6 +1231,25 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
        return 0;
 }
 
+static void *gfs2_alloc_sort_buffer(unsigned size)
+{
+       void *ptr = NULL;
+
+       if (size < KMALLOC_MAX_SIZE)
+               ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
+       if (!ptr)
+               ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
+       return ptr;
+}
+
+static void gfs2_free_sort_buffer(void *ptr)
+{
+       if (is_vmalloc_addr(ptr))
+               vfree(ptr);
+       else
+               kfree(ptr);
+}
+
 static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
                              filldir_t filldir, int *copied, unsigned *depth,
                              u64 leaf_no)
@@ -1271,7 +1290,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
         * 99 is the maximum number of entries that can fit in a single
         * leaf block.
         */
-       larr = vmalloc((leaves + entries + 99) * sizeof(void *));
+       larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
        if (!larr)
                goto out;
        darr = (const struct gfs2_dirent **)(larr + leaves);
@@ -1282,7 +1301,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
        do {
                error = get_leaf(ip, lfn, &bh);
                if (error)
-                       goto out_kfree;
+                       goto out_free;
                lf = (struct gfs2_leaf *)bh->b_data;
                lfn = be64_to_cpu(lf->lf_next);
                if (lf->lf_entries) {
@@ -1291,7 +1310,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
                                                gfs2_dirent_gather, NULL, &g);
                        error = PTR_ERR(dent);
                        if (IS_ERR(dent))
-                               goto out_kfree;
+                               goto out_free;
                        if (entries2 != g.offset) {
                                fs_warn(sdp, "Number of entries corrupt in dir "
                                                "leaf %llu, entries2 (%u) != "
@@ -1300,7 +1319,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
                                        entries2, g.offset);
                                        
                                error = -EIO;
-                               goto out_kfree;
+                               goto out_free;
                        }
                        error = 0;
                        larr[leaf++] = bh;
@@ -1312,10 +1331,10 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
        BUG_ON(entries2 != entries);
        error = do_filldir_main(ip, offset, opaque, filldir, darr,
                                entries, copied);
-out_kfree:
+out_free:
        for(i = 0; i < leaf; i++)
                brelse(larr[i]);
-       vfree(larr);
+       gfs2_free_sort_buffer(larr);
 out:
        return error;
 }
index ddcdbf4..0898f3e 100644 (file)
@@ -706,8 +706,18 @@ static void glock_work_func(struct work_struct *work)
 {
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+       struct gfs2_holder *gh;
        int drop_ref = 0;
 
+       if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) {
+               spin_lock(&gl->gl_spin);
+               gh = find_first_waiter(gl);
+               if (gh && (gh->gh_flags & LM_FLAG_NOEXP) &&
+                   test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+                       set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+               spin_unlock(&gl->gl_spin);
+       }
+
        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_ref = 1;
@@ -1348,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        struct gfs2_glock *gl;
        int may_demote;
index b5612cb..f03afd9 100644 (file)
@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
 {
        struct inode *inode;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error;
 
        inode = gfs2_iget(sb, no_addr);
@@ -198,6 +198,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
                ip->i_iopen_gh.gh_gl->gl_object = ip;
 
                gfs2_glock_put(io_gl);
+               io_gl = NULL;
 
                if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
                        goto gfs2_nfsbypass;
@@ -228,7 +229,8 @@ gfs2_nfsbypass:
 fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
 fail_iopen:
-       gfs2_glock_put(io_gl);
+       if (io_gl)
+               gfs2_glock_put(io_gl);
 fail_put:
        if (inode->i_state & I_NEW)
                ip->i_gl->gl_object = NULL;
@@ -256,7 +258,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 {
        struct gfs2_sbd *sdp;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error;
        struct gfs2_holder gh;
        struct inode *inode;
@@ -293,6 +295,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 
        ip->i_iopen_gh.gh_gl->gl_object = ip;
        gfs2_glock_put(io_gl);
+       io_gl = NULL;
 
        inode->i_mode = DT2IF(DT_UNKNOWN);
 
@@ -319,7 +322,8 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
 fail_iopen:
-       gfs2_glock_put(io_gl);
+       if (io_gl)
+               gfs2_glock_put(io_gl);
 fail_put:
        ip->i_gl->gl_object = NULL;
        gfs2_glock_put(ip->i_gl);
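
Both gfs2/inode.c hunks clear io_gl immediately after the reference is dropped so that the shared error path can test `if (io_gl)` before calling gfs2_glock_put() again; without that, a failure later in the function would put the glock twice. The same defensive pattern in plain user-space C, where heap memory stands in for the glock reference and the names are illustrative only.

/* Sketch: NULL the pointer once the reference is gone so a common error
 * path cannot release it a second time.
 */
#include <stdio.h>
#include <stdlib.h>

struct resource {
        int dummy;
};

static int do_setup(int fail_late)
{
        struct resource *res;
        int err = 0;

        res = malloc(sizeof(*res));
        if (!res)
                return -1;

        /* ... hand the resource off, or finish with it ... */
        free(res);
        res = NULL;             /* the error path below must not free it again */

        if (fail_late) {
                err = -1;
                goto fail;
        }
        return 0;

fail:
        if (res)                /* safe: already NULL once released above */
                free(res);
        return err;
}

int main(void)
{
        printf("setup: %d\n", do_setup(1));
        return 0;
}
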
index 49667d6..8f02d3d 100644 (file)
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;
@@ -694,10 +694,8 @@ get_a_page:
                if (!buffer_mapped(bh))
                        goto unlock_out;
                /* If it's a newly allocated disk block for quota, zero it */
-               if (buffer_new(bh)) {
-                       memset(bh->b_data, 0, bh->b_size);
-                       set_buffer_uptodate(bh);
-               }
+               if (buffer_new(bh))
+                       zero_user(page, pos - blocksize, bh->b_size);
        }
 
        if (PageUptodate(page))
@@ -723,7 +721,7 @@ get_a_page:
 
        /* If quota straddles page boundary, we need to update the rest of the
         * quota at the beginning of the next page */
-       if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
+       if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
                ptr = ptr + nbytes;
                nbytes = sizeof(struct gfs2_quota) - nbytes;
                offset = 0;
index 195f60c..e7d236c 100644 (file)
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
        return ret;
 }
 
-extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
index 2bee20a..722860b 100644 (file)
@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        if (nr) {
                /*
index bc2ff59..0368808 100644 (file)
@@ -297,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
-       struct jbd2_buffer_trigger_type *triggers;
        journal_t *journal = transaction->t_journal;
 
        /*
@@ -328,21 +327,21 @@ repeat:
                done_copy_out = 1;
                new_page = virt_to_page(jh_in->b_frozen_data);
                new_offset = offset_in_page(jh_in->b_frozen_data);
-               triggers = jh_in->b_frozen_triggers;
        } else {
                new_page = jh2bh(jh_in)->b_page;
                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-               triggers = jh_in->b_triggers;
        }
 
        mapped_data = kmap_atomic(new_page, KM_USER0);
        /*
-        * Fire any commit trigger.  Do this before checking for escaping,
-        * as the trigger may modify the magic offset.  If a copy-out
-        * happens afterwards, it will have the correct data in the buffer.
+        * Fire data frozen trigger if data already wasn't frozen.  Do this
+        * before checking for escaping, as the trigger may modify the magic
+        * offset.  If a copy-out happens afterwards, it will have the correct
+        * data in the buffer.
         */
-       jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset,
-                                  triggers);
+       if (!done_copy_out)
+               jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+                                          jh_in->b_triggers);
 
        /*
         * Check for escaping
index e214d68..b8e0806 100644 (file)
@@ -725,6 +725,9 @@ done:
                page = jh2bh(jh)->b_page;
                offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
                source = kmap_atomic(page, KM_USER0);
+               /* Fire data frozen trigger just before we copy the data */
+               jbd2_buffer_frozen_trigger(jh, source + offset,
+                                          jh->b_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);
 
@@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh,
        jh->b_triggers = type;
 }
 
-void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
+void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
 {
        struct buffer_head *bh = jh2bh(jh);
 
-       if (!triggers || !triggers->t_commit)
+       if (!triggers || !triggers->t_frozen)
                return;
 
-       triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
+       triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
 }
 
 void jbd2_buffer_abort_trigger(struct journal_head *jh,
index a2d58c9..d258e26 100644 (file)
@@ -626,7 +626,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
 
 static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
 {
-       /* success of check_xattr_ref_inode() means taht inode (ic) dose not have
+       /* success of check_xattr_ref_inode() means that inode (ic) dose not have
         * duplicate name/value pairs. If duplicate name/value pair would be found,
         * one will be removed.
         */
index ec88ff3..e28f21b 100644 (file)
@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache)
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 static struct shrinker mb_cache_shrinker = {
        .shrink = mb_cache_shrink_fn,
@@ -191,13 +191,14 @@ forget:
  * This function is called by the kernel memory management when memory
  * gets low.
  *
+ * @shrink: (ignored)
  * @nr_to_scan: Number of objects to scan
  * @gfp_mask: (ignored)
  *
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;
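
Several hunks in this merge (fs/dcache.c, fs/inode.c, fs/mbcache.c, fs/nfs/dir.c, fs/gfs2/glock.c, fs/gfs2/quota.c) adapt cache shrinkers to the new callback prototype, which now receives the struct shrinker pointer as its first argument. The following is a minimal registration sketch against that prototype, assuming this kernel series' register_shrinker()/unregister_shrinker() API; the "cache" is reduced to a bare counter and the module is illustrative, not part of the patch.

/* Module-style sketch of the updated shrinker prototype. */
#include <linux/module.h>
#include <linux/mm.h>

static int demo_cache_objects;          /* stand-in for a real object cache */

/* New prototype: the shrinker itself is now passed in as well. */
static int demo_cache_shrink(struct shrinker *shrink, int nr_to_scan,
                             gfp_t gfp_mask)
{
        if (nr_to_scan) {
                /* free up to nr_to_scan cached objects here */
        }
        return demo_cache_objects;      /* objects still reclaimable */
}

static struct shrinker demo_shrinker = {
        .shrink = demo_cache_shrink,
        .seeks = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
        register_shrinker(&demo_shrinker);
        return 0;
}

static void __exit demo_exit(void)
{
        unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
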
index 782b431..e60416d 100644 (file)
@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head)
        }
 }
 
-int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(head);
        struct nfs_inode *nfsi;
index 36a5e74..f036153 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <linux/aio.h>
 #include <linux/gfp.h>
+#include <linux/swap.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -493,11 +494,19 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
  */
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
+       struct address_space *mapping = page->mapping;
+
        dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
        /* Only do I/O if gfp is a superset of GFP_KERNEL */
-       if ((gfp & GFP_KERNEL) == GFP_KERNEL)
-               nfs_wb_page(page->mapping->host, page);
+       if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+               int how = FLUSH_SYNC;
+
+               /* Don't let kswapd deadlock waiting for OOM RPC calls */
+               if (current_is_kswapd())
+                       how = 0;
+               nfs_commit_inode(mapping->host, how);
+       }
        /* If PagePrivate() is set, then the page is not freeable */
        if (PagePrivate(page))
                return 0;
index d8bd619..e70f44b 100644 (file)
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[];
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+                                       int nr_to_scan, gfp_t gfp_mask);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
index 6bd19d8..df101d9 100644 (file)
@@ -105,7 +105,7 @@ static char nfs_root_name[256] __initdata = "";
 static __be32 servaddr __initdata = 0;
 
 /* Name of directory to mount */
-static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, };
+static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = { 0, };
 
 /* NFS-related data */
 static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
index 91679e2..9f81bdd 100644 (file)
@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
 {
        struct inode *inode = page->mapping->host;
        struct nfs_page *req;
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
                 *       request as dirty (in which case we don't care).
                 */
                spin_unlock(&inode->i_lock);
-               ret = nfs_wait_on_request(req);
+               if (!nonblock)
+                       ret = nfs_wait_on_request(req);
+               else
+                       ret = -EAGAIN;
                nfs_release_request(req);
                if (ret != 0)
                        return ERR_PTR(ret);
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-                               struct page *page)
+                               struct page *page, bool nonblock)
 {
        struct nfs_page *req;
        int ret = 0;
 
-       req = nfs_find_and_lock_request(page);
+       req = nfs_find_and_lock_request(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
@@ -283,12 +286,20 @@ out:
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
        struct inode *inode = page->mapping->host;
+       int ret;
 
        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
        nfs_pageio_cond_complete(pgio, page->index);
-       return nfs_page_async_flush(pgio, page);
+       ret = nfs_page_async_flush(pgio, page,
+                       wbc->sync_mode == WB_SYNC_NONE ||
+                       wbc->nonblocking != 0);
+       if (ret == -EAGAIN) {
+               redirty_page_for_writepage(wbc, page);
+               ret = 0;
+       }
+       return ret;
 }
 
 /*
@@ -1379,7 +1390,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
        .rpc_release = nfs_commit_release,
 };
 
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
        LIST_HEAD(head);
        int may_wait = how & FLUSH_SYNC;
@@ -1443,11 +1454,6 @@ out_mark_dirty:
        return ret;
 }
 #else
-static int nfs_commit_inode(struct inode *inode, int how)
-{
-       return 0;
-}
-
 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
        return 0;
@@ -1546,7 +1552,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 
        nfs_fscache_release_page(page, GFP_KERNEL);
 
-       req = nfs_find_and_lock_request(page);
+       req = nfs_find_and_lock_request(page, false);
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;
index 3623ca2..356e976 100644 (file)
@@ -196,15 +196,14 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
                        dump_stack();
                        goto bail;
                }
-
-               past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
-               mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
-                    (unsigned long long)past_eof);
-
-               if (create && (iblock >= past_eof))
-                       set_buffer_new(bh_result);
        }
 
+       past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
+       mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
+            (unsigned long long)past_eof);
+       if (create && (iblock >= past_eof))
+               set_buffer_new(bh_result);
+
 bail:
        if (err < 0)
                err = -EIO;
@@ -459,36 +458,6 @@ int walk_page_buffers(     handle_t *handle,
        return ret;
 }
 
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
-                                                        struct page *page,
-                                                        unsigned from,
-                                                        unsigned to)
-{
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       handle_t *handle;
-       int ret = 0;
-
-       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
-       if (IS_ERR(handle)) {
-               ret = -ENOMEM;
-               mlog_errno(ret);
-               goto out;
-       }
-
-       if (ocfs2_should_order_data(inode)) {
-               ret = ocfs2_jbd2_file_inode(handle, inode);
-               if (ret < 0)
-                       mlog_errno(ret);
-       }
-out:
-       if (ret) {
-               if (!IS_ERR(handle))
-                       ocfs2_commit_trans(osb, handle);
-               handle = ERR_PTR(ret);
-       }
-       return handle;
-}
-
 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
 {
        sector_t status;
@@ -1131,23 +1100,37 @@ out:
  */
 static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                                      struct ocfs2_write_ctxt *wc,
-                                     u32 cpos, loff_t user_pos, int new,
+                                     u32 cpos, loff_t user_pos,
+                                     unsigned user_len, int new,
                                      struct page *mmap_page)
 {
        int ret = 0, i;
-       unsigned long start, target_index, index;
+       unsigned long start, target_index, end_index, index;
        struct inode *inode = mapping->host;
+       loff_t last_byte;
 
        target_index = user_pos >> PAGE_CACHE_SHIFT;
 
        /*
         * Figure out how many pages we'll be manipulating here. For
         * non allocating write, we just change the one
-        * page. Otherwise, we'll need a whole clusters worth.
+        * page. Otherwise, we'll need a whole clusters worth.  If we're
+        * writing past i_size, we only need enough pages to cover the
+        * last page of the write.
         */
        if (new) {
                wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
                start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
+               /*
+                * We need the index *past* the last page we could possibly
+                * touch.  This is the page past the end of the write or
+                * i_size, whichever is greater.
+                */
+               last_byte = max(user_pos + user_len, i_size_read(inode));
+               BUG_ON(last_byte < 1);
+               end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+               if ((start + wc->w_num_pages) > end_index)
+                       wc->w_num_pages = end_index - start;
        } else {
                wc->w_num_pages = 1;
                start = target_index;
@@ -1620,21 +1603,20 @@ out:
  * write path can treat it as an non-allocating write, which has no
  * special case code for sparse/nonsparse files.
  */
-static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
-                                       unsigned len,
+static int ocfs2_expand_nonsparse_inode(struct inode *inode,
+                                       struct buffer_head *di_bh,
+                                       loff_t pos, unsigned len,
                                        struct ocfs2_write_ctxt *wc)
 {
        int ret;
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t newsize = pos + len;
 
-       if (ocfs2_sparse_alloc(osb))
-               return 0;
+       BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
 
        if (newsize <= i_size_read(inode))
                return 0;
 
-       ret = ocfs2_extend_no_holes(inode, newsize, pos);
+       ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
        if (ret)
                mlog_errno(ret);
 
@@ -1644,6 +1626,18 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
        return ret;
 }
 
+static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
+                          loff_t pos)
+{
+       int ret = 0;
+
+       BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
+       if (pos > i_size_read(inode))
+               ret = ocfs2_zero_extend(inode, di_bh, pos);
+
+       return ret;
+}
+
 int ocfs2_write_begin_nolock(struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata,
@@ -1679,7 +1673,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
                }
        }
 
-       ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+       if (ocfs2_sparse_alloc(osb))
+               ret = ocfs2_zero_tail(inode, di_bh, pos);
+       else
+               ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
+                                                  wc);
        if (ret) {
                mlog_errno(ret);
                goto out;
@@ -1789,7 +1787,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
         * that we can zero and flush if we error after adding the
         * extent.
         */
-       ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+       ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
                                         cluster_of_pages, mmap_page);
        if (ret) {
                mlog_errno(ret);
index 6b5a492..153abb5 100644 (file)
@@ -1671,7 +1671,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
        struct dlm_ctxt *dlm = NULL;
        struct dlm_ctxt *new_ctxt = NULL;
 
-       if (strlen(domain) > O2NM_MAX_NAME_LEN) {
+       if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
                ret = -ENAMETOOLONG;
                mlog(ML_ERROR, "domain name length too long\n");
                goto leave;
@@ -1709,6 +1709,7 @@ retry:
                }
 
                if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+                       spin_unlock(&dlm_domain_lock);
                        mlog(ML_ERROR,
                             "Requested locking protocol version is not "
                             "compatible with already registered domain "
index 4a7506a..94b97fc 100644 (file)
@@ -2808,14 +2808,8 @@ again:
                mlog(0, "trying again...\n");
                goto again;
        }
-       /* now that we are sure the MIGRATING state is there, drop
-        * the unneded state which blocked threads trying to DIRTY */
-       spin_lock(&res->spinlock);
-       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
-       BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
-       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
-       spin_unlock(&res->spinlock);
 
+       ret = 0;
        /* did the target go down or die? */
        spin_lock(&dlm->spinlock);
        if (!test_bit(target, dlm->domain_map)) {
@@ -2825,10 +2819,22 @@ again:
        }
        spin_unlock(&dlm->spinlock);
 
+       /*
+        * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
+        * another try; otherwise, we are sure the MIGRATING state is there,
+        * drop the unneded state which blocked threads trying to DIRTY
+        */
+       spin_lock(&res->spinlock);
+       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+       if (!ret)
+               BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+       spin_unlock(&res->spinlock);
+
        /*
         * at this point:
         *
-        *   o the DLM_LOCK_RES_MIGRATING flag is set
+        *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
         *   o there are no pending asts on this lockres
         *   o all processes trying to reserve an ast on this
         *     lockres must wait for the MIGRATING flag to clear
index f8b75ce..9dfaac7 100644 (file)
@@ -463,7 +463,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;
 
-               bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+               bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
index 6a13ea6..2b10b36 100644 (file)
@@ -724,28 +724,55 @@ leave:
        return status;
 }
 
+/*
+ * While a write will already be ordering the data, a truncate will not.
+ * Thus, we need to explicitly order the zeroed pages.
+ */
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       handle_t *handle = NULL;
+       int ret = 0;
+
+       if (!ocfs2_should_order_data(inode))
+               goto out;
+
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (IS_ERR(handle)) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_jbd2_file_inode(handle, inode);
+       if (ret < 0)
+               mlog_errno(ret);
+
+out:
+       if (ret) {
+               if (!IS_ERR(handle))
+                       ocfs2_commit_trans(osb, handle);
+               handle = ERR_PTR(ret);
+       }
+       return handle;
+}
+
 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
  * worry about recursive locking in ->write_begin() and ->write_end(). */
-static int ocfs2_write_zero_page(struct inode *inode,
-                                u64 size)
+static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
+                                u64 abs_to)
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index;
-       unsigned int offset;
+       unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
        handle_t *handle = NULL;
-       int ret;
+       int ret = 0;
+       unsigned zero_from, zero_to, block_start, block_end;
 
-       offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we
-       ** skip the prepare.  make sure we never send an offset for the start
-       ** of a block
-       */
-       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-               offset++;
-       }
-       index = size >> PAGE_CACHE_SHIFT;
+       BUG_ON(abs_from >= abs_to);
+       BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+       BUG_ON(abs_from & (inode->i_blkbits - 1));
 
        page = grab_cache_page(mapping, index);
        if (!page) {
@@ -754,31 +781,56 @@ static int ocfs2_write_zero_page(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
-       if (ret < 0) {
-               mlog_errno(ret);
-               goto out_unlock;
-       }
+       /* Get the offsets within the page that we want to zero */
+       zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
+       zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+       if (!zero_to)
+               zero_to = PAGE_CACHE_SIZE;
 
-       if (ocfs2_should_order_data(inode)) {
-               handle = ocfs2_start_walk_page_trans(inode, page, offset,
-                                                    offset);
-               if (IS_ERR(handle)) {
-                       ret = PTR_ERR(handle);
-                       handle = NULL;
+       mlog(0,
+            "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
+            (unsigned long long)abs_from, (unsigned long long)abs_to,
+            index, zero_from, zero_to);
+
+       /* We know that zero_from is block aligned */
+       for (block_start = zero_from; block_start < zero_to;
+            block_start = block_end) {
+               block_end = block_start + (1 << inode->i_blkbits);
+
+               /*
+                * block_start is block-aligned.  Bump it by one to
+                * force ocfs2_{prepare,commit}_write() to zero the
+                * whole block.
+                */
+               ret = ocfs2_prepare_write_nolock(inode, page,
+                                                block_start + 1,
+                                                block_start + 1);
+               if (ret < 0) {
+                       mlog_errno(ret);
                        goto out_unlock;
                }
-       }
 
-       /* must not update i_size! */
-       ret = block_commit_write(page, offset, offset);
-       if (ret < 0)
-               mlog_errno(ret);
-       else
-               ret = 0;
+               if (!handle) {
+                       handle = ocfs2_zero_start_ordered_transaction(inode);
+                       if (IS_ERR(handle)) {
+                               ret = PTR_ERR(handle);
+                               handle = NULL;
+                               break;
+                       }
+               }
+
+               /* must not update i_size! */
+               ret = block_commit_write(page, block_start + 1,
+                                        block_start + 1);
+               if (ret < 0)
+                       mlog_errno(ret);
+               else
+                       ret = 0;
+       }
 
        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+
 out_unlock:
        unlock_page(page);
        page_cache_release(page);
@@ -786,22 +838,114 @@ out:
        return ret;
 }
 
-static int ocfs2_zero_extend(struct inode *inode,
-                            u64 zero_to_size)
+/*
+ * Find the next range to zero.  We do this in terms of bytes because
+ * that's what ocfs2_zero_extend() wants, and it is dealing with the
+ * pagecache.  We may return multiple extents.
+ *
+ * zero_start and zero_end are ocfs2_zero_extend()s current idea of what
+ * needs to be zeroed.  range_start and range_end return the next zeroing
+ * range.  A subsequent call should pass the previous range_end as its
+ * zero_start.  If range_end is 0, there's nothing to do.
+ *
+ * Unwritten extents are skipped over.  Refcounted extents are CoWd.
+ */
+static int ocfs2_zero_extend_get_range(struct inode *inode,
+                                      struct buffer_head *di_bh,
+                                      u64 zero_start, u64 zero_end,
+                                      u64 *range_start, u64 *range_end)
 {
-       int ret = 0;
-       u64 start_off;
-       struct super_block *sb = inode->i_sb;
+       int rc = 0, needs_cow = 0;
+       u32 p_cpos, zero_clusters = 0;
+       u32 zero_cpos =
+               zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+       u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
+       unsigned int num_clusters = 0;
+       unsigned int ext_flags = 0;
 
-       start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
-       while (start_off < zero_to_size) {
-               ret = ocfs2_write_zero_page(inode, start_off);
-               if (ret < 0) {
-                       mlog_errno(ret);
+       while (zero_cpos < last_cpos) {
+               rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
+                                       &num_clusters, &ext_flags);
+               if (rc) {
+                       mlog_errno(rc);
+                       goto out;
+               }
+
+               if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+                       zero_clusters = num_clusters;
+                       if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                               needs_cow = 1;
+                       break;
+               }
+
+               zero_cpos += num_clusters;
+       }
+       if (!zero_clusters) {
+               *range_end = 0;
+               goto out;
+       }
+
+       while ((zero_cpos + zero_clusters) < last_cpos) {
+               rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
+                                       &p_cpos, &num_clusters,
+                                       &ext_flags);
+               if (rc) {
+                       mlog_errno(rc);
                        goto out;
                }
 
-               start_off += sb->s_blocksize;
+               if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
+                       break;
+               if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                       needs_cow = 1;
+               zero_clusters += num_clusters;
+       }
+       if ((zero_cpos + zero_clusters) > last_cpos)
+               zero_clusters = last_cpos - zero_cpos;
+
+       if (needs_cow) {
+               rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters,
+                                       UINT_MAX);
+               if (rc) {
+                       mlog_errno(rc);
+                       goto out;
+               }
+       }
+
+       *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
+       *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
+                                            zero_cpos + zero_clusters);
+
+out:
+       return rc;
+}
+
+/*
+ * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
+ * has made sure that the entire range needs zeroing.
+ */
+static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
+                                  u64 range_end)
+{
+       int rc = 0;
+       u64 next_pos;
+       u64 zero_pos = range_start;
+
+       mlog(0, "range_start = %llu, range_end = %llu\n",
+            (unsigned long long)range_start,
+            (unsigned long long)range_end);
+       BUG_ON(range_start >= range_end);
+
+       while (zero_pos < range_end) {
+               next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+               if (next_pos > range_end)
+                       next_pos = range_end;
+               rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+               if (rc < 0) {
+                       mlog_errno(rc);
+                       break;
+               }
+               zero_pos = next_pos;
 
                /*
                 * Very large extends have the potential to lock up
@@ -810,16 +954,63 @@ static int ocfs2_zero_extend(struct inode *inode,
                cond_resched();
        }
 
-out:
+       return rc;
+}
+
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+                     loff_t zero_to_size)
+{
+       int ret = 0;
+       u64 zero_start, range_start = 0, range_end = 0;
+       struct super_block *sb = inode->i_sb;
+
+       zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
+       mlog(0, "zero_start %llu for i_size %llu\n",
+            (unsigned long long)zero_start,
+            (unsigned long long)i_size_read(inode));
+       while (zero_start < zero_to_size) {
+               ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
+                                                 zero_to_size,
+                                                 &range_start,
+                                                 &range_end);
+               if (ret) {
+                       mlog_errno(ret);
+                       break;
+               }
+               if (!range_end)
+                       break;
+               /* Trim the ends */
+               if (range_start < zero_start)
+                       range_start = zero_start;
+               if (range_end > zero_to_size)
+                       range_end = zero_to_size;
+
+               ret = ocfs2_zero_extend_range(inode, range_start,
+                                             range_end);
+               if (ret) {
+                       mlog_errno(ret);
+                       break;
+               }
+               zero_start = range_end;
+       }
+
        return ret;
 }
 
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+                         u64 new_i_size, u64 zero_to)
 {
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
+       /*
+        * Only quota files call this without a bh, and they can't be
+        * refcounted.
+        */
+       BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+       BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
+
        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
@@ -840,7 +1031,7 @@ int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
-       ret = ocfs2_zero_extend(inode, zero_to);
+       ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);
 
@@ -862,27 +1053,15 @@ static int ocfs2_extend_file(struct inode *inode,
                goto out;
 
        if (i_size_read(inode) == new_i_size)
-               goto out;
+               goto out;
        BUG_ON(new_i_size < i_size_read(inode));
 
-       /*
-        * Fall through for converting inline data, even if the fs
-        * supports sparse files.
-        *
-        * The check for inline data here is legal - nobody can add
-        * the feature since we have i_mutex. We must check it again
-        * after acquiring ip_alloc_sem though, as paths like mmap
-        * might have raced us to converting the inode to extents.
-        */
-       if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-           && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-               goto out_update_size;
-
        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
-        * here.
+        * here.  We even have to hold it for sparse files because there
+        * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);
 
@@ -899,14 +1078,16 @@ static int ocfs2_extend_file(struct inode *inode,
                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
-
                        mlog_errno(ret);
                        goto out;
                }
        }
 
-       if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-               ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
+       if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+               ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
+       else
+               ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
+                                           new_i_size);
 
        up_write(&oi->ip_alloc_sem);
 
index d66cf4f..97bf761 100644 (file)
@@ -54,8 +54,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
 int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size);
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
-                         u64 zero_to);
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+                         u64 new_i_size, u64 zero_to);
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+                     loff_t zero_to);
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
                  struct kstat *stat);
index 47878cf..625de9d 100644 (file)
@@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger
        return container_of(triggers, struct ocfs2_triggers, ot_triggers);
 }
 
-static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Quota blocks have their own trigger because the struct ocfs2_block_check
  * offset depends on the blocksize.
  */
-static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Directory blocks also have their own trigger because the
  * struct ocfs2_block_check offset depends on the blocksize.
  */
-static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
 
 static struct ocfs2_triggers di_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dinode, i_check),
@@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = {
 
 static struct ocfs2_triggers eb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_extent_block, h_check),
@@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = {
 
 static struct ocfs2_triggers rb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_refcount_block, rf_check),
@@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = {
 
 static struct ocfs2_triggers gd_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_group_desc, bg_check),
@@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = {
 
 static struct ocfs2_triggers db_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_db_commit_trigger,
+               .t_frozen = ocfs2_db_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
 };
 
 static struct ocfs2_triggers xb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_xattr_block, xb_check),
@@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = {
 
 static struct ocfs2_triggers dq_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_dq_commit_trigger,
+               .t_frozen = ocfs2_dq_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
 };
 
 static struct ocfs2_triggers dr_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_root_block, dr_check),
@@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = {
 
 static struct ocfs2_triggers dl_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_leaf, dl_check),
@@ -1936,7 +1936,7 @@ void ocfs2_orphan_scan_work(struct work_struct *work)
        mutex_lock(&os->os_lock);
        ocfs2_queue_orphan_scan(osb);
        if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
-               schedule_delayed_work(&os->os_orphan_scan_work,
+               queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
                                      ocfs2_orphan_scan_timeout());
        mutex_unlock(&os->os_lock);
 }
@@ -1976,8 +1976,8 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
                atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
        else {
                atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
-               schedule_delayed_work(&os->os_orphan_scan_work,
-                                     ocfs2_orphan_scan_timeout());
+               queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
+                                  ocfs2_orphan_scan_timeout());
        }
 }
 
index 3d74196..ec6adbf 100644 (file)
@@ -118,6 +118,7 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
 {
        unsigned int la_mb;
        unsigned int gd_mb;
+       unsigned int la_max_mb;
        unsigned int megs_per_slot;
        struct super_block *sb = osb->sb;
 
@@ -182,6 +183,12 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
        if (megs_per_slot < la_mb)
                la_mb = megs_per_slot;
 
+       /* We can't store more bits than we can in a block. */
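+       /* The local alloc bitmap has one bit per cluster; "* 8" is bits per byte. */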
+       la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
+                                               ocfs2_local_alloc_size(sb) * 8);
+       if (la_mb > la_max_mb)
+               la_mb = la_max_mb;
+
        return la_mb;
 }
 
index 2bb35fe..4607923 100644 (file)
@@ -775,7 +775,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
-               status = ocfs2_extend_no_holes(gqinode,
+               status = ocfs2_extend_no_holes(gqinode, NULL,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                if (status < 0)
index 8bd70d4..dc78764 100644 (file)
@@ -971,7 +971,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
        u64 p_blkno;
 
        /* We are protected by dqio_sem so no locking needed */
-       status = ocfs2_extend_no_holes(lqinode,
+       status = ocfs2_extend_no_holes(lqinode, NULL,
                                       lqinode->i_size + 2 * sb->s_blocksize,
                                       lqinode->i_size);
        if (status < 0) {
@@ -1114,7 +1114,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
                return ocfs2_local_quota_add_chunk(sb, type, offset);
 
        /* We are protected by dqio_sem so no locking needed */
-       status = ocfs2_extend_no_holes(lqinode,
+       status = ocfs2_extend_no_holes(lqinode, NULL,
                                       lqinode->i_size + sb->s_blocksize,
                                       lqinode->i_size);
        if (status < 0) {
index 4793f36..3ac5aa7 100644 (file)
@@ -2931,6 +2931,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
        offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
        end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
+       /*
+        * We only duplicate pages until we reach the page that contains i_size - 1.
+        * So trim 'end' to i_size.
+        */
+       if (end > i_size_read(context->inode))
+               end = i_size_read(context->inode);
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
@@ -4166,6 +4172,12 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
        struct inode *inode = old_dentry->d_inode;
        struct buffer_head *new_bh = NULL;
 
+       if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+               ret = -EINVAL;
+               mlog_errno(ret);
+               goto out;
+       }
+
        ret = filemap_fdatawrite(inode->i_mapping);
        if (ret) {
                mlog_errno(ret);
index f4c2a9e..a8e6a95 100644 (file)
@@ -741,7 +741,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
                     le16_to_cpu(bg->bg_free_bits_count));
        le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
                     le16_to_cpu(bg->bg_bits));
-       cl->cl_recs[alloc_rec].c_blkno  = cpu_to_le64(bg->bg_blkno);
+       cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
        if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
                le16_add_cpu(&cl->cl_next_free_rec, 1);
 
index e97b348..d03469f 100644 (file)
@@ -709,7 +709,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
                                         struct ocfs2_xattr_value_buf *vb,
                                         struct ocfs2_xattr_set_ctxt *ctxt)
 {
-       int status = 0;
+       int status = 0, credits;
        handle_t *handle = ctxt->handle;
        enum ocfs2_alloc_restarted why;
        u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
@@ -719,38 +719,54 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
 
        ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
 
-       status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
-                             OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
+       while (clusters_to_add) {
+               status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
+                                      OCFS2_JOURNAL_ACCESS_WRITE);
+               if (status < 0) {
+                       mlog_errno(status);
+                       break;
+               }
 
-       prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
-       status = ocfs2_add_clusters_in_btree(handle,
-                                            &et,
-                                            &logical_start,
-                                            clusters_to_add,
-                                            0,
-                                            ctxt->data_ac,
-                                            ctxt->meta_ac,
-                                            &why);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
+               prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
+               status = ocfs2_add_clusters_in_btree(handle,
+                                                    &et,
+                                                    &logical_start,
+                                                    clusters_to_add,
+                                                    0,
+                                                    ctxt->data_ac,
+                                                    ctxt->meta_ac,
+                                                    &why);
+               if ((status < 0) && (status != -EAGAIN)) {
+                       if (status != -ENOSPC)
+                               mlog_errno(status);
+                       break;
+               }
 
-       ocfs2_journal_dirty(handle, vb->vb_bh);
+               ocfs2_journal_dirty(handle, vb->vb_bh);
 
-       clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
+               clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
+                                        prev_clusters;
 
-       /*
-        * We should have already allocated enough space before the transaction,
-        * so no need to restart.
-        */
-       BUG_ON(why != RESTART_NONE || clusters_to_add);
-
-leave:
+               if (why != RESTART_NONE && clusters_to_add) {
+                       /*
+                        * We can only fail if the alloc file doesn't give
+                        * up enough clusters.
+                        */
+                       BUG_ON(why == RESTART_META);
+
+                       mlog(0, "restarting xattr value extension for %u"
+                            " clusters.\n", clusters_to_add);
+                       credits = ocfs2_calc_extend_credits(inode->i_sb,
+                                                           &vb->vb_xv->xr_list,
+                                                           clusters_to_add);
+                       status = ocfs2_extend_trans(handle, credits);
+                       if (status < 0) {
+                               status = -ENOMEM;
+                               mlog_errno(status);
+                               break;
+                       }
+               }
+       }
 
        return status;
 }
@@ -6788,16 +6804,15 @@ out:
        return ret;
 }
 
-static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+static int ocfs2_reflink_xattr_bucket(handle_t *handle,
                                u64 blkno, u64 new_blkno, u32 clusters,
+                               u32 *cpos, int num_buckets,
                                struct ocfs2_alloc_context *meta_ac,
                                struct ocfs2_alloc_context *data_ac,
                                struct ocfs2_reflink_xattr_tree_args *args)
 {
        int i, j, ret = 0;
        struct super_block *sb = args->reflink->old_inode->i_sb;
-       u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
-       u32 num_buckets = clusters * bpc;
        int bpb = args->old_bucket->bu_blocks;
        struct ocfs2_xattr_value_buf vb = {
                .vb_access = ocfs2_journal_access,
@@ -6816,14 +6831,6 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                        break;
                }
 
-               /*
-                * The real bucket num in this series of blocks is stored
-                * in the 1st bucket.
-                */
-               if (i == 0)
-                       num_buckets = le16_to_cpu(
-                               bucket_xh(args->old_bucket)->xh_num_buckets);
-
                ret = ocfs2_xattr_bucket_journal_access(handle,
                                                args->new_bucket,
                                                OCFS2_JOURNAL_ACCESS_CREATE);
@@ -6837,6 +6844,18 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                               bucket_block(args->old_bucket, j),
                               sb->s_blocksize);
 
+               /*
+                * Record the start cpos so that we can use it to initialize
+                * our xattr tree.  We also set xh_num_buckets for the new
+                * bucket.
+                */
+               if (i == 0) {
+                       *cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
+                                           xh_entries[0].xe_name_hash);
+                       bucket_xh(args->new_bucket)->xh_num_buckets =
+                               cpu_to_le16(num_buckets);
+               }
+
                ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
 
                ret = ocfs2_reflink_xattr_header(handle, args->reflink,
@@ -6866,6 +6885,7 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                }
 
                ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
                ocfs2_xattr_bucket_relse(args->old_bucket);
                ocfs2_xattr_bucket_relse(args->new_bucket);
        }
@@ -6874,6 +6894,75 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
        ocfs2_xattr_bucket_relse(args->new_bucket);
        return ret;
 }
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+                               struct inode *inode,
+                               struct ocfs2_reflink_xattr_tree_args *args,
+                               struct ocfs2_extent_tree *et,
+                               struct ocfs2_alloc_context *meta_ac,
+                               struct ocfs2_alloc_context *data_ac,
+                               u64 blkno, u32 cpos, u32 len)
+{
+       int ret, first_inserted = 0;
+       u32 p_cluster, num_clusters, reflink_cpos = 0;
+       u64 new_blkno;
+       unsigned int num_buckets, reflink_buckets;
+       unsigned int bpc =
+               ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
+
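+       /* The total bucket count for this extent record is stored in the 1st bucket. */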
+       ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+       num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
+       ocfs2_xattr_bucket_relse(args->old_bucket);
+
+       while (len && num_buckets) {
+               ret = ocfs2_claim_clusters(handle, data_ac,
+                                          1, &p_cluster, &num_clusters);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
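+               /* Copy at most as many buckets as fit in the newly claimed clusters. */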
+               reflink_buckets = min(num_buckets, bpc * num_clusters);
+
+               ret = ocfs2_reflink_xattr_bucket(handle, blkno,
+                                                new_blkno, num_clusters,
+                                                &reflink_cpos, reflink_buckets,
+                                                meta_ac, data_ac, args);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               /*
+                * For the 1st allocated cluster, we make it use the same cpos
+                * so that the xattr tree looks the same as the original one
+                * in most cases.
+                */
+               if (!first_inserted) {
+                       reflink_cpos = cpos;
+                       first_inserted = 1;
+               }
+               ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
+                                         num_clusters, 0, meta_ac);
+               if (ret)
+                       mlog_errno(ret);
+
+               mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+                    (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+
+               len -= num_clusters;
+               blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
+               num_buckets -= reflink_buckets;
+       }
+out:
+       return ret;
+}
+
 /*
  * Create the same xattr extent record in the new inode's xattr tree.
  */
@@ -6885,8 +6974,6 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
                                   void *para)
 {
        int ret, credits = 0;
-       u32 p_cluster, num_clusters;
-       u64 new_blkno;
        handle_t *handle;
        struct ocfs2_reflink_xattr_tree_args *args =
                        (struct ocfs2_reflink_xattr_tree_args *)para;
@@ -6895,6 +6982,9 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_extent_tree et;
 
+       mlog(0, "reflink xattr buckets %llu len %u\n",
+            (unsigned long long)blkno, len);
+
        ocfs2_init_xattr_tree_extent_tree(&et,
                                          INODE_CACHE(args->reflink->new_inode),
                                          args->new_blk_bh);
@@ -6914,32 +7004,12 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_claim_clusters(handle, data_ac,
-                                  len, &p_cluster, &num_clusters);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
-
-       mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
-            (unsigned long long)blkno, (unsigned long long)new_blkno, len);
-       ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
-                                         meta_ac, data_ac, args);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
-            (unsigned long long)new_blkno, len, cpos);
-       ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
-                                 len, 0, meta_ac);
+       ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
+                                         meta_ac, data_ac,
+                                         blkno, cpos, len);
        if (ret)
                mlog_errno(ret);
 
-out_commit:
        ocfs2_commit_trans(osb, handle);
 
 out:
index 3e73de5..fc84976 100644 (file)
@@ -74,6 +74,7 @@ int ibm_partition(struct parsed_partitions *state)
        } *label;
        unsigned char *data;
        Sector sect;
+       sector_t labelsect;
 
        res = 0;
        blocksize = bdev_logical_block_size(bdev);
@@ -97,11 +98,20 @@ int ibm_partition(struct parsed_partitions *state)
            ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
                goto out_freeall;
 
+       /*
+        * Special case for FBA disks: label sector does not depend on
+        * blocksize.
+        */
+       if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+           (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+               labelsect = info->label_block;
+       else
+               labelsect = info->label_block * (blocksize >> 9);
+
        /*
         * Get volume label, extract name and type.
         */
-       data = read_part_sector(state, info->label_block*(blocksize/512),
-                               &sect);
+       data = read_part_sector(state, labelsect, &sect);
        if (data == NULL)
                goto out_readerr;
 
index 9b58d38..fff6572 100644 (file)
@@ -176,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
                if (tracer)
                        tpid = task_pid_nr_ns(tracer, ns);
        }
-       cred = get_cred((struct cred *) __task_cred(p));
+       cred = get_task_cred(p);
        seq_printf(m,
                "State:\t%s\n"
                "Tgid:\t%d\n"
index 12c233d..437d2ca 100644 (file)
@@ -676,7 +676,7 @@ static void prune_dqcache(int count)
  * This is called from kswapd when we think we need some
  * more memory
  */
-static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        if (nr) {
                spin_lock(&dq_list_lock);
index f71246b..a7ac78f 100644 (file)
@@ -28,6 +28,7 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
        struct sysfs_dirent *target_sd = NULL;
        struct sysfs_dirent *sd = NULL;
        struct sysfs_addrm_cxt acxt;
+       enum kobj_ns_type ns_type;
        int error;
 
        BUG_ON(!name);
@@ -58,16 +59,29 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
        if (!sd)
                goto out_put;
 
-       if (sysfs_ns_type(parent_sd))
+       ns_type = sysfs_ns_type(parent_sd);
+       if (ns_type)
                sd->s_ns = target->ktype->namespace(target);
        sd->s_symlink.target_sd = target_sd;
        target_sd = NULL;       /* reference is now owned by the symlink */
 
        sysfs_addrm_start(&acxt, parent_sd);
-       if (warn)
-               error = sysfs_add_one(&acxt, sd);
-       else
-               error = __sysfs_add_one(&acxt, sd);
+       /* Symlinks must be between directories with the same ns_type */
+       if (!ns_type ||
+           (ns_type == sysfs_ns_type(sd->s_symlink.target_sd->s_parent))) {
+               if (warn)
+                       error = sysfs_add_one(&acxt, sd);
+               else
+                       error = __sysfs_add_one(&acxt, sd);
+       } else {
+               error = -EINVAL;
+               WARN(1, KERN_WARNING
+                       "sysfs: symlink across ns_types %s/%s -> %s/%s\n",
+                       parent_sd->s_name,
+                       sd->s_name,
+                       sd->s_symlink.target_sd->s_parent->s_name,
+                       sd->s_symlink.target_sd->s_name);
+       }
        sysfs_addrm_finish(&acxt);
 
        if (error)
@@ -122,7 +136,7 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
 {
        const void *ns = NULL;
        spin_lock(&sysfs_assoc_lock);
-       if (targ->sd)
+       if (targ->sd && sysfs_ns_type(kobj->sd))
                ns = targ->sd->s_ns;
        spin_unlock(&sysfs_assoc_lock);
        sysfs_hash_and_remove(kobj->sd, ns, name);
index 02feb59..0b20111 100644 (file)
@@ -277,7 +277,7 @@ static int kick_a_thread(void)
        return 0;
 }
 
-int ubifs_shrinker(int nr, gfp_t gfp_mask)
+int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        int freed, contention = 0;
        long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
index 2eef553..0431087 100644 (file)
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
 int ubifs_tnc_end_commit(struct ubifs_info *c);
 
 /* shrinker.c */
-int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask);
+int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 /* commit.c */
 int ubifs_bg_thread(void *info);
index 649ade8..2ee3f7a 100644 (file)
@@ -45,7 +45,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
+STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 static struct shrinker xfs_buf_shake = {
        .shrink = xfsbufd_wakeup,
@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages(
                                        __func__, gfp_mask);
 
                        XFS_STATS_INC(xb_page_retries);
-                       xfsbufd_wakeup(0, gfp_mask);
+                       xfsbufd_wakeup(NULL, 0, gfp_mask);
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues(
 
 STATIC int
 xfsbufd_wakeup(
+       struct shrinker         *shrink,
        int                     priority,
        gfp_t                   mask)
 {
index f2d1718..80938c7 100644 (file)
@@ -1883,7 +1883,6 @@ init_xfs_fs(void)
                goto out_cleanup_procfs;
 
        vfs_initquota();
-       xfs_inode_shrinker_init();
 
        error = register_filesystem(&xfs_fs_type);
        if (error)
@@ -1911,7 +1910,6 @@ exit_xfs_fs(void)
 {
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
-       xfs_inode_shrinker_destroy();
        xfs_sysctl_unregister();
        xfs_cleanup_procfs();
        xfs_buf_terminate();
index ef7f021..a51a07c 100644 (file)
@@ -144,6 +144,41 @@ restart:
        return last_error;
 }
 
+/*
+ * Select the next per-ag structure to iterate during the walk. The reclaim
+ * walk is optimised only to walk AGs with reclaimable inodes in them.
+ */
+static struct xfs_perag *
+xfs_inode_ag_iter_next_pag(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          *first,
+       int                     tag)
+{
+       struct xfs_perag        *pag = NULL;
+
+       if (tag == XFS_ICI_RECLAIM_TAG) {
+               int found;
+               int ref;
+
+               spin_lock(&mp->m_perag_lock);
+               found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+                               (void **)&pag, *first, 1, tag);
+               if (found <= 0) {
+                       spin_unlock(&mp->m_perag_lock);
+                       return NULL;
+               }
+               *first = pag->pag_agno + 1;
+               /* open coded pag reference increment */
+               ref = atomic_inc_return(&pag->pag_ref);
+               spin_unlock(&mp->m_perag_lock);
+               trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
+       } else {
+               pag = xfs_perag_get(mp, *first);
+               (*first)++;
+       }
+       return pag;
+}
+
 int
 xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
@@ -154,16 +189,15 @@ xfs_inode_ag_iterator(
        int                     exclusive,
        int                     *nr_to_scan)
 {
+       struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     nr;
 
        nr = nr_to_scan ? *nr_to_scan : INT_MAX;
-       for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-               struct xfs_perag        *pag;
-
-               pag = xfs_perag_get(mp, ag);
+       ag = 0;
+       while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
                error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
                                                exclusive, &nr);
                xfs_perag_put(pag);
@@ -640,6 +674,17 @@ __xfs_inode_set_reclaim_tag(
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);
+
+       if (!pag->pag_ici_reclaimable) {
+               /* propagate the reclaim tag up into the perag radix tree */
+               spin_lock(&ip->i_mount->m_perag_lock);
+               radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+                               XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+                               XFS_ICI_RECLAIM_TAG);
+               spin_unlock(&ip->i_mount->m_perag_lock);
+               trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
+                                                       -1, _RET_IP_);
+       }
        pag->pag_ici_reclaimable++;
 }
 
@@ -674,6 +719,16 @@ __xfs_inode_clear_reclaim_tag(
        radix_tree_tag_clear(&pag->pag_ici_root,
                        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
        pag->pag_ici_reclaimable--;
+       if (!pag->pag_ici_reclaimable) {
+               /* clear the reclaim tag from the perag radix tree */
+               spin_lock(&ip->i_mount->m_perag_lock);
+               radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+                               XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+                               XFS_ICI_RECLAIM_TAG);
+               spin_unlock(&ip->i_mount->m_perag_lock);
+               trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+                                                       -1, _RET_IP_);
+       }
 }
 
 /*
@@ -828,83 +883,52 @@ xfs_reclaim_inodes(
 
 /*
  * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
 static int
 xfs_reclaim_inode_shrink(
+       struct shrinker *shrink,
        int             nr_to_scan,
        gfp_t           gfp_mask)
 {
        struct xfs_mount *mp;
        struct xfs_perag *pag;
        xfs_agnumber_t  ag;
-       int             reclaimable = 0;
+       int             reclaimable;
 
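+       /* The shrinker is embedded in struct xfs_mount, so recover the mount from it. */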
+       mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
 
-               down_read(&xfs_mount_list_lock);
-               list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-                       xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+               xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
                                        XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-                       if (nr_to_scan <= 0)
-                               break;
-               }
-               up_read(&xfs_mount_list_lock);
-       }
+               /* if we don't exhaust the scan, don't bother coming back */
+               if (nr_to_scan > 0)
+                       return -1;
+       }
 
-       down_read(&xfs_mount_list_lock);
-       list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-               for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-                       pag = xfs_perag_get(mp, ag);
-                       reclaimable += pag->pag_ici_reclaimable;
-                       xfs_perag_put(pag);
-               }
+       reclaimable = 0;
+       ag = 0;
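+       /* Only AGs tagged as having reclaimable inodes are visited here. */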
+       while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
+                                       XFS_ICI_RECLAIM_TAG))) {
+               reclaimable += pag->pag_ici_reclaimable;
+               xfs_perag_put(pag);
        }
-       up_read(&xfs_mount_list_lock);
        return reclaimable;
 }
 
-static struct shrinker xfs_inode_shrinker = {
-       .shrink = xfs_reclaim_inode_shrink,
-       .seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
-       init_rwsem(&xfs_mount_list_lock);
-       register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
-       ASSERT(list_empty(&xfs_mount_list));
-       unregister_shrinker(&xfs_inode_shrinker);
-}
-
 void
 xfs_inode_shrinker_register(
        struct xfs_mount        *mp)
 {
-       down_write(&xfs_mount_list_lock);
-       list_add_tail(&mp->m_mplist, &xfs_mount_list);
-       up_write(&xfs_mount_list_lock);
+       mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+       mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+       register_shrinker(&mp->m_inode_shrink);
 }
 
 void
 xfs_inode_shrinker_unregister(
        struct xfs_mount        *mp)
 {
-       down_write(&xfs_mount_list_lock);
-       list_del(&mp->m_mplist);
-       up_write(&xfs_mount_list_lock);
+       unregister_shrinker(&mp->m_inode_shrink);
 }
index cdcbaac..e28139a 100644 (file)
@@ -55,8 +55,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp,
        int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
        int flags, int tag, int write_lock, int *nr_to_scan);
 
-void xfs_inode_shrinker_init(void);
-void xfs_inode_shrinker_destroy(void);
 void xfs_inode_shrinker_register(struct xfs_mount *mp);
 void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
 
index 73d5aa1..3028206 100644 (file)
@@ -124,7 +124,10 @@ DEFINE_EVENT(xfs_perag_class, name,        \
                 unsigned long caller_ip),                                      \
        TP_ARGS(mp, agno, refcount, caller_ip))
 DEFINE_PERAG_REF_EVENT(xfs_perag_get);
+DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
 DEFINE_PERAG_REF_EVENT(xfs_perag_put);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
 
 TRACE_EVENT(xfs_attr_list_node_descend,
        TP_PROTO(struct xfs_attr_list_context *ctx,
index 8c117ff..67c0183 100644 (file)
@@ -69,7 +69,7 @@ STATIC void   xfs_qm_list_destroy(xfs_dqlist_t *);
 
 STATIC int     xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int     xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int     xfs_qm_shake(int, gfp_t);
+STATIC int     xfs_qm_shake(struct shrinker *, int, gfp_t);
 
 static struct shrinker xfs_qm_shaker = {
        .shrink = xfs_qm_shake,
@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist(
  */
 /* ARGSUSED */
 STATIC int
-xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
+xfs_qm_shake(
+       struct shrinker *shrink,
+       int             nr_to_scan,
+       gfp_t           gfp_mask)
 {
        int     ndqused, nfree, n;
 
index 1d2c7ee..5761087 100644 (file)
@@ -259,7 +259,7 @@ typedef struct xfs_mount {
        wait_queue_head_t       m_wait_single_sync_task;
        __int64_t               m_update_flags; /* sb flags we need to update
                                                   on the next remount,rw */
-       struct list_head        m_mplist;       /* inode shrinker mount list */
+       struct shrinker         m_inode_shrink; /* inode reclaim shrinker */
 } xfs_mount_t;
 
 /*
index da565a4..a68ca8a 100644 (file)
@@ -48,7 +48,7 @@ struct acpi_power_register {
        u8 space_id;
        u8 bit_width;
        u8 bit_offset;
-       u8 reserved;
+       u8 access_size;
        u64 address;
 } __attribute__ ((packed));
 
@@ -63,6 +63,7 @@ struct acpi_processor_cx {
        u32 power;
        u32 usage;
        u64 time;
+       u8 bm_sts_skip;
        char desc[ACPI_CX_DESC_LEN];
 };
 
index ae6b88e..4e7ae60 100644 (file)
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
+/*
+ * Align to a 32 byte boundary, which is the alignment
+ * gcc 4.5 uses for a struct
+ */
+#define STRUCT_ALIGN() . = ALIGN(32)
+
 /* The actual configuration determine if the init/exit sections
  * are handled as text/data or they can be discarded (which
  * often happens at runtime)
        LIKELY_PROFILE()                                                \
        BRANCH_PROFILE()                                                \
        TRACE_PRINTKS()                                                 \
+                                                                       \
+       STRUCT_ALIGN();                                                 \
        FTRACE_EVENTS()                                                 \
+                                                                       \
+       STRUCT_ALIGN();                                                 \
        TRACE_SYSCALLS()
 
 /*
  */
 #define INIT_TASK_DATA_SECTION(align)                                  \
        . = ALIGN(align);                                               \
-       .data..init_task : {                                            \
+       .data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {  \
                INIT_TASK_DATA(align)                                   \
        }
 
index 224a38c..ccf94dc 100644 (file)
@@ -253,7 +253,7 @@ int acpi_resources_are_enforced(void);
 #ifdef CONFIG_PM_SLEEP
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
-void __init acpi_s4_no_nvs(void);
+void __init acpi_nvs_nosave(void);
 #endif /* CONFIG_PM_SLEEP */
 
 struct acpi_osc_context {
index 75c0fa8..4d2c395 100644 (file)
@@ -153,6 +153,7 @@ struct cred {
 extern void __put_cred(struct cred *);
 extern void exit_creds(struct task_struct *);
 extern int copy_creds(struct task_struct *, unsigned long);
+extern const struct cred *get_task_cred(struct task_struct *);
 extern struct cred *cred_alloc_blank(void);
 extern struct cred *prepare_creds(void);
 extern struct cred *prepare_exec_creds(void);
@@ -273,33 +274,18 @@ static inline void put_cred(const struct cred *_cred)
  * @task: The task to query
  *
  * Access the objective credentials of a task.  The caller must hold the RCU
- * readlock.
+ * readlock or the task must be dead and unable to change its own credentials.
  *
- * The caller must make sure task doesn't go away, either by holding a ref on
- * task or by holding tasklist_lock to prevent it from being unlinked.
+ * The result of this function should not be passed directly to get_cred();
+ * rather get_task_cred() should be used instead.
  */
-#define __task_cred(task) \
-       ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
-
-/**
- * get_task_cred - Get another task's objective credentials
- * @task: The task to query
- *
- * Get the objective credentials of a task, pinning them so that they can't go
- * away.  Accessing a task's credentials directly is not permitted.
- *
- * The caller must make sure task doesn't go away, either by holding a ref on
- * task or by holding tasklist_lock to prevent it from being unlinked.
- */
-#define get_task_cred(task)                            \
-({                                                     \
-       struct cred *__cred;                            \
-       rcu_read_lock();                                \
-       __cred = (struct cred *) __task_cred((task));   \
-       get_cred(__cred);                               \
-       rcu_read_unlock();                              \
-       __cred;                                         \
-})
+#define __task_cred(task)                                              \
+       ({                                                              \
+               const struct task_struct *__t = (task);                 \
+               rcu_dereference_check(__t->real_cred,                   \
+                                     rcu_read_lock_held() ||           \
+                                     task_is_dead(__t));               \
+       })
 
 /**
  * get_current_cred - Get the current task's subjective credentials
index 8e5a9df..e7445df 100644 (file)
@@ -873,6 +873,8 @@ struct fb_info {
 static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
        struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
                        + max_num * sizeof(struct aperture), GFP_KERNEL);
+       if (!a)
+               return NULL;
        a->count = max_num;
        return a;
 }
index 013dc52..d147461 100644 (file)
@@ -61,7 +61,8 @@ struct files_struct {
        (rcu_dereference_check((fdtfd), \
                               rcu_read_lock_held() || \
                               lockdep_is_held(&(files)->file_lock) || \
-                              atomic_read(&(files)->count) == 1))
+                              atomic_read(&(files)->count) == 1 || \
+                              rcu_my_thread_group_empty()))
 
 #define files_fdtable(files) \
                (rcu_dereference_check_fdtable((files), (files)->fdt))
index 9bf6870..a986ff5 100644 (file)
@@ -46,31 +46,31 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
 
 #else
 
-void i8042_lock_chip(void)
+static inline void i8042_lock_chip(void)
 {
 }
 
-void i8042_unlock_chip(void)
+static inline void i8042_unlock_chip(void)
 {
 }
 
-int i8042_command(unsigned char *param, int command)
+static inline int i8042_command(unsigned char *param, int command)
 {
        return -ENODEV;
 }
 
-bool i8042_check_port_owner(const struct serio *serio)
+static inline bool i8042_check_port_owner(const struct serio *serio)
 {
        return false;
 }
 
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
                                        struct serio *serio))
 {
        return -ENODEV;
 }
 
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
                                       struct serio *serio))
 {
        return -ENODEV;
index 9ea047a..1ffaeff 100644 (file)
@@ -67,6 +67,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
        }
 }
 
+extern void macvlan_common_setup(struct net_device *dev);
+
 extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                                  struct nlattr *tb[], struct nlattr *data[],
                                  int (*receive)(struct sk_buff *skb),
index a4d2e9f..adf832d 100644 (file)
@@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 struct jbd2_buffer_trigger_type {
        /*
-        * Fired just before a buffer is written to the journal.
-        * mapped_data is a mapped buffer that is the frozen data for
-        * commit.
+        * Fired at the moment the data to write to the journal are known to be
+        * stable - so either at the moment b_frozen_data is created or just
+        * before a buffer is written to the journal.  mapped_data is a mapped
+        * buffer that is the frozen data for commit.
         */
-       void (*t_commit)(struct jbd2_buffer_trigger_type *type,
+       void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
                         struct buffer_head *bh, void *mapped_data,
                         size_t size);
 
@@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type {
                        struct buffer_head *bh);
 };
 
-extern void jbd2_buffer_commit_trigger(struct journal_head *jh,
+extern void jbd2_buffer_frozen_trigger(struct journal_head *jh,
                                       void *mapped_data,
                                       struct jbd2_buffer_trigger_type *triggers);
 extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
deleted file mode 100644 (file)
index f3d1433..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _LINUX_LMB_H
-#define _LINUX_LMB_H
-#ifdef __KERNEL__
-
-/*
- * Logical memory blocks.
- *
- * Copyright (C) 2001 Peter Bergner, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#define MAX_LMB_REGIONS 128
-
-struct lmb_property {
-       u64 base;
-       u64 size;
-};
-
-struct lmb_region {
-       unsigned long cnt;
-       u64 size;
-       struct lmb_property region[MAX_LMB_REGIONS+1];
-};
-
-struct lmb {
-       unsigned long debug;
-       u64 rmo_size;
-       struct lmb_region memory;
-       struct lmb_region reserved;
-};
-
-extern struct lmb lmb;
-
-extern void __init lmb_init(void);
-extern void __init lmb_analyze(void);
-extern long lmb_add(u64 base, u64 size);
-extern long lmb_remove(u64 base, u64 size);
-extern long __init lmb_free(u64 base, u64 size);
-extern long __init lmb_reserve(u64 base, u64 size);
-extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
-                               u64 (*nid_range)(u64, u64, int *));
-extern u64 __init lmb_alloc(u64 size, u64 align);
-extern u64 __init lmb_alloc_base(u64 size,
-               u64, u64 max_addr);
-extern u64 __init __lmb_alloc_base(u64 size,
-               u64 align, u64 max_addr);
-extern u64 __init lmb_phys_mem_size(void);
-extern u64 lmb_end_of_DRAM(void);
-extern void __init lmb_enforce_memory_limit(u64 memory_limit);
-extern int __init lmb_is_reserved(u64 addr);
-extern int lmb_is_region_reserved(u64 base, u64 size);
-extern int lmb_find(struct lmb_property *res);
-
-extern void lmb_dump_all(void);
-
-static inline u64
-lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
-{
-       return type->region[region_nr].size;
-}
-static inline u64
-lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
-{
-       return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
-}
-static inline u64
-lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
-{
-       return type->region[region_nr].base >> PAGE_SHIFT;
-}
-static inline u64
-lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
-{
-       return lmb_start_pfn(type, region_nr) +
-              lmb_size_pages(type, region_nr);
-}
-
-#include <asm/lmb.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_LMB_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
new file mode 100644 (file)
index 0000000..a59faf2
--- /dev/null
@@ -0,0 +1,89 @@
+#ifndef _LINUX_MEMBLOCK_H
+#define _LINUX_MEMBLOCK_H
+#ifdef __KERNEL__
+
+/*
+ * Logical memory blocks.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#define MAX_MEMBLOCK_REGIONS 128
+
+struct memblock_property {
+       u64 base;
+       u64 size;
+};
+
+struct memblock_region {
+       unsigned long cnt;
+       u64 size;
+       struct memblock_property region[MAX_MEMBLOCK_REGIONS+1];
+};
+
+struct memblock {
+       unsigned long debug;
+       u64 rmo_size;
+       struct memblock_region memory;
+       struct memblock_region reserved;
+};
+
+extern struct memblock memblock;
+
+extern void __init memblock_init(void);
+extern void __init memblock_analyze(void);
+extern long memblock_add(u64 base, u64 size);
+extern long memblock_remove(u64 base, u64 size);
+extern long __init memblock_free(u64 base, u64 size);
+extern long __init memblock_reserve(u64 base, u64 size);
+extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
+                               u64 (*nid_range)(u64, u64, int *));
+extern u64 __init memblock_alloc(u64 size, u64 align);
+extern u64 __init memblock_alloc_base(u64 size,
+               u64, u64 max_addr);
+extern u64 __init __memblock_alloc_base(u64 size,
+               u64 align, u64 max_addr);
+extern u64 __init memblock_phys_mem_size(void);
+extern u64 memblock_end_of_DRAM(void);
+extern void __init memblock_enforce_memory_limit(u64 memory_limit);
+extern int __init memblock_is_reserved(u64 addr);
+extern int memblock_is_region_reserved(u64 base, u64 size);
+extern int memblock_find(struct memblock_property *res);
+
+extern void memblock_dump_all(void);
+
+static inline u64
+memblock_size_bytes(struct memblock_region *type, unsigned long region_nr)
+{
+       return type->region[region_nr].size;
+}
+static inline u64
+memblock_size_pages(struct memblock_region *type, unsigned long region_nr)
+{
+       return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
+}
+static inline u64
+memblock_start_pfn(struct memblock_region *type, unsigned long region_nr)
+{
+       return type->region[region_nr].base >> PAGE_SHIFT;
+}
+static inline u64
+memblock_end_pfn(struct memblock_region *type, unsigned long region_nr)
+{
+       return memblock_start_pfn(type, region_nr) +
+              memblock_size_pages(type, region_nr);
+}
+
+#include <asm/memblock.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_MEMBLOCK_H */
index b969efb..a2b4804 100644 (file)
@@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
  * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrinker {
-       int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+       int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
        int seeks;      /* seeks to recreate an obj */
 
        /* These are for internal use */
index 77c2ae5..bad4d12 100644 (file)
@@ -493,8 +493,15 @@ extern int nfs_wb_all(struct inode *inode);
 extern int nfs_wb_page(struct inode *inode, struct page* page);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+extern int  nfs_commit_inode(struct inode *, int);
 extern struct nfs_write_data *nfs_commitdata_alloc(void);
 extern void nfs_commit_free(struct nfs_write_data *wdata);
+#else
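+/* NFSv2 has no unstable writes, so there is nothing to commit. */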
+static inline int
+nfs_commit_inode(struct inode *inode, int how)
+{
+       return 0;
+}
 #endif
 
 static inline int
index 7cb0084..f26fda7 100644 (file)
@@ -288,6 +288,7 @@ struct pci_dev {
         */
        unsigned int    irq;
        struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+       resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
 
        /* These fields are used by common fixups */
        unsigned int    transparent:1;  /* Transparent PCI bridge */
diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h
new file mode 100644 (file)
index 0000000..4892f59
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * tps6507x.h  --  Voltage regulation for the Texas Instruments TPS6507X
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef REGULATOR_TPS6507X
+#define REGULATOR_TPS6507X
+
+/**
+ * tps6507x_reg_platform_data - platform data for tps6507x
+ * @defdcdc_default: Defines whether the DCDC high or low register controls
+ *     output voltage by default. Valid for DCDC2 and DCDC3 outputs only.
+ */
+struct tps6507x_reg_platform_data {
+       bool defdcdc_default;
+};
+
+#endif
index 747fcae..0478888 100644 (file)
@@ -214,6 +214,7 @@ extern char ___assert_task_state[1 - 2*!!(
 
 #define task_is_traced(task)   ((task->state & __TASK_TRACED) != 0)
 #define task_is_stopped(task)  ((task->state & __TASK_STOPPED) != 0)
+#define task_is_dead(task)     ((task)->exit_state != 0)
 #define task_is_stopped_or_traced(task)        \
                        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task) \
index 7f614ce..13ebb54 100644 (file)
@@ -124,7 +124,8 @@ extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)                               \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_enter_##sname;            \
        static struct ftrace_event_call __used                          \
@@ -138,7 +139,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_exit_##sname;             \
        static struct ftrace_event_call __used                          \
index 4496322..609e8ca 100644 (file)
@@ -45,6 +45,7 @@ struct sysrq_key_op {
  */
 
 void handle_sysrq(int key, struct tty_struct *tty);
+void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
 int register_sysrq_key(int key, struct sysrq_key_op *op);
 int unregister_sysrq_key(int key, struct sysrq_key_op *op);
 struct sysrq_key_op *__sysrq_get_key_op(int key);
index c9a9759..814f294 100644 (file)
@@ -29,6 +29,7 @@
  */
 
 #ifndef LINUX_VGA_H
+#define LINUX_VGA_H
 
 #include <asm/vga.h>
 
index fd88226..9696a5e 100644 (file)
@@ -799,7 +799,7 @@ do {                                                                        \
                X##_e -= (_FP_W_TYPE_SIZE - rsize);                     \
        X##_e = rsize - X##_e - 1;                                      \
                                                                        \
-       if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs < X##_e)    \
+       if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs <= X##_e)   \
          __FP_FRAC_SRS_1(ur_, (X##_e - _FP_WFRACBITS_##fs + 1), rsize);\
        _FP_FRAC_DISASSEMBLE_##wc(X, ur_, rsize);                       \
        if ((_FP_WFRACBITS_##fs - X##_e - 1) > 0)                       \
index 731150d..0a691ea 100644 (file)
@@ -1224,12 +1224,7 @@ static inline void sk_tx_queue_clear(struct sock *sk)
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-       return sk->sk_tx_queue_mapping;
-}
-
-static inline bool sk_tx_queue_recorded(const struct sock *sk)
-{
-       return (sk && sk->sk_tx_queue_mapping >= 0);
+       return sk ? sk->sk_tx_queue_mapping : -1;
 }
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
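
With this change sk_tx_queue_get() folds the old sk_tx_queue_recorded() test into one call and returns -1 when no socket or no queue is recorded, so callers can simply branch on the sign of the result. A rough caller-side sketch, mirroring the dev_pick_tx() update later in this series (demo_pick_tx_queue() is a hypothetical name):

static u16 demo_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);  /* -1 if nothing recorded yet */

        if (queue_index < 0) {
                queue_index = skb_tx_hash(dev, skb);
                if (sk)
                        sk_tx_queue_set(sk, queue_index);
        }
        return queue_index;
}
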
index ceac661..cfe2943 100644 (file)
@@ -9,6 +9,7 @@ struct tcf_mirred {
        int                     tcfm_ifindex;
        int                     tcfm_ok_push;
        struct net_device       *tcfm_dev;
+       struct list_head        tcfm_list;
 };
 #define to_mirred(pc) \
        container_of(pc, struct tcf_mirred, common)
index 506c849..40a8f46 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1256,6 +1256,33 @@ out:
        return un;
 }
 
+
+/**
+ * get_queue_result - Retrieve the result code from sem_queue
+ * @q: Pointer to queue structure
+ *
+ * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
+ * q->status, then we must loop until the value is replaced with the final
+ * value: This may happen if a task is woken up by an unrelated event (e.g.
+ * signal) and in parallel the task is woken up by another task because it got
+ * the requested semaphores.
+ *
+ * The function can be called with or without holding the semaphore spinlock.
+ */
+static int get_queue_result(struct sem_queue *q)
+{
+       int error;
+
+       error = q->status;
+       while (unlikely(error == IN_WAKEUP)) {
+               cpu_relax();
+               error = q->status;
+       }
+
+       return error;
+}
+
+
 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                unsigned, nsops, const struct timespec __user *, timeout)
 {
@@ -1409,15 +1436,18 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        else
                schedule();
 
-       error = queue.status;
-       while(unlikely(error == IN_WAKEUP)) {
-               cpu_relax();
-               error = queue.status;
-       }
+       error = get_queue_result(&queue);
 
        if (error != -EINTR) {
                /* fast path: update_queue already obtained all requested
-                * resources */
+                * resources.
+                * Perform a smp_mb(): User space could assume that semop()
+                * is a memory barrier: Without the mb(), the cpu could
+                * speculatively read in user space stale data that was
+                * overwritten by the previous owner of the semaphore.
+                */
+               smp_mb();
+
                goto out_free;
        }
 
@@ -1427,10 +1457,12 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                goto out_free;
        }
 
+       error = get_queue_result(&queue);
+
        /*
         * If queue.status != -EINTR we are woken up by another process
         */
-       error = queue.status;
+
        if (error != -EINTR) {
                goto out_unlock_free;
        }
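
get_queue_result() exists because the waker publishes the result in two steps: it first stores IN_WAKEUP, wakes the sleeper, and only then stores the final error code. A simplified sketch of that waker-side protocol (illustrative only, not the literal update_queue() code) is:

/* waker side, simplified */
q->status = IN_WAKEUP;          /* tell the sleeper "result is coming, spin" */
wake_up_process(q->sleeper);
smp_wmb();
q->status = error;              /* final value get_queue_result() will return */
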
index a2d5504..60bc8b1 100644 (file)
@@ -209,6 +209,31 @@ void exit_creds(struct task_struct *tsk)
        }
 }
 
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away.  Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must also make sure task doesn't get deleted, either by holding a
+ * ref on task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+const struct cred *get_task_cred(struct task_struct *task)
+{
+       const struct cred *cred;
+
+       rcu_read_lock();
+
+       do {
+               cred = __task_cred((task));
+               BUG_ON(!cred);
+       } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+
+       rcu_read_unlock();
+       return cred;
+}
+
 /*
  * Allocate blank credentials, such that the credentials can be filled in at a
  * later date without risk of ENOMEM.
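
Callers of the new helper pin the credentials and are responsible for dropping the reference themselves. A minimal usage sketch (demo_report_uid() is a hypothetical caller):

static void demo_report_uid(struct task_struct *task)
{
        const struct cred *cred = get_task_cred(task);

        pr_info("task %d runs as uid %u\n", task->pid, cred->uid);
        put_cred(cred);                 /* drop the reference taken above */
}
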
index 5cb7cd1..8bc5eef 100644 (file)
@@ -605,13 +605,13 @@ cpu_master_loop:
                if (dbg_kdb_mode) {
                        kgdb_connected = 1;
                        error = kdb_stub(ks);
+                       kgdb_connected = 0;
                } else {
                        error = gdb_serial_stub(ks);
                }
 
                if (error == DBG_PASS_EVENT) {
                        dbg_kdb_mode = !dbg_kdb_mode;
-                       kgdb_connected = 0;
                } else if (error == DBG_SWITCH_CPU_EVENT) {
                        dbg_cpu_switch(cpu, dbg_switch_cpu);
                        goto cpu_loop;
index 4b17b32..e8fd686 100644 (file)
@@ -621,10 +621,8 @@ static void gdb_cmd_query(struct kgdb_state *ks)
        switch (remcom_in_buffer[1]) {
        case 's':
        case 'f':
-               if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
+               if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10))
                        break;
-               }
 
                i = 0;
                remcom_out_buffer[0] = 'm';
@@ -665,10 +663,9 @@ static void gdb_cmd_query(struct kgdb_state *ks)
                pack_threadid(remcom_out_buffer + 2, thref);
                break;
        case 'T':
-               if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
+               if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16))
                        break;
-               }
+
                ks->threadid = 0;
                ptr = remcom_in_buffer + 17;
                kgdb_hex2long(&ptr, &ks->threadid);
index 184cd82..ebe4a28 100644 (file)
@@ -1820,9 +1820,8 @@ static int kdb_sr(int argc, const char **argv)
 {
        if (argc != 1)
                return KDB_ARGCOUNT;
-       sysrq_toggle_support(1);
        kdb_trap_printk++;
-       handle_sysrq(*argv[1], NULL);
+       __handle_sysrq(*argv[1], NULL, 0);
        kdb_trap_printk--;
 
        return 0;
@@ -1883,6 +1882,7 @@ static int kdb_lsmod(int argc, const char **argv)
                        kdb_printf(" (Loading)");
                else
                        kdb_printf(" (Live)");
+               kdb_printf(" 0x%p", mod->module_core);
 
 #ifdef CONFIG_MODULE_UNLOAD
                {
@@ -2291,6 +2291,9 @@ static int kdb_ll(int argc, const char **argv)
        while (va) {
                char buf[80];
 
+               if (KDB_FLAG(CMD_INTERRUPT))
+                       return 0;
+
                sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
                diag = kdb_parse(buf);
                if (diag)
index 31aa933..7bfae88 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/early_res.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 /*
  * Early reserved memory areas.
@@ -319,6 +321,8 @@ void __init free_early(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       kmemleak_free_part(__va(start), end - start);
+
        i = find_overlapped_early(start, end);
        r = &early_res[i];
        if (i >= max_early_res || r->end != end || r->start != start)
@@ -333,6 +337,8 @@ void __init free_early_partial(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       kmemleak_free_part(__va(start), end - start);
+
        if (start == end)
                return;
 
index 5d2d281..6c56282 100644 (file)
@@ -787,7 +787,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
        /* Store the name of the last unloaded module for diagnostic purposes */
        strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
-       ddebug_remove_module(mod->name);
 
        free_module(mod);
        return 0;
@@ -1550,6 +1549,9 @@ static void free_module(struct module *mod)
        remove_sect_attrs(mod);
        mod_kobject_remove(mod);
 
+       /* Remove dynamic debug info */
+       ddebug_remove_module(mod->name);
+
        /* Arch-specific cleanup. */
        module_arch_cleanup(mod);
 
index 170d8ca..5b916bc 100644 (file)
@@ -181,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
        bool
 
-config HAVE_LMB
-       boolean
-
 config CPUMASK_OFFSTACK
        bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
        help
index 3f1062c..0bfabba 100644 (file)
@@ -89,8 +89,6 @@ obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644 (file)
index b1fc526..0000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Procedures for maintaining information about logical memory blocks.
- *
- * Peter Bergner, IBM Corp.    June 2001.
- * Copyright (C) 2001 Peter Bergner.
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/lmb.h>
-
-#define LMB_ALLOC_ANYWHERE     0
-
-struct lmb lmb;
-
-static int lmb_debug;
-
-static int __init early_lmb(char *p)
-{
-       if (p && strstr(p, "debug"))
-               lmb_debug = 1;
-       return 0;
-}
-early_param("lmb", early_lmb);
-
-static void lmb_dump(struct lmb_region *region, char *name)
-{
-       unsigned long long base, size;
-       int i;
-
-       pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);
-
-       for (i = 0; i < region->cnt; i++) {
-               base = region->region[i].base;
-               size = region->region[i].size;
-
-               pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-                   name, i, base, base + size - 1, size);
-       }
-}
-
-void lmb_dump_all(void)
-{
-       if (!lmb_debug)
-               return;
-
-       pr_info("LMB configuration:\n");
-       pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)lmb.rmo_size);
-       pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
-
-       lmb_dump(&lmb.memory, "memory");
-       lmb_dump(&lmb.reserved, "reserved");
-}
-
-static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
-                                       u64 size2)
-{
-       return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
-}
-
-static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
-{
-       if (base2 == base1 + size1)
-               return 1;
-       else if (base1 == base2 + size2)
-               return -1;
-
-       return 0;
-}
-
-static long lmb_regions_adjacent(struct lmb_region *rgn,
-               unsigned long r1, unsigned long r2)
-{
-       u64 base1 = rgn->region[r1].base;
-       u64 size1 = rgn->region[r1].size;
-       u64 base2 = rgn->region[r2].base;
-       u64 size2 = rgn->region[r2].size;
-
-       return lmb_addrs_adjacent(base1, size1, base2, size2);
-}
-
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
-{
-       unsigned long i;
-
-       for (i = r; i < rgn->cnt - 1; i++) {
-               rgn->region[i].base = rgn->region[i + 1].base;
-               rgn->region[i].size = rgn->region[i + 1].size;
-       }
-       rgn->cnt--;
-}
-
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn,
-               unsigned long r1, unsigned long r2)
-{
-       rgn->region[r1].size += rgn->region[r2].size;
-       lmb_remove_region(rgn, r2);
-}
-
-void __init lmb_init(void)
-{
-       /* Create a dummy zero size LMB which will get coalesced away later.
-        * This simplifies the lmb_add() code below...
-        */
-       lmb.memory.region[0].base = 0;
-       lmb.memory.region[0].size = 0;
-       lmb.memory.cnt = 1;
-
-       /* Ditto. */
-       lmb.reserved.region[0].base = 0;
-       lmb.reserved.region[0].size = 0;
-       lmb.reserved.cnt = 1;
-}
-
-void __init lmb_analyze(void)
-{
-       int i;
-
-       lmb.memory.size = 0;
-
-       for (i = 0; i < lmb.memory.cnt; i++)
-               lmb.memory.size += lmb.memory.region[i].size;
-}
-
-static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-       unsigned long coalesced = 0;
-       long adjacent, i;
-
-       if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
-               rgn->region[0].base = base;
-               rgn->region[0].size = size;
-               return 0;
-       }
-
-       /* First try and coalesce this LMB with another. */
-       for (i = 0; i < rgn->cnt; i++) {
-               u64 rgnbase = rgn->region[i].base;
-               u64 rgnsize = rgn->region[i].size;
-
-               if ((rgnbase == base) && (rgnsize == size))
-                       /* Already have this region, so we're done */
-                       return 0;
-
-               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
-               if (adjacent > 0) {
-                       rgn->region[i].base -= size;
-                       rgn->region[i].size += size;
-                       coalesced++;
-                       break;
-               } else if (adjacent < 0) {
-                       rgn->region[i].size += size;
-                       coalesced++;
-                       break;
-               }
-       }
-
-       if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
-               lmb_coalesce_regions(rgn, i, i+1);
-               coalesced++;
-       }
-
-       if (coalesced)
-               return coalesced;
-       if (rgn->cnt >= MAX_LMB_REGIONS)
-               return -1;
-
-       /* Couldn't coalesce the LMB, so add it to the sorted table. */
-       for (i = rgn->cnt - 1; i >= 0; i--) {
-               if (base < rgn->region[i].base) {
-                       rgn->region[i+1].base = rgn->region[i].base;
-                       rgn->region[i+1].size = rgn->region[i].size;
-               } else {
-                       rgn->region[i+1].base = base;
-                       rgn->region[i+1].size = size;
-                       break;
-               }
-       }
-
-       if (base < rgn->region[0].base) {
-               rgn->region[0].base = base;
-               rgn->region[0].size = size;
-       }
-       rgn->cnt++;
-
-       return 0;
-}
-
-long lmb_add(u64 base, u64 size)
-{
-       struct lmb_region *_rgn = &lmb.memory;
-
-       /* On pSeries LPAR systems, the first LMB is our RMO region. */
-       if (base == 0)
-               lmb.rmo_size = size;
-
-       return lmb_add_region(_rgn, base, size);
-
-}
-
-static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
-{
-       u64 rgnbegin, rgnend;
-       u64 end = base + size;
-       int i;
-
-       rgnbegin = rgnend = 0; /* supress gcc warnings */
-
-       /* Find the region where (base, size) belongs to */
-       for (i=0; i < rgn->cnt; i++) {
-               rgnbegin = rgn->region[i].base;
-               rgnend = rgnbegin + rgn->region[i].size;
-
-               if ((rgnbegin <= base) && (end <= rgnend))
-                       break;
-       }
-
-       /* Didn't find the region */
-       if (i == rgn->cnt)
-               return -1;
-
-       /* Check to see if we are removing entire region */
-       if ((rgnbegin == base) && (rgnend == end)) {
-               lmb_remove_region(rgn, i);
-               return 0;
-       }
-
-       /* Check to see if region is matching at the front */
-       if (rgnbegin == base) {
-               rgn->region[i].base = end;
-               rgn->region[i].size -= size;
-               return 0;
-       }
-
-       /* Check to see if the region is matching at the end */
-       if (rgnend == end) {
-               rgn->region[i].size -= size;
-               return 0;
-       }
-
-       /*
-        * We need to split the entry -  adjust the current one to the
-        * beginging of the hole and add the region after hole.
-        */
-       rgn->region[i].size = base - rgn->region[i].base;
-       return lmb_add_region(rgn, end, rgnend - end);
-}
-
-long lmb_remove(u64 base, u64 size)
-{
-       return __lmb_remove(&lmb.memory, base, size);
-}
-
-long __init lmb_free(u64 base, u64 size)
-{
-       return __lmb_remove(&lmb.reserved, base, size);
-}
-
-long __init lmb_reserve(u64 base, u64 size)
-{
-       struct lmb_region *_rgn = &lmb.reserved;
-
-       BUG_ON(0 == size);
-
-       return lmb_add_region(_rgn, base, size);
-}
-
-long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-       unsigned long i;
-
-       for (i = 0; i < rgn->cnt; i++) {
-               u64 rgnbase = rgn->region[i].base;
-               u64 rgnsize = rgn->region[i].size;
-               if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
-                       break;
-       }
-
-       return (i < rgn->cnt) ? i : -1;
-}
-
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-       return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-       return (addr + (size - 1)) & ~(size - 1);
-}
-
-static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
-                                          u64 size, u64 align)
-{
-       u64 base, res_base;
-       long j;
-
-       base = lmb_align_down((end - size), align);
-       while (start <= base) {
-               j = lmb_overlaps_region(&lmb.reserved, base, size);
-               if (j < 0) {
-                       /* this area isn't reserved, take it */
-                       if (lmb_add_region(&lmb.reserved, base, size) < 0)
-                               base = ~(u64)0;
-                       return base;
-               }
-               res_base = lmb.reserved.region[j].base;
-               if (res_base < size)
-                       break;
-               base = lmb_align_down(res_base - size, align);
-       }
-
-       return ~(u64)0;
-}
-
-static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
-                                      u64 (*nid_range)(u64, u64, int *),
-                                      u64 size, u64 align, int nid)
-{
-       u64 start, end;
-
-       start = mp->base;
-       end = start + mp->size;
-
-       start = lmb_align_up(start, align);
-       while (start < end) {
-               u64 this_end;
-               int this_nid;
-
-               this_end = nid_range(start, end, &this_nid);
-               if (this_nid == nid) {
-                       u64 ret = lmb_alloc_nid_unreserved(start, this_end,
-                                                          size, align);
-                       if (ret != ~(u64)0)
-                               return ret;
-               }
-               start = this_end;
-       }
-
-       return ~(u64)0;
-}
-
-u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
-                        u64 (*nid_range)(u64 start, u64 end, int *nid))
-{
-       struct lmb_region *mem = &lmb.memory;
-       int i;
-
-       BUG_ON(0 == size);
-
-       size = lmb_align_up(size, align);
-
-       for (i = 0; i < mem->cnt; i++) {
-               u64 ret = lmb_alloc_nid_region(&mem->region[i],
-                                              nid_range,
-                                              size, align, nid);
-               if (ret != ~(u64)0)
-                       return ret;
-       }
-
-       return lmb_alloc(size, align);
-}
-
-u64 __init lmb_alloc(u64 size, u64 align)
-{
-       return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
-}
-
-u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-       u64 alloc;
-
-       alloc = __lmb_alloc_base(size, align, max_addr);
-
-       if (alloc == 0)
-               panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-                     (unsigned long long) size, (unsigned long long) max_addr);
-
-       return alloc;
-}
-
-u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-       long i, j;
-       u64 base = 0;
-       u64 res_base;
-
-       BUG_ON(0 == size);
-
-       size = lmb_align_up(size, align);
-
-       /* On some platforms, make sure we allocate lowmem */
-       /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
-       if (max_addr == LMB_ALLOC_ANYWHERE)
-               max_addr = LMB_REAL_LIMIT;
-
-       for (i = lmb.memory.cnt - 1; i >= 0; i--) {
-               u64 lmbbase = lmb.memory.region[i].base;
-               u64 lmbsize = lmb.memory.region[i].size;
-
-               if (lmbsize < size)
-                       continue;
-               if (max_addr == LMB_ALLOC_ANYWHERE)
-                       base = lmb_align_down(lmbbase + lmbsize - size, align);
-               else if (lmbbase < max_addr) {
-                       base = min(lmbbase + lmbsize, max_addr);
-                       base = lmb_align_down(base - size, align);
-               } else
-                       continue;
-
-               while (base && lmbbase <= base) {
-                       j = lmb_overlaps_region(&lmb.reserved, base, size);
-                       if (j < 0) {
-                               /* this area isn't reserved, take it */
-                               if (lmb_add_region(&lmb.reserved, base, size) < 0)
-                                       return 0;
-                               return base;
-                       }
-                       res_base = lmb.reserved.region[j].base;
-                       if (res_base < size)
-                               break;
-                       base = lmb_align_down(res_base - size, align);
-               }
-       }
-       return 0;
-}
-
-/* You must call lmb_analyze() before this. */
-u64 __init lmb_phys_mem_size(void)
-{
-       return lmb.memory.size;
-}
-
-u64 lmb_end_of_DRAM(void)
-{
-       int idx = lmb.memory.cnt - 1;
-
-       return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
-}
-
-/* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(u64 memory_limit)
-{
-       unsigned long i;
-       u64 limit;
-       struct lmb_property *p;
-
-       if (!memory_limit)
-               return;
-
-       /* Truncate the lmb regions to satisfy the memory limit. */
-       limit = memory_limit;
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               if (limit > lmb.memory.region[i].size) {
-                       limit -= lmb.memory.region[i].size;
-                       continue;
-               }
-
-               lmb.memory.region[i].size = limit;
-               lmb.memory.cnt = i + 1;
-               break;
-       }
-
-       if (lmb.memory.region[0].size < lmb.rmo_size)
-               lmb.rmo_size = lmb.memory.region[0].size;
-
-       memory_limit = lmb_end_of_DRAM();
-
-       /* And truncate any reserves above the limit also. */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               p = &lmb.reserved.region[i];
-
-               if (p->base > memory_limit)
-                       p->size = 0;
-               else if ((p->base + p->size) > memory_limit)
-                       p->size = memory_limit - p->base;
-
-               if (p->size == 0) {
-                       lmb_remove_region(&lmb.reserved, i);
-                       i--;
-               }
-       }
-}
-
-int __init lmb_is_reserved(u64 addr)
-{
-       int i;
-
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               u64 upper = lmb.reserved.region[i].base +
-                       lmb.reserved.region[i].size - 1;
-               if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
-                       return 1;
-       }
-       return 0;
-}
-
-int lmb_is_region_reserved(u64 base, u64 size)
-{
-       return lmb_overlaps_region(&lmb.reserved, base, size);
-}
-
-/*
- * Given a <base, len>, find which memory regions belong to this range.
- * Adjust the request and return a contiguous chunk.
- */
-int lmb_find(struct lmb_property *res)
-{
-       int i;
-       u64 rstart, rend;
-
-       rstart = res->base;
-       rend = rstart + res->size - 1;
-
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               u64 start = lmb.memory.region[i].base;
-               u64 end = start + lmb.memory.region[i].size - 1;
-
-               if (start > rend)
-                       return -1;
-
-               if ((end >= rstart) && (start < rend)) {
-                       /* adjust the request */
-                       if (rstart < start)
-                               rstart = start;
-                       if (rend > end)
-                               rend = end;
-                       res->base = rstart;
-                       res->size = rend - rstart + 1;
-                       return 0;
-               }
-       }
-       return -1;
-}
index 527136b..f4e516e 100644 (file)
@@ -128,6 +128,9 @@ config SPARSEMEM_VMEMMAP
         pfn_to_page and page_to_pfn operations.  This is the most
         efficient option when sufficient kernel resources are available.
 
+config HAVE_MEMBLOCK
+       boolean
+
 # eventually, we can have this option just 'select SPARSEMEM'
 config MEMORY_HOTPLUG
        bool "Allow for memory hot-add"
index 8982504..34b2546 100644 (file)
@@ -15,6 +15,8 @@ obj-y                 := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
                           $(mmu-y)
 obj-y += init-mm.o
 
+obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
+
 obj-$(CONFIG_BOUNCE)   += bounce.o
 obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o thrash.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
index 58c66cc..142c84a 100644 (file)
@@ -833,15 +833,24 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
 {
+       void *ptr;
+
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 #ifdef CONFIG_NO_BOOTMEM
-       return __alloc_memory_core_early(pgdat->node_id, size, align,
+       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                        goal, -1ULL);
+       if (ptr)
+               return ptr;
+
+       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                         goal, -1ULL);
 #else
-       return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+       ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 #endif
+
+       return ptr;
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -977,14 +986,21 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
 {
+       void *ptr;
+
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 #ifdef CONFIG_NO_BOOTMEM
-       return __alloc_memory_core_early(pgdat->node_id, size, align,
+       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                               goal, ARCH_LOW_ADDRESS_LIMIT);
+       if (ptr)
+               return ptr;
+       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                goal, ARCH_LOW_ADDRESS_LIMIT);
 #else
-       return ___alloc_bootmem_node(pgdat->bdata, size, align,
+       ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
                                goal, ARCH_LOW_ADDRESS_LIMIT);
 #endif
+       return ptr;
 }
diff --git a/mm/memblock.c b/mm/memblock.c
new file mode 100644 (file)
index 0000000..3024eb3
--- /dev/null
@@ -0,0 +1,541 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp.    June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/memblock.h>
+
+#define MEMBLOCK_ALLOC_ANYWHERE        0
+
+struct memblock memblock;
+
+static int memblock_debug;
+
+static int __init early_memblock(char *p)
+{
+       if (p && strstr(p, "debug"))
+               memblock_debug = 1;
+       return 0;
+}
+early_param("memblock", early_memblock);
+
+static void memblock_dump(struct memblock_region *region, char *name)
+{
+       unsigned long long base, size;
+       int i;
+
+       pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);
+
+       for (i = 0; i < region->cnt; i++) {
+               base = region->region[i].base;
+               size = region->region[i].size;
+
+               pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
+                   name, i, base, base + size - 1, size);
+       }
+}
+
+void memblock_dump_all(void)
+{
+       if (!memblock_debug)
+               return;
+
+       pr_info("MEMBLOCK configuration:\n");
+       pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)memblock.rmo_size);
+       pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);
+
+       memblock_dump(&memblock.memory, "memory");
+       memblock_dump(&memblock.reserved, "reserved");
+}
+
+static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
+                                       u64 size2)
+{
+       return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+}
+
+static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+{
+       if (base2 == base1 + size1)
+               return 1;
+       else if (base1 == base2 + size2)
+               return -1;
+
+       return 0;
+}
+
+static long memblock_regions_adjacent(struct memblock_region *rgn,
+               unsigned long r1, unsigned long r2)
+{
+       u64 base1 = rgn->region[r1].base;
+       u64 size1 = rgn->region[r1].size;
+       u64 base2 = rgn->region[r2].base;
+       u64 size2 = rgn->region[r2].size;
+
+       return memblock_addrs_adjacent(base1, size1, base2, size2);
+}
+
+static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
+{
+       unsigned long i;
+
+       for (i = r; i < rgn->cnt - 1; i++) {
+               rgn->region[i].base = rgn->region[i + 1].base;
+               rgn->region[i].size = rgn->region[i + 1].size;
+       }
+       rgn->cnt--;
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void memblock_coalesce_regions(struct memblock_region *rgn,
+               unsigned long r1, unsigned long r2)
+{
+       rgn->region[r1].size += rgn->region[r2].size;
+       memblock_remove_region(rgn, r2);
+}
+
+void __init memblock_init(void)
+{
+       /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
+        * This simplifies the memblock_add() code below...
+        */
+       memblock.memory.region[0].base = 0;
+       memblock.memory.region[0].size = 0;
+       memblock.memory.cnt = 1;
+
+       /* Ditto. */
+       memblock.reserved.region[0].base = 0;
+       memblock.reserved.region[0].size = 0;
+       memblock.reserved.cnt = 1;
+}
+
+void __init memblock_analyze(void)
+{
+       int i;
+
+       memblock.memory.size = 0;
+
+       for (i = 0; i < memblock.memory.cnt; i++)
+               memblock.memory.size += memblock.memory.region[i].size;
+}
+
+static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
+{
+       unsigned long coalesced = 0;
+       long adjacent, i;
+
+       if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+               rgn->region[0].base = base;
+               rgn->region[0].size = size;
+               return 0;
+       }
+
+       /* First try and coalesce this MEMBLOCK with another. */
+       for (i = 0; i < rgn->cnt; i++) {
+               u64 rgnbase = rgn->region[i].base;
+               u64 rgnsize = rgn->region[i].size;
+
+               if ((rgnbase == base) && (rgnsize == size))
+                       /* Already have this region, so we're done */
+                       return 0;
+
+               adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
+               if (adjacent > 0) {
+                       rgn->region[i].base -= size;
+                       rgn->region[i].size += size;
+                       coalesced++;
+                       break;
+               } else if (adjacent < 0) {
+                       rgn->region[i].size += size;
+                       coalesced++;
+                       break;
+               }
+       }
+
+       if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
+               memblock_coalesce_regions(rgn, i, i+1);
+               coalesced++;
+       }
+
+       if (coalesced)
+               return coalesced;
+       if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
+               return -1;
+
+       /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
+       for (i = rgn->cnt - 1; i >= 0; i--) {
+               if (base < rgn->region[i].base) {
+                       rgn->region[i+1].base = rgn->region[i].base;
+                       rgn->region[i+1].size = rgn->region[i].size;
+               } else {
+                       rgn->region[i+1].base = base;
+                       rgn->region[i+1].size = size;
+                       break;
+               }
+       }
+
+       if (base < rgn->region[0].base) {
+               rgn->region[0].base = base;
+               rgn->region[0].size = size;
+       }
+       rgn->cnt++;
+
+       return 0;
+}
+
+long memblock_add(u64 base, u64 size)
+{
+       struct memblock_region *_rgn = &memblock.memory;
+
+       /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
+       if (base == 0)
+               memblock.rmo_size = size;
+
+       return memblock_add_region(_rgn, base, size);
+
+}
+
+static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
+{
+       u64 rgnbegin, rgnend;
+       u64 end = base + size;
+       int i;
+
+       rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+       /* Find the region where (base, size) belongs to */
+       for (i=0; i < rgn->cnt; i++) {
+               rgnbegin = rgn->region[i].base;
+               rgnend = rgnbegin + rgn->region[i].size;
+
+               if ((rgnbegin <= base) && (end <= rgnend))
+                       break;
+       }
+
+       /* Didn't find the region */
+       if (i == rgn->cnt)
+               return -1;
+
+       /* Check to see if we are removing entire region */
+       if ((rgnbegin == base) && (rgnend == end)) {
+               memblock_remove_region(rgn, i);
+               return 0;
+       }
+
+       /* Check to see if region is matching at the front */
+       if (rgnbegin == base) {
+               rgn->region[i].base = end;
+               rgn->region[i].size -= size;
+               return 0;
+       }
+
+       /* Check to see if the region is matching at the end */
+       if (rgnend == end) {
+               rgn->region[i].size -= size;
+               return 0;
+       }
+
+       /*
+        * We need to split the entry -  adjust the current one to the
+        * beginning of the hole and add the region after the hole.
+        */
+       rgn->region[i].size = base - rgn->region[i].base;
+       return memblock_add_region(rgn, end, rgnend - end);
+}
+
+long memblock_remove(u64 base, u64 size)
+{
+       return __memblock_remove(&memblock.memory, base, size);
+}
+
+long __init memblock_free(u64 base, u64 size)
+{
+       return __memblock_remove(&memblock.reserved, base, size);
+}
+
+long __init memblock_reserve(u64 base, u64 size)
+{
+       struct memblock_region *_rgn = &memblock.reserved;
+
+       BUG_ON(0 == size);
+
+       return memblock_add_region(_rgn, base, size);
+}
+
+long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
+{
+       unsigned long i;
+
+       for (i = 0; i < rgn->cnt; i++) {
+               u64 rgnbase = rgn->region[i].base;
+               u64 rgnsize = rgn->region[i].size;
+               if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
+                       break;
+       }
+
+       return (i < rgn->cnt) ? i : -1;
+}
+
+static u64 memblock_align_down(u64 addr, u64 size)
+{
+       return addr & ~(size - 1);
+}
+
+static u64 memblock_align_up(u64 addr, u64 size)
+{
+       return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
+                                          u64 size, u64 align)
+{
+       u64 base, res_base;
+       long j;
+
+       base = memblock_align_down((end - size), align);
+       while (start <= base) {
+               j = memblock_overlaps_region(&memblock.reserved, base, size);
+               if (j < 0) {
+                       /* this area isn't reserved, take it */
+                       if (memblock_add_region(&memblock.reserved, base, size) < 0)
+                               base = ~(u64)0;
+                       return base;
+               }
+               res_base = memblock.reserved.region[j].base;
+               if (res_base < size)
+                       break;
+               base = memblock_align_down(res_base - size, align);
+       }
+
+       return ~(u64)0;
+}
+
+static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
+                                      u64 (*nid_range)(u64, u64, int *),
+                                      u64 size, u64 align, int nid)
+{
+       u64 start, end;
+
+       start = mp->base;
+       end = start + mp->size;
+
+       start = memblock_align_up(start, align);
+       while (start < end) {
+               u64 this_end;
+               int this_nid;
+
+               this_end = nid_range(start, end, &this_nid);
+               if (this_nid == nid) {
+                       u64 ret = memblock_alloc_nid_unreserved(start, this_end,
+                                                          size, align);
+                       if (ret != ~(u64)0)
+                               return ret;
+               }
+               start = this_end;
+       }
+
+       return ~(u64)0;
+}
+
+u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
+                        u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+       struct memblock_region *mem = &memblock.memory;
+       int i;
+
+       BUG_ON(0 == size);
+
+       size = memblock_align_up(size, align);
+
+       for (i = 0; i < mem->cnt; i++) {
+               u64 ret = memblock_alloc_nid_region(&mem->region[i],
+                                              nid_range,
+                                              size, align, nid);
+               if (ret != ~(u64)0)
+                       return ret;
+       }
+
+       return memblock_alloc(size, align);
+}
+
+u64 __init memblock_alloc(u64 size, u64 align)
+{
+       return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+}
+
+u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+       u64 alloc;
+
+       alloc = __memblock_alloc_base(size, align, max_addr);
+
+       if (alloc == 0)
+               panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+                     (unsigned long long) size, (unsigned long long) max_addr);
+
+       return alloc;
+}
+
+u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+       long i, j;
+       u64 base = 0;
+       u64 res_base;
+
+       BUG_ON(0 == size);
+
+       size = memblock_align_up(size, align);
+
+       /* On some platforms, make sure we allocate lowmem */
+       /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
+       if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
+               max_addr = MEMBLOCK_REAL_LIMIT;
+
+       for (i = memblock.memory.cnt - 1; i >= 0; i--) {
+               u64 memblockbase = memblock.memory.region[i].base;
+               u64 memblocksize = memblock.memory.region[i].size;
+
+               if (memblocksize < size)
+                       continue;
+               if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
+                       base = memblock_align_down(memblockbase + memblocksize - size, align);
+               else if (memblockbase < max_addr) {
+                       base = min(memblockbase + memblocksize, max_addr);
+                       base = memblock_align_down(base - size, align);
+               } else
+                       continue;
+
+               while (base && memblockbase <= base) {
+                       j = memblock_overlaps_region(&memblock.reserved, base, size);
+                       if (j < 0) {
+                               /* this area isn't reserved, take it */
+                               if (memblock_add_region(&memblock.reserved, base, size) < 0)
+                                       return 0;
+                               return base;
+                       }
+                       res_base = memblock.reserved.region[j].base;
+                       if (res_base < size)
+                               break;
+                       base = memblock_align_down(res_base - size, align);
+               }
+       }
+       return 0;
+}
+
+/* You must call memblock_analyze() before this. */
+u64 __init memblock_phys_mem_size(void)
+{
+       return memblock.memory.size;
+}
+
+u64 memblock_end_of_DRAM(void)
+{
+       int idx = memblock.memory.cnt - 1;
+
+       return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
+}
+
+/* You must call memblock_analyze() after this. */
+void __init memblock_enforce_memory_limit(u64 memory_limit)
+{
+       unsigned long i;
+       u64 limit;
+       struct memblock_property *p;
+
+       if (!memory_limit)
+               return;
+
+       /* Truncate the memblock regions to satisfy the memory limit. */
+       limit = memory_limit;
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               if (limit > memblock.memory.region[i].size) {
+                       limit -= memblock.memory.region[i].size;
+                       continue;
+               }
+
+               memblock.memory.region[i].size = limit;
+               memblock.memory.cnt = i + 1;
+               break;
+       }
+
+       if (memblock.memory.region[0].size < memblock.rmo_size)
+               memblock.rmo_size = memblock.memory.region[0].size;
+
+       memory_limit = memblock_end_of_DRAM();
+
+       /* And truncate any reserves above the limit also. */
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               p = &memblock.reserved.region[i];
+
+               if (p->base > memory_limit)
+                       p->size = 0;
+               else if ((p->base + p->size) > memory_limit)
+                       p->size = memory_limit - p->base;
+
+               if (p->size == 0) {
+                       memblock_remove_region(&memblock.reserved, i);
+                       i--;
+               }
+       }
+}
+
+int __init memblock_is_reserved(u64 addr)
+{
+       int i;
+
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               u64 upper = memblock.reserved.region[i].base +
+                       memblock.reserved.region[i].size - 1;
+               if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
+                       return 1;
+       }
+       return 0;
+}
+
+int memblock_is_region_reserved(u64 base, u64 size)
+{
+       return memblock_overlaps_region(&memblock.reserved, base, size);
+}
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int memblock_find(struct memblock_property *res)
+{
+       int i;
+       u64 rstart, rend;
+
+       rstart = res->base;
+       rend = rstart + res->size - 1;
+
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               u64 start = memblock.memory.region[i].base;
+               u64 end = start + memblock.memory.region[i].size - 1;
+
+               if (start > rend)
+                       return -1;
+
+               if ((end >= rstart) && (start < rend)) {
+                       /* adjust the request */
+                       if (rstart < start)
+                               rstart = start;
+                       if (rend > end)
+                               rend = end;
+                       res->base = rstart;
+                       res->size = rend - rstart + 1;
+                       return 0;
+               }
+       }
+       return -1;
+}
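
Taken together, the new mm/memblock.c provides the same boot-time flow the lmb code used to offer, just under the memblock_* names. Very roughly, an architecture's early setup would drive it as in the sketch below; the function name and the address ranges are made up for illustration:

void __init demo_early_mem_setup(void)
{
        u64 paddr;

        memblock_init();                                 /* start with empty region tables */
        memblock_add(0x00000000ULL, 0x40000000ULL);      /* made-up: 1 GiB of RAM at 0 */
        memblock_reserve(0x00000000ULL, 0x00100000ULL);  /* keep the first 1 MiB out of play */
        memblock_analyze();                              /* recompute memblock.memory.size */

        paddr = memblock_alloc(0x10000ULL, PAGE_SIZE);   /* 64 KiB, allocated top-down */
        pr_info("early buffer at 0x%llx\n", (unsigned long long)paddr);
}
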
index 119b7cc..bde42c6 100644 (file)
@@ -1394,10 +1394,20 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                return i ? : -EFAULT;
                        }
                        if (pages) {
-                               struct page *page = vm_normal_page(gate_vma, start, *pte);
+                               struct page *page;
+
+                               page = vm_normal_page(gate_vma, start, *pte);
+                               if (!page) {
+                                       if (!(gup_flags & FOLL_DUMP) &&
+                                            is_zero_pfn(pte_pfn(*pte)))
+                                               page = pte_page(*pte);
+                                       else {
+                                               pte_unmap(pte);
+                                               return i ? : -EFAULT;
+                                       }
+                               }
                                pages[i] = page;
-                               if (page)
-                                       get_page(page);
+                               get_page(page);
                        }
                        pte_unmap(pte);
                        if (vmas)
index 431214b..9bd339e 100644 (file)
@@ -3634,6 +3634,9 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
        int i;
        void *ptr;
 
+       if (limit > get_max_mapped())
+               limit = get_max_mapped();
+
        /* need to go over early_node_map to find out good range for node */
        for_each_active_range_index_in_nid(i, nid) {
                u64 addr;
@@ -3659,6 +3662,11 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                ptr = phys_to_virt(addr);
                memset(ptr, 0, size);
                reserve_early_without_check(addr, addr + size, "BOOTMEM");
+               /*
+                * The min_count is set to 0 so that bootmem allocated blocks
+                * are never reported as leaks.
+                */
+               kmemleak_alloc(ptr, size, 0, 0);
                return ptr;
        }
 
index 6c00814..5bffada 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/cgroup.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -126,6 +127,12 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
                        if (!base)
                                base = vmalloc(table_size);
                }
+               /*
+                * The value stored in section->page_cgroup is (base - pfn)
+                * and it does not point to the memory block allocated above,
+                * causing kmemleak false positives.
+                */
+               kmemleak_not_leak(base);
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but
index 9c7e57c..b94fe1b 100644 (file)
@@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
-               unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
+               unsigned long max_pass;
 
+               max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrink)(0, gfp_mask);
-                       shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
+                       nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
+                       shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
+                                                               gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
@@ -296,7 +298,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
 static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
-       lock_page(page);
+       lock_page_nosync(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
index b10e3cd..800b6b9 100644 (file)
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
                acl->sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
+       } else {
+               if (acl->sec_level < sec_level)
+                       acl->sec_level = sec_level;
+               if (acl->auth_type < auth_type)
+                       acl->auth_type = auth_type;
        }
 
        if (type == ACL_LINK)
index 6c57fc7..786b5de 100644 (file)
@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
        if (conn) {
                if (!ev->status)
                        conn->link_mode |= HCI_LM_AUTH;
+               else
+                       conn->sec_level = BT_SECURITY_LOW;
 
                clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
index 1b682a5..cf3c407 100644 (file)
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
        l2cap_send_sframe(pi, control);
 }
 
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+       return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
 static void l2cap_do_start(struct sock *sk)
 {
        struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
                if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
                        return;
 
-               if (l2cap_check_security(sk)) {
+               if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
                        struct l2cap_conn_req req;
                        req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                        req.psm  = l2cap_pi(sk)->psm;
 
                        l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                        l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                }
 
                if (sk->sk_state == BT_CONNECT) {
-                       if (l2cap_check_security(sk)) {
+                       if (l2cap_check_security(sk) &&
+                                       __l2cap_no_conn_pending(sk)) {
                                struct l2cap_conn_req req;
                                req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                                req.psm  = l2cap_pi(sk)->psm;
 
                                l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                                l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
                l2cap_pi(sk)->ident = 0;
                l2cap_pi(sk)->dcid = dcid;
                l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
                l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
                l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                req.psm  = l2cap_pi(sk)->psm;
 
                                l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                                l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
index eedf2c9..753fc42 100644 (file)
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
        return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
-{
-       struct netpoll *np = br_dev->npinfo->netpoll;
-
-       if (np->real_dev != br_dev)
-               netpoll_poll_dev(np->real_dev);
-}
-
 void br_netpoll_cleanup(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_do_ioctl            = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_cleanup     = br_netpoll_cleanup,
-       .ndo_poll_controller     = br_poll_controller,
 #endif
 };
 
index a4e72a8..595da45 100644 (file)
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-                       if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-                               netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-                               skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-                       } else
-#endif
-                               dev_queue_xmit(skb);
+                       dev_queue_xmit(skb);
                }
        }
 
@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       struct net_bridge *br = to->br;
-       if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-               struct netpoll *np;
-               to->dev->npinfo = skb->dev->npinfo;
-               np = skb->dev->npinfo->netpoll;
-               np->real_dev = np->dev = to->dev;
-               to->dev->priv_flags |= IFF_IN_NETPOLL;
-       }
-#endif
        skb->dev = to->dev;
        NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
                br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       if (skb->dev->npinfo)
-               skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
index 723a347..1f466e8 100644 (file)
@@ -1488,6 +1488,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
        skb_orphan(skb);
+       nf_reset(skb);
 
        if (!(dev->flags & IFF_UP) ||
            (skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1911,8 +1912,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-       if (!skb_tx(skb)->flags)
+       struct sock *sk = skb->sk;
+
+       if (sk && !skb_tx(skb)->flags) {
+               /* skb_tx_hash() won't be able to get sk.
+                * We copy sk_hash into skb->rxhash.
+                */
+               if (!skb->rxhash)
+                       skb->rxhash = sk->sk_hash;
                skb_orphan(skb);
+       }
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1998,8 +2007,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
-               hash = (__force u16) skb->protocol;
-
+               hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
 
        return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2022,12 +2030,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
-       u16 queue_index;
+       int queue_index;
        struct sock *sk = skb->sk;
 
-       if (sk_tx_queue_recorded(sk)) {
-               queue_index = sk_tx_queue_get(sk);
-       } else {
+       queue_index = sk_tx_queue_get(sk);
+       if (queue_index < 0) {
                const struct net_device_ops *ops = dev->netdev_ops;
 
                if (ops->ndo_select_queue) {
index 6ba1c0e..a4e0a74 100644 (file)
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-               = neigh->dev->header_ops->cache_update;
+               = NULL;
+
+       if (neigh->dev->header_ops)
+               update = neigh->dev->header_ops->cache_update;
 
        if (update) {
                for (hh = neigh->hh; hh; hh = hh->hh_next) {
index 34432b4..ce88293 100644 (file)
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        skb->network_header   += off;
        if (skb_mac_header_was_set(skb))
                skb->mac_header += off;
-       skb->csum_start       += nhead;
+       /* Only adjust this if it actually is csum_start rather than csum */
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        copy_skb_header(n, skb);
 
        off                  = newheadroom - oldheadroom;
-       n->csum_start       += off;
+       if (n->ip_summed == CHECKSUM_PARTIAL)
+               n->csum_start += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
        n->transport_header += off;
        n->network_header   += off;
index c51b554..1120178 100644 (file)
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
        bool "Distributed Switch Architecture support"
        default n
-       depends on EXPERIMENTAL && !S390
+       depends on EXPERIMENTAL && NET_ETHERNET && !S390
        select PHYLIB
        ---help---
          This allows you to use hardware switch chips that use
index 757f25e..7f62735 100644 (file)
@@ -442,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
        int err;
 
        err = ipmr_fib_lookup(net, &fl, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
@@ -1728,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
                goto dont_forward;
 
        err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        if (!local) {
                    if (IPCB(skb)->opt.router_alert) {
index 6596b4f..65afeae 100644 (file)
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
        ssize_t spliced;
        int ret;
 
+       sock_rps_record_flow(sk);
        /*
         * We can't seek on a socket input
         */
index b4ed957..7ed9dc1 100644 (file)
@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        int mib_idx;
        int fwd_rexmitting = 0;
 
+       if (!tp->packets_out)
+               return;
+
        if (!tp->lost_out)
                tp->retransmit_high = tp->snd_una;
 
index e1a698d..784f34d 100644 (file)
@@ -1760,7 +1760,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 
        idev = ipv6_find_idev(dev);
        if (!idev)
-               return NULL;
+               return ERR_PTR(-ENOBUFS);
+
+       if (idev->cnf.disable_ipv6)
+               return ERR_PTR(-EACCES);
 
        /* Add default multicast route */
        addrconf_add_mroute(dev);
@@ -2129,8 +2132,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
        if (!dev)
                return -ENODEV;
 
-       if ((idev = addrconf_add_dev(dev)) == NULL)
-               return -ENOBUFS;
+       idev = addrconf_add_dev(dev);
+       if (IS_ERR(idev))
+               return PTR_ERR(idev);
 
        scope = ipv6_addr_scope(pfx);
 
@@ -2377,7 +2381,7 @@ static void addrconf_dev_config(struct net_device *dev)
        }
 
        idev = addrconf_add_dev(dev);
-       if (idev == NULL)
+       if (IS_ERR(idev))
                return;
 
        memset(&addr, 0, sizeof(struct in6_addr));
@@ -2468,7 +2472,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = addrconf_add_dev(dev);
-       if (!idev) {
+       if (IS_ERR(idev)) {
                printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
                return;
        }
index 2794b60..d6e9599 100644 (file)
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
        int err = rt2->rt_hdr.nexthdr;
 
        spin_lock(&x->lock);
-       if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+       if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
            !ipv6_addr_any((struct in6_addr *)x->coaddr))
                err = -ENOENT;
        spin_unlock(&x->lock);
index c7000a6..67ee34f 100644 (file)
@@ -632,7 +632,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
        skb->dev = sta->sdata->dev;
        skb->protocol = eth_type_trans(skb, sta->sdata->dev);
        memset(skb->cb, 0, sizeof(skb->cb));
-       netif_rx(skb);
+       netif_rx_ni(skb);
 }
 
 static void sta_apply_parameters(struct ieee80211_local *local,
index 94d72e8..b2a3ae6 100644 (file)
@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
                newsk = NULL;
                goto out;
        }
+       kfree_skb(oskb);
 
        sock_hold(sk);
        pep_sk(newsk)->listener = sk;
index c0b6863..1980b71 100644 (file)
@@ -33,6 +33,7 @@
 static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
 static u32 mirred_idx_gen;
 static DEFINE_RWLOCK(mirred_lock);
+static LIST_HEAD(mirred_list);
 
 static struct tcf_hashinfo mirred_hash_info = {
        .htab   =       tcf_mirred_ht,
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
                        m->tcf_bindcnt--;
                m->tcf_refcnt--;
                if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
-                       dev_put(m->tcfm_dev);
+                       list_del(&m->tcfm_list);
+                       if (m->tcfm_dev)
+                               dev_put(m->tcfm_dev);
                        tcf_hash_destroy(&m->common, &mirred_hash_info);
                        return 1;
                }
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
                m->tcfm_ok_push = ok_push;
        }
        spin_unlock_bh(&m->tcf_lock);
-       if (ret == ACT_P_CREATED)
+       if (ret == ACT_P_CREATED) {
+               list_add(&m->tcfm_list, &mirred_list);
                tcf_hash_insert(pc, &mirred_hash_info);
+       }
 
        return ret;
 }
@@ -162,9 +167,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
        m->tcf_tm.lastuse = jiffies;
 
        dev = m->tcfm_dev;
+       if (!dev) {
+               printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+               goto out;
+       }
+
        if (!(dev->flags & IFF_UP)) {
                if (net_ratelimit())
-                       pr_notice("tc mirred to Houston: device %s is gone!\n",
+                       pr_notice("tc mirred to Houston: device %s is down\n",
                                  dev->name);
                goto out;
        }
@@ -232,6 +242,28 @@ nla_put_failure:
        return -1;
 }
 
+static int mirred_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct tcf_mirred *m;
+
+       if (event == NETDEV_UNREGISTER)
+               list_for_each_entry(m, &mirred_list, tcfm_list) {
+                       if (m->tcfm_dev == dev) {
+                               dev_put(dev);
+                               m->tcfm_dev = NULL;
+                       }
+               }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block mirred_device_notifier = {
+       .notifier_call = mirred_device_event,
+};
+
+
 static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .hinfo          =       &mirred_hash_info,
@@ -252,12 +284,17 @@ MODULE_LICENSE("GPL");
 
 static int __init mirred_init_module(void)
 {
+       int err = register_netdevice_notifier(&mirred_device_notifier);
+       if (err)
+               return err;
+
        pr_info("Mirror/redirect action on\n");
        return tcf_register_action(&act_mirred_ops);
 }
 
 static void __exit mirred_cleanup_module(void)
 {
+       unregister_netdevice_notifier(&mirred_device_notifier);
        tcf_unregister_action(&act_mirred_ops);
 }
 
index 5709494..724553e 100644 (file)
@@ -205,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
        {
                struct icmphdr *icmph;
 
-               if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+               if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
                        goto drop;
 
                icmph = (void *)(skb_network_header(skb) + ihl);
@@ -215,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
                    (icmph->type != ICMP_PARAMETERPROB))
                        break;
 
+               if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+                       goto drop;
+
                iph = (void *)(icmph + 1);
                if (egress)
                        addr = iph->daddr;
index 73affb8..8dc47f1 100644 (file)
@@ -267,7 +267,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
  * Run memory cache shrinker.
  */
 static int
-rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(free);
        int res;
index af1c173..a7ec5a8 100644 (file)
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
        /* Try to instantiate a bundle */
        err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
-       if (err < 0) {
-               if (err != -EAGAIN)
+       if (err <= 0) {
+               if (err != 0 && err != -EAGAIN)
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
                return ERR_PTR(err);
        }
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
                        goto make_dummy_bundle;
                dst_hold(&xdst->u.dst);
                return oldflo;
+       } else if (new_xdst == NULL) {
+               num_xfrms = 0;
+               if (oldflo == NULL)
+                       goto make_dummy_bundle;
+               xdst->num_xfrms = 0;
+               dst_hold(&xdst->u.dst);
+               return oldflo;
        }
 
        /* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
                                xfrm_pols_put(pols, num_pols);
                                err = PTR_ERR(xdst);
                                goto dropdst;
+                       } else if (xdst == NULL) {
+                               num_xfrms = 0;
+                               drop_pols = num_pols;
+                               goto no_transform;
                        }
 
                        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
index 115edb4..a9d9344 100644 (file)
@@ -226,7 +226,7 @@ void fill_window(WINDOW *win, const char *text)
                int len = get_line_length(line);
                strncpy(tmp, line, min(len, x));
                tmp[len] = '\0';
-               mvwprintw(win, i, 0, tmp);
+               mvwprintw(win, i, 0, "%s", tmp);
        }
 }
 
index 3a681ef..d2c29b6 100644 (file)
@@ -44,7 +44,7 @@ rpm-pkg rpm: $(objtree)/kernel.spec FORCE
        fi
        $(MAKE) clean
        $(PREV) ln -sf $(srctree) $(KERNELPATH)
-       $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --scm-only > $(objtree)/.scmversion
+       $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --save-scmversion
        $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/.
        $(PREV) rm $(KERNELPATH)
        rm -f $(objtree)/.scmversion
index d6a866e..64a9cb5 100755 (executable)
 #
 
 usage() {
-       echo "Usage: $0 [--scm-only] [srctree]" >&2
+       echo "Usage: $0 [--save-scmversion] [srctree]" >&2
        exit 1
 }
 
 scm_only=false
 srctree=.
-if test "$1" = "--scm-only"; then
+if test "$1" = "--save-scmversion"; then
        scm_only=true
        shift
 fi
@@ -30,11 +30,12 @@ fi
 
 scm_version()
 {
-       local short=false
+       local short
+       short=false
 
        cd "$srctree"
        if test -e .scmversion; then
-               cat "$_"
+               cat .scmversion
                return
        fi
        if test "$1" = "--short"; then
@@ -131,12 +132,15 @@ collect_files()
 }
 
 if $scm_only; then
-       scm_version
+       if test ! -e .scmversion; then
+               res=$(scm_version)
+               echo "$res" >.scmversion
+       fi
        exit
 fi
 
 if test -e include/config/auto.conf; then
-       source "$_"
+       . include/config/auto.conf
 else
        echo "Error: kernelrelease not valid - run 'make prepare' to update it"
        exit 1
index 86067ee..2fc5396 100644 (file)
@@ -52,6 +52,10 @@ struct hdmi_spec {
         */
        struct hda_multi_out multiout;
        unsigned int codec_type;
+
+       /* misc flags */
+       /* PD bit indicates only the update, not the current state */
+       unsigned int old_pin_detect:1;
 };
 
 
@@ -616,6 +620,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
  * Unsolicited events
  */
 
+static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
+                              struct hdmi_eld *eld);
+
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
 {
        struct hdmi_spec *spec = codec->spec;
@@ -632,6 +639,12 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
        if (index < 0)
                return;
 
+       if (spec->old_pin_detect) {
+               if (pind)
+                       hdmi_present_sense(codec, tag, &spec->sink_eld[index]);
+               pind = spec->sink_eld[index].monitor_present;
+       }
+
        spec->sink_eld[index].monitor_present = pind;
        spec->sink_eld[index].eld_valid = eldv;
 
index 3c10c0b..b0652ac 100644 (file)
@@ -478,6 +478,7 @@ static int patch_nvhdmi_8ch_89(struct hda_codec *codec)
 
        codec->spec = spec;
        spec->codec_type = HDA_CODEC_NVIDIA_MCP89;
+       spec->old_pin_detect = 1;
 
        if (hdmi_parse_codec(codec) < 0) {
                codec->spec = NULL;
@@ -508,6 +509,7 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec)
        spec->multiout.max_channels = 8;
        spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
        spec->codec_type = HDA_CODEC_NVIDIA_MCP7X;
+       spec->old_pin_detect = 1;
 
        codec->patch_ops = nvhdmi_patch_ops_8ch_7x;
 
@@ -528,6 +530,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
        spec->multiout.max_channels = 2;
        spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
        spec->codec_type = HDA_CODEC_NVIDIA_MCP7X;
+       spec->old_pin_detect = 1;
 
        codec->patch_ops = nvhdmi_patch_ops_2ch;
 
index ff614dd..596ea2f 100644 (file)
@@ -1267,11 +1267,11 @@ static int alc_auto_parse_customize_define(struct hda_codec *codec)
        unsigned nid = 0;
        struct alc_spec *spec = codec->spec;
 
+       spec->cdefine.enable_pcbeep = 1; /* assume always enabled */
+
        ass = codec->subsystem_id & 0xffff;
-       if (ass != codec->bus->pci->subsystem_device && (ass & 1)) {
-               spec->cdefine.enable_pcbeep = 1; /* assume always enabled */
+       if (ass != codec->bus->pci->subsystem_device && (ass & 1))
                goto do_sku;
-       }
 
        nid = 0x1d;
        if (codec->vendor_id == 0x10ec0260)
@@ -5180,8 +5180,24 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
 #define set_beep_amp(spec, nid, idx, dir) \
        ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir))
+
+static struct snd_pci_quirk beep_white_list[] = {
+       SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+       {}
+};
+
+static inline int has_cdefine_beep(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       const struct snd_pci_quirk *q;
+       q = snd_pci_quirk_lookup(codec->bus->pci, beep_white_list);
+       if (q)
+               return q->value;
+       return spec->cdefine.enable_pcbeep;
+}
 #else
 #define set_beep_amp(spec, nid, idx, dir) /* NOP */
+#define has_cdefine_beep(codec)                0
 #endif
 
 /*
@@ -10566,10 +10582,12 @@ static int patch_alc882(struct hda_codec *codec)
                }
        }
 
-       err = snd_hda_attach_beep_device(codec, 0x1);
-       if (err < 0) {
-               alc_free(codec);
-               return err;
+       if (has_cdefine_beep(codec)) {
+               err = snd_hda_attach_beep_device(codec, 0x1);
+               if (err < 0) {
+                       alc_free(codec);
+                       return err;
+               }
        }
 
        if (board_config != ALC882_AUTO)
@@ -10619,7 +10637,7 @@ static int patch_alc882(struct hda_codec *codec)
 
        set_capture_mixer(codec);
 
-       if (spec->cdefine.enable_pcbeep)
+       if (has_cdefine_beep(codec))
                set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 
        if (board_config == ALC882_AUTO)
@@ -12435,7 +12453,7 @@ static int patch_alc262(struct hda_codec *codec)
                }
        }
 
-       if (!spec->no_analog) {
+       if (!spec->no_analog && has_cdefine_beep(codec)) {
                err = snd_hda_attach_beep_device(codec, 0x1);
                if (err < 0) {
                        alc_free(codec);
@@ -12486,7 +12504,7 @@ static int patch_alc262(struct hda_codec *codec)
        }
        if (!spec->cap_mixer && !spec->no_analog)
                set_capture_mixer(codec);
-       if (!spec->no_analog && spec->cdefine.enable_pcbeep)
+       if (!spec->no_analog && has_cdefine_beep(codec))
                set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 
        spec->vmaster_nid = 0x0c;
@@ -14458,10 +14476,12 @@ static int patch_alc269(struct hda_codec *codec)
                }
        }
 
-       err = snd_hda_attach_beep_device(codec, 0x1);
-       if (err < 0) {
-               alc_free(codec);
-               return err;
+       if (has_cdefine_beep(codec)) {
+               err = snd_hda_attach_beep_device(codec, 0x1);
+               if (err < 0) {
+                       alc_free(codec);
+                       return err;
+               }
        }
 
        if (board_config != ALC269_AUTO)
@@ -14494,7 +14514,7 @@ static int patch_alc269(struct hda_codec *codec)
 
        if (!spec->cap_mixer)
                set_capture_mixer(codec);
-       if (spec->cdefine.enable_pcbeep)
+       if (has_cdefine_beep(codec))
                set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
 
        if (board_config == ALC269_AUTO)
@@ -18691,10 +18711,12 @@ static int patch_alc662(struct hda_codec *codec)
                }
        }
 
-       err = snd_hda_attach_beep_device(codec, 0x1);
-       if (err < 0) {
-               alc_free(codec);
-               return err;
+       if (has_cdefine_beep(codec)) {
+               err = snd_hda_attach_beep_device(codec, 0x1);
+               if (err < 0) {
+                       alc_free(codec);
+                       return err;
+               }
        }
 
        if (board_config != ALC662_AUTO)
@@ -18716,7 +18738,7 @@ static int patch_alc662(struct hda_codec *codec)
        if (!spec->cap_mixer)
                set_capture_mixer(codec);
 
-       if (spec->cdefine.enable_pcbeep) {
+       if (has_cdefine_beep(codec)) {
                switch (codec->vendor_id) {
                case 0x10ec0662:
                        set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
index 495be6e..24454c9 100644 (file)
@@ -300,7 +300,7 @@ struct snd_soc_dai au1xpsc_i2s_dai = {
 };
 EXPORT_SYMBOL(au1xpsc_i2s_dai);
 
-static int __init au1xpsc_i2s_drvprobe(struct platform_device *pdev)
+static int __devinit au1xpsc_i2s_drvprobe(struct platform_device *pdev)
 {
        struct resource *r;
        unsigned long sel;
index 31ac553..5da30eb 100644 (file)
@@ -83,8 +83,8 @@ config SND_SOC_ALL_CODECS
 
 config SND_SOC_WM_HUBS
        tristate
-       default y if SND_SOC_WM8993=y
-       default m if SND_SOC_WM8993=m
+       default y if SND_SOC_WM8993=y || SND_SOC_WM8994=y
+       default m if SND_SOC_WM8993=m || SND_SOC_WM8994=m
 
 config SND_SOC_AC97_CODEC
        tristate
index 1072621..9d1df26 100644 (file)
@@ -127,6 +127,8 @@ static __devinit int wm8727_platform_probe(struct platform_device *pdev)
                goto err_codec;
        }
 
+       return 0;
+
 err_codec:
        snd_soc_unregister_codec(codec);
 err:
index 7e4a627..4e212ed 100644 (file)
@@ -94,7 +94,6 @@ SOC_DAPM_SINGLE("Bypass Switch", WM8776_OUTMUX, 2, 1, 0),
 
 static const struct snd_soc_dapm_widget wm8776_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("AUX"),
-SND_SOC_DAPM_INPUT("AUX"),
 
 SND_SOC_DAPM_INPUT("AIN1"),
 SND_SOC_DAPM_INPUT("AIN2"),
index 0417dae..19ad590 100644 (file)
@@ -885,7 +885,6 @@ static int wm8988_register(struct wm8988_priv *wm8988,
        ret = snd_soc_register_dai(&wm8988_dai);
        if (ret != 0) {
                dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
-               snd_soc_unregister_codec(codec);
                goto err_codec;
        }
 
index 3396a0d..ec4acac 100644 (file)
@@ -683,20 +683,15 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
 
        /* clock inversion (CKG2) */
        data = 0;
-       switch (SH_FSI_INVERSION_MASK & flags) {
-       case SH_FSI_LRM_INV:
-               data = 1 << 12;
-               break;
-       case SH_FSI_BRM_INV:
-               data = 1 << 8;
-               break;
-       case SH_FSI_LRS_INV:
-               data = 1 << 4;
-               break;
-       case SH_FSI_BRS_INV:
-               data = 1 << 0;
-               break;
-       }
+       if (SH_FSI_LRM_INV & flags)
+               data |= 1 << 12;
+       if (SH_FSI_BRM_INV & flags)
+               data |= 1 << 8;
+       if (SH_FSI_LRS_INV & flags)
+               data |= 1 << 4;
+       if (SH_FSI_BRS_INV & flags)
+               data |= 1 << 0;
+
        fsi_reg_write(fsi, CKG2, data);
 
        /* do fmt, di fmt */
@@ -726,15 +721,15 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
                break;
        case SH_FSI_FMT_TDM:
                msg = "TDM";
-               data = CR_FMT(CR_TDM) | (fsi->chan - 1);
                fsi->chan = is_play ?
                        SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
+               data = CR_FMT(CR_TDM) | (fsi->chan - 1);
                break;
        case SH_FSI_FMT_TDM_DELAY:
                msg = "TDM Delay";
-               data = CR_FMT(CR_TDM_D) | (fsi->chan - 1);
                fsi->chan = is_play ?
                        SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
+               data = CR_FMT(CR_TDM_D) | (fsi->chan - 1);
                break;
        default:
                dev_err(dai->dev, "unknown format.\n");
index 3d8f31e..d75c28a 100644 (file)
@@ -600,30 +600,32 @@ endif
 
 ifdef NO_DEMANGLE
        BASIC_CFLAGS += -DNO_DEMANGLE
-else ifdef HAVE_CPLUS_DEMANGLE
-       EXTLIBS += -liberty
-       BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
 else
-       has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
-
-       ifeq ($(has_bfd),y)
-               EXTLIBS += -lbfd
+       ifdef HAVE_CPLUS_DEMANGLE
+               EXTLIBS += -liberty
+               BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
        else
-               has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
-               ifeq ($(has_bfd_iberty),y)
-                       EXTLIBS += -lbfd -liberty
+               has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
+
+               ifeq ($(has_bfd),y)
+                       EXTLIBS += -lbfd
                else
-                       has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
-                       ifeq ($(has_bfd_iberty_z),y)
-                               EXTLIBS += -lbfd -liberty -lz
+                       has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
+                       ifeq ($(has_bfd_iberty),y)
+                               EXTLIBS += -lbfd -liberty
                        else
-                               has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
-                               ifeq ($(has_cplus_demangle),y)
-                                       EXTLIBS += -liberty
-                                       BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+                               has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
+                               ifeq ($(has_bfd_iberty_z),y)
+                                       EXTLIBS += -lbfd -liberty -lz
                                else
-                                       msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
-                                       BASIC_CFLAGS += -DNO_DEMANGLE
+                                       has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
+                                       ifeq ($(has_cplus_demangle),y)
+                                               EXTLIBS += -liberty
+                                               BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+                                       else
+                                               msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
+                                               BASIC_CFLAGS += -DNO_DEMANGLE
+                                       endif
                                endif
                        endif
                endif
diff --git a/tools/perf/arch/sparc/Makefile b/tools/perf/arch/sparc/Makefile
new file mode 100644 (file)
index 0000000..15130b5
--- /dev/null
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c
new file mode 100644 (file)
index 0000000..0ab8848
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+#define SPARC_MAX_REGS 96
+
+const char *sparc_regs_table[SPARC_MAX_REGS] = {
+       "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
+       "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
+       "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
+       "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
+       "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
+       "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
+       "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
+       "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
+       "%f32", "%f33", "%f34", "%f35", "%f36", "%f37", "%f38", "%f39",
+       "%f40", "%f41", "%f42", "%f43", "%f44", "%f45", "%f46", "%f47",
+       "%f48", "%f49", "%f50", "%f51", "%f52", "%f53", "%f54", "%f55",
+       "%f56", "%f57", "%f58", "%f59", "%f60", "%f61", "%f62", "%f63",
+};
+
+/**
+ * get_arch_regstr() - lookup register name from its DWARF register number
+ * @n: the DWARF register number
+ *
+ * get_arch_regstr() returns the name of the register from sparc_regs_table,
+ * indexed by its DWARF register number. If the register is not found in the
+ * table, this returns NULL.
+ */
+const char *get_arch_regstr(unsigned int n)
+{
+       return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
+}
index 3592057..fd7407c 100644 (file)
@@ -107,7 +107,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
                goto out_free_syms;
        err = 0;
        if (symbol_conf.use_callchain) {
-               err = append_chain(he->callchain, data->callchain, syms);
+               err = append_chain(he->callchain, data->callchain, syms, data->period);
                if (err)
                        goto out_free_syms;
        }
index 49ece79..97d7656 100755 (executable)
@@ -5,17 +5,13 @@ if [ $# -eq 1 ]  ; then
 fi
 
 GVF=${OUTPUT}PERF-VERSION-FILE
-DEF_VER=v0.0.2.PERF
 
 LF='
 '
 
-# First see if there is a version file (included in release tarballs),
-# then try git-describe, then default.
-if test -f version
-then
-       VN=$(cat version) || VN="$DEF_VER"
-elif test -d .git -o -f .git &&
+# First check if there is a .git to get the version from git describe,
+# otherwise try to get the version from the kernel Makefile.
+if test -d ../../.git -o -f ../../.git &&
        VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
        case "$VN" in
        *$LF*) (exit 1) ;;
@@ -27,7 +23,12 @@ elif test -d .git -o -f .git &&
 then
        VN=$(echo "$VN" | sed -e 's/-/./g');
 else
-       VN="$DEF_VER"
+       eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '`
+
+       VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
 fi
 
 VN=$(expr "$VN" : v*'\(.*\)')
index 62b69ad..52c777e 100644 (file)
@@ -230,7 +230,7 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
 
 static void
 add_child(struct callchain_node *parent, struct resolved_chain *chain,
-         int start)
+         int start, u64 period)
 {
        struct callchain_node *new;
 
@@ -238,7 +238,7 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
        fill_node(new, chain, start);
 
        new->children_hit = 0;
-       new->hit = 1;
+       new->hit = period;
 }
 
 /*
@@ -248,7 +248,8 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
  */
 static void
 split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
-               struct callchain_list *to_split, int idx_parents, int idx_local)
+               struct callchain_list *to_split, int idx_parents, int idx_local,
+               u64 period)
 {
        struct callchain_node *new;
        struct list_head *old_tail;
@@ -275,41 +276,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
        /* create a new child for the new branch if any */
        if (idx_total < chain->nr) {
                parent->hit = 0;
-               add_child(parent, chain, idx_total);
-               parent->children_hit++;
+               add_child(parent, chain, idx_total, period);
+               parent->children_hit += period;
        } else {
-               parent->hit = 1;
+               parent->hit = period;
        }
 }
 
 static int
 __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start);
+              unsigned int start, u64 period);
 
 static void
 __append_chain_children(struct callchain_node *root,
                        struct resolved_chain *chain,
-                       unsigned int start)
+                       unsigned int start, u64 period)
 {
        struct callchain_node *rnode;
 
        /* lookup in childrens */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = __append_chain(rnode, chain, start);
+               unsigned int ret = __append_chain(rnode, chain, start, period);
 
                if (!ret)
                        goto inc_children_hit;
        }
        /* nothing in children, add to the current node */
-       add_child(root, chain, start);
+       add_child(root, chain, start, period);
 
 inc_children_hit:
-       root->children_hit++;
+       root->children_hit += period;
 }
 
 static int
 __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start)
+              unsigned int start, u64 period)
 {
        struct callchain_list *cnode;
        unsigned int i = start;
@@ -345,18 +346,18 @@ __append_chain(struct callchain_node *root, struct resolved_chain *chain,
 
        /* we match only a part of the node. Split it and add the new chain */
        if (i - start < root->val_nr) {
-               split_add_child(root, chain, cnode, start, i - start);
+               split_add_child(root, chain, cnode, start, i - start, period);
                return 0;
        }
 
        /* we match 100% of the path, increment the hit */
        if (i - start == root->val_nr && i == chain->nr) {
-               root->hit++;
+               root->hit += period;
                return 0;
        }
 
        /* We match the node and still have a part remaining */
-       __append_chain_children(root, chain, i);
+       __append_chain_children(root, chain, i, period);
 
        return 0;
 }
@@ -380,7 +381,7 @@ static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
 
 
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms)
+                struct map_symbol *syms, u64 period)
 {
        struct resolved_chain *filtered;
 
@@ -397,7 +398,7 @@ int append_chain(struct callchain_node *root, struct ip_callchain *chain,
        if (!filtered->nr)
                goto end;
 
-       __append_chain_children(root, filtered, 0);
+       __append_chain_children(root, filtered, 0, period);
 end:
        free(filtered);
 
index 1ca73e4..f2e9ee1 100644 (file)
@@ -49,6 +49,9 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->brothers);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
+
+       node->parent = NULL;
+       node->hit = 0;
 }
 
 static inline u64 cumul_hits(struct callchain_node *node)
@@ -58,7 +61,7 @@ static inline u64 cumul_hits(struct callchain_node *node)
 
 int register_callchain_param(struct callchain_param *param);
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms);
+                struct map_symbol *syms, u64 period);
 
 bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
 #endif /* __PERF_CALLCHAIN_H */
index 07f89b6..784ee0b 100644 (file)
@@ -631,9 +631,14 @@ int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
                        u64 session_total)
 {
        char bf[512];
-       hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
-                            show_displacement, displacement,
-                            true, session_total);
+       int ret;
+
+       ret = hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
+                                  show_displacement, displacement,
+                                  true, session_total);
+       if (!ret)
+               return 0;
+
        return fprintf(fp, "%s\n", bf);
 }
 
@@ -762,6 +767,7 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 print_entries:
        for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+               int cnt;
 
                if (show_displacement) {
                        if (h->pair != NULL)
@@ -771,8 +777,13 @@ print_entries:
                                displacement = 0;
                        ++position;
                }
-               ret += hist_entry__fprintf(h, pair, show_displacement,
-                                          displacement, fp, self->stats.total_period);
+               cnt = hist_entry__fprintf(h, pair, show_displacement,
+                                         displacement, fp, self->stats.total_period);
+               /* Ignore those that didn't match the parent filter */
+               if (!cnt)
+                       continue;
+
+               ret += cnt;
 
                if (symbol_conf.use_callchain)
                        ret += hist_entry__fprintf_callchain(h, fp, self->stats.total_period);
@@ -965,13 +976,17 @@ static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
                 * Parse hexa addresses followed by ':'
                 */
                line_ip = strtoull(tmp, &tmp2, 16);
-               if (*tmp2 != ':' || tmp == tmp2)
+               if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
                        line_ip = -1;
        }
 
        if (line_ip != -1) {
-               u64 start = map__rip_2objdump(self->ms.map, sym->start);
+               u64 start = map__rip_2objdump(self->ms.map, sym->start),
+                   end = map__rip_2objdump(self->ms.map, sym->end);
+
                offset = line_ip - start;
+               if (offset < 0 || (u64)line_ip > end)
+                       offset = -1;
        }
 
        objdump_line = objdump_line__new(offset, line);
index b63e571..5b27683 100644 (file)
@@ -1443,6 +1443,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
 {
        struct dirent *dent;
        DIR *dir = opendir(dir_name);
+       int ret = 0;
 
        if (!dir) {
                pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
@@ -1465,8 +1466,9 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
 
                        snprintf(path, sizeof(path), "%s/%s",
                                 dir_name, dent->d_name);
-                       if (map_groups__set_modules_path_dir(self, path) < 0)
-                               goto failure;
+                       ret = map_groups__set_modules_path_dir(self, path);
+                       if (ret < 0)
+                               goto out;
                } else {
                        char *dot = strrchr(dent->d_name, '.'),
                             dso_name[PATH_MAX];
@@ -1487,17 +1489,18 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
                                 dir_name, dent->d_name);
 
                        long_name = strdup(path);
-                       if (long_name == NULL)
-                               goto failure;
+                       if (long_name == NULL) {
+                               ret = -1;
+                               goto out;
+                       }
                        dso__set_long_name(map->dso, long_name);
                        dso__kernel_module_get_build_id(map->dso, "");
                }
        }
 
-       return 0;
-failure:
+out:
        closedir(dir);
-       return -1;
+       return ret;
 }
 
 static char *get_kernel_version(const char *root_dir)