Merge branch 'stable-3.2' into pandora-3.2
author:    Grazvydas Ignotas <notasas@gmail.com>
           Sun, 3 Jul 2016 00:22:57 +0000 (03:22 +0300)
committer: Grazvydas Ignotas <notasas@gmail.com>
           Sun, 3 Jul 2016 00:22:57 +0000 (03:22 +0300)
743 files changed:
Documentation/ABI/testing/debugfs-aufs [new file with mode: 0644]
Documentation/ABI/testing/sysfs-aufs [new file with mode: 0644]
Documentation/DMA-attributes.txt
Documentation/arm/OMAP/DSS
Documentation/arm/memory.txt
Documentation/cpuidle/sysfs.txt
Documentation/filesystems/aufs/README [new file with mode: 0644]
Documentation/filesystems/aufs/design/01intro.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/02struct.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/03lookup.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/04branch.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/05wbr_policy.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/06mmap.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/07export.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/08shwh.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/10dynop.txt [new file with mode: 0644]
Documentation/filesystems/aufs/design/99plan.txt [new file with mode: 0644]
Documentation/filesystems/debugfs.txt
Documentation/filesystems/files.txt
Documentation/kernel-parameters.txt
Documentation/trace/events-kmem.txt
Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
arch/Kconfig
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/dmabounce.c
arch/arm/configs/omap3_pandora_debug_defconfig [new file with mode: 0644]
arch/arm/configs/omap3_pandora_defconfig [new file with mode: 0644]
arch/arm/include/asm/assembler.h
arch/arm/include/asm/atomic.h
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/device.h
arch/arm/include/asm/dma-contiguous.h [new file with mode: 0644]
arch/arm/include/asm/dma-iommu.h [new file with mode: 0644]
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/ftrace.h
arch/arm/include/asm/hugetlb-2level.h [new file with mode: 0644]
arch/arm/include/asm/hugetlb-3level.h [new file with mode: 0644]
arch/arm/include/asm/hugetlb.h [new file with mode: 0644]
arch/arm/include/asm/idmap.h [new file with mode: 0644]
arch/arm/include/asm/io.h
arch/arm/include/asm/mach/map.h
arch/arm/include/asm/opcodes.h [new file with mode: 0644]
arch/arm/include/asm/page.h
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/pgtable-2level-types.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level-hwdef.h [new file with mode: 0644]
arch/arm/include/asm/pgtable-3level-types.h [new file with mode: 0644]
arch/arm/include/asm/pgtable-3level.h [new file with mode: 0644]
arch/arm/include/asm/pgtable-hwdef.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/proc-fns.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/sched_clock.h
arch/arm/include/asm/system.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/tls.h
arch/arm/kernel/Makefile
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/head.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/opcodes.c [new file with mode: 0644]
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
arch/arm/kernel/process.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/return_address.c
arch/arm/kernel/sched_clock.c
arch/arm/kernel/setup.c
arch/arm/kernel/sleep.S
arch/arm/kernel/smp.c
arch/arm/kernel/suspend.c
arch/arm/kernel/swp_emulate.c
arch/arm/kernel/sysfs_v7.c [new file with mode: 0644]
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/lib/Makefile
arch/arm/lib/call_with_stack.S [new file with mode: 0644]
arch/arm/lib/csumpartial.S
arch/arm/lib/delay.S
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-at91/include/mach/vmalloc.h [deleted file]
arch/arm/mach-bcmring/include/mach/vmalloc.h [deleted file]
arch/arm/mach-clps711x/include/mach/system.h
arch/arm/mach-clps711x/include/mach/vmalloc.h [deleted file]
arch/arm/mach-cns3xxx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-davinci/include/mach/vmalloc.h [deleted file]
arch/arm/mach-dove/include/mach/vmalloc.h [deleted file]
arch/arm/mach-ebsa110/include/mach/system.h
arch/arm/mach-ebsa110/include/mach/vmalloc.h [deleted file]
arch/arm/mach-ep93xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-exynos/include/mach/vmalloc.h [deleted file]
arch/arm/mach-footbridge/include/mach/system.h
arch/arm/mach-footbridge/include/mach/vmalloc.h [deleted file]
arch/arm/mach-gemini/include/mach/vmalloc.h [deleted file]
arch/arm/mach-h720x/include/mach/vmalloc.h [deleted file]
arch/arm/mach-highbank/include/mach/vmalloc.h [deleted file]
arch/arm/mach-integrator/include/mach/vmalloc.h [deleted file]
arch/arm/mach-iop13xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-iop32x/include/mach/system.h
arch/arm/mach-iop32x/include/mach/vmalloc.h [deleted file]
arch/arm/mach-iop33x/include/mach/system.h
arch/arm/mach-iop33x/include/mach/vmalloc.h [deleted file]
arch/arm/mach-ixp2000/include/mach/vmalloc.h [deleted file]
arch/arm/mach-ixp23xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-ixp4xx/include/mach/hardware.h
arch/arm/mach-ixp4xx/include/mach/system.h
arch/arm/mach-ixp4xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-kirkwood/include/mach/vmalloc.h [deleted file]
arch/arm/mach-ks8695/include/mach/system.h
arch/arm/mach-ks8695/include/mach/vmalloc.h [deleted file]
arch/arm/mach-lpc32xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-mmp/include/mach/system.h
arch/arm/mach-mmp/include/mach/vmalloc.h [deleted file]
arch/arm/mach-mmp/time.c
arch/arm/mach-msm/smd_debug.c
arch/arm/mach-mv78xx0/include/mach/vmalloc.h [deleted file]
arch/arm/mach-mxs/include/mach/vmalloc.h [deleted file]
arch/arm/mach-mxs/system.c
arch/arm/mach-netx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-nomadik/include/mach/vmalloc.h [deleted file]
arch/arm/mach-omap1/include/mach/vmalloc.h [deleted file]
arch/arm/mach-omap1/time.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/clkt34xx_dpll3m2.c
arch/arm/mach-omap2/clkt_clksel.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/clock3xxx.c
arch/arm/mach-omap2/clock3xxx.h
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/clockdomain.h
arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/common-board-devices.c
arch/arm/mach-omap2/cpuidle34xx.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/drm.c [new file with mode: 0644]
arch/arm/mach-omap2/gpmc-nand.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/hsmmc.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/include/mach/vmalloc.h [deleted file]
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_2420_data.c
arch/arm/mach-omap2/omap_hwmod_2430_data.c
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_l3_smx.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/pm.h
arch/arm/mach-omap2/pm24xx.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-omap2/powerdomain.h
arch/arm/mach-omap2/sdram-micron-mt29c4g96mazapcjg-5.h [new file with mode: 0644]
arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h
arch/arm/mach-omap2/sdrc.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/smartreflex-class3.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/smartreflex.h
arch/arm/mach-omap2/sr_device.c
arch/arm/mach-omap2/sram34xx.S
arch/arm/mach-omap2/timer.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/vc3xxx_data.c
arch/arm/mach-omap2/voltage.c
arch/arm/mach-omap2/vp.c
arch/arm/mach-orion5x/include/mach/vmalloc.h [deleted file]
arch/arm/mach-picoxcell/include/mach/vmalloc.h [deleted file]
arch/arm/mach-picoxcell/time.c
arch/arm/mach-pnx4008/include/mach/system.h
arch/arm/mach-pnx4008/include/mach/vmalloc.h [deleted file]
arch/arm/mach-prima2/include/mach/vmalloc.h [deleted file]
arch/arm/mach-pxa/Kconfig
arch/arm/mach-pxa/include/mach/hardware.h
arch/arm/mach-pxa/include/mach/vmalloc.h [deleted file]
arch/arm/mach-pxa/reset.c
arch/arm/mach-pxa/time.c
arch/arm/mach-realview/include/mach/vmalloc.h [deleted file]
arch/arm/mach-rpc/include/mach/system.h
arch/arm/mach-rpc/include/mach/vmalloc.h [deleted file]
arch/arm/mach-s3c2410/include/mach/system-reset.h
arch/arm/mach-s3c2410/include/mach/vmalloc.h [deleted file]
arch/arm/mach-s3c64xx/include/mach/system.h
arch/arm/mach-s3c64xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-s5p64x0/include/mach/vmalloc.h [deleted file]
arch/arm/mach-s5pc100/include/mach/vmalloc.h [deleted file]
arch/arm/mach-s5pv210/include/mach/vmalloc.h [deleted file]
arch/arm/mach-sa1100/include/mach/system.h
arch/arm/mach-sa1100/include/mach/vmalloc.h [deleted file]
arch/arm/mach-sa1100/time.c
arch/arm/mach-shark/include/mach/vmalloc.h [deleted file]
arch/arm/mach-shmobile/include/mach/system.h
arch/arm/mach-shmobile/include/mach/vmalloc.h [deleted file]
arch/arm/mach-spear3xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-spear6xx/include/mach/vmalloc.h [deleted file]
arch/arm/mach-tegra/timer.c
arch/arm/mach-u300/include/mach/vmalloc.h [deleted file]
arch/arm/mach-u300/timer.c
arch/arm/mach-ux500/include/mach/vmalloc.h [deleted file]
arch/arm/mach-versatile/include/mach/vmalloc.h [deleted file]
arch/arm/mach-vexpress/include/mach/vmalloc.h [deleted file]
arch/arm/mach-vt8500/include/mach/vmalloc.h [deleted file]
arch/arm/mach-w90x900/include/mach/system.h
arch/arm/mach-w90x900/include/mach/vmalloc.h [deleted file]
arch/arm/mach-zynq/include/mach/vmalloc.h [deleted file]
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/alignment.c
arch/arm/mm/cache-v3.S
arch/arm/mm/cache-v4.S
arch/arm/mm/cache-v4wb.S
arch/arm/mm/cache-v4wt.S
arch/arm/mm/cache-v6.S
arch/arm/mm/cache-v7.S
arch/arm/mm/context.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm/mm/flush.c
arch/arm/mm/fsr-2level.c [new file with mode: 0644]
arch/arm/mm/fsr-3level.c [new file with mode: 0644]
arch/arm/mm/hugetlbpage.c [new file with mode: 0644]
arch/arm/mm/idmap.c
arch/arm/mm/init.c
arch/arm/mm/ioremap.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/pgd.c
arch/arm/mm/proc-arm1020.S
arch/arm/mm/proc-arm1020e.S
arch/arm/mm/proc-arm1022.S
arch/arm/mm/proc-arm1026.S
arch/arm/mm/proc-arm6_7.S
arch/arm/mm/proc-arm720.S
arch/arm/mm/proc-arm740.S
arch/arm/mm/proc-arm7tdmi.S
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm922.S
arch/arm/mm/proc-arm925.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-arm940.S
arch/arm/mm/proc-arm946.S
arch/arm/mm/proc-arm9tdmi.S
arch/arm/mm/proc-fa526.S
arch/arm/mm/proc-feroceon.S
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-mohawk.S
arch/arm/mm/proc-sa110.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7-2level.S [new file with mode: 0644]
arch/arm/mm/proc-v7-3level.S [new file with mode: 0644]
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S
arch/arm/mm/vmregion.c
arch/arm/mm/vmregion.h
arch/arm/plat-iop/time.c
arch/arm/plat-mxc/include/mach/vmalloc.h [deleted file]
arch/arm/plat-mxc/system.c
arch/arm/plat-mxc/time.c
arch/arm/plat-nomadik/timer.c
arch/arm/plat-omap/Makefile
arch/arm/plat-omap/clock.c
arch/arm/plat-omap/common.c
arch/arm/plat-omap/counter_32k.c
arch/arm/plat-omap/cpu-omap.c [deleted file]
arch/arm/plat-omap/dma.c
arch/arm/plat-omap/dmtimer.c
arch/arm/plat-omap/include/plat/common.h
arch/arm/plat-omap/include/plat/cpu.h
arch/arm/plat-omap/include/plat/dmtimer.h
arch/arm/plat-omap/include/plat/gpmc.h
arch/arm/plat-omap/include/plat/omap-serial.h
arch/arm/plat-omap/include/plat/omap_device.h
arch/arm/plat-omap/include/plat/omap_hwmod.h
arch/arm/plat-omap/include/plat/sram.h
arch/arm/plat-omap/omap-pm-noop.c
arch/arm/plat-omap/omap_device.c
arch/arm/plat-omap/sram.c
arch/arm/plat-orion/time.c
arch/arm/plat-s5p/s5p-time.c
arch/arm/plat-spear/include/plat/system.h
arch/arm/plat-spear/include/plat/vmalloc.h [deleted file]
arch/arm/plat-tcc/include/mach/vmalloc.h [deleted file]
arch/arm/plat-versatile/sched-clock.c
arch/ia64/include/asm/hugetlb.h
arch/mips/include/asm/hugetlb.h
arch/mips/kernel/kspd.c
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/kernel/sysfs.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/s390/include/asm/debug.h
arch/s390/include/asm/hugetlb.h
arch/s390/kernel/debug.c
arch/sh/include/asm/hugetlb.h
arch/sparc/include/asm/hugetlb.h
arch/tile/include/asm/hugetlb.h
arch/tile/include/asm/pgtable.h
arch/tile/include/asm/pgtable_32.h
arch/tile/include/asm/pgtable_64.h
arch/tile/include/hv/hypervisor.h
arch/x86/Kconfig
arch/x86/include/asm/hugetlb.h
arch/x86/xen/debugfs.c
arch/x86/xen/debugfs.h
drivers/acpi/ec_sys.c
drivers/base/Kconfig
drivers/base/Makefile
drivers/base/dma-coherent.c
drivers/base/dma-contiguous.c [new file with mode: 0644]
drivers/base/firmware_class.c
drivers/base/memory.c
drivers/base/node.c
drivers/base/power/opp.c
drivers/base/soc.c [new file with mode: 0644]
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/omap-cpufreq.c [new file with mode: 0644]
drivers/cpuidle/cpuidle.c
drivers/cpuidle/governors/menu.c
drivers/cpuidle/sysfs.c
drivers/gpio/gpio-omap.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/i2c/busses/i2c-omap.c
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/twl4030-pwrbutton.c
drivers/input/misc/vsense.c [new file with mode: 0644]
drivers/input/touchscreen/ads7846.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-triggers.c
drivers/leds/leds-twl4030-pwm.c [new file with mode: 0644]
drivers/media/video/omap/omap_vout.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/twl4030-power.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/omap_overclocking.c [new file with mode: 0644]
drivers/mmc/card/block.c
drivers/mmc/card/mmc_test.c
drivers/mmc/card/queue.c
drivers/mmc/core/bus.c
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_io.c
drivers/mmc/core/sdio_ops.c
drivers/mmc/host/omap_hsmmc.c
drivers/mtd/nand/omap2.c
drivers/mtd/ubi/Kconfig
drivers/mtd/ubi/Makefile
drivers/mtd/ubi/attach.c [moved from drivers/mtd/ubi/scan.c with 56% similarity]
drivers/mtd/ubi/build.c
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/debug.h
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/gluebi.c
drivers/mtd/ubi/io.c
drivers/mtd/ubi/kapi.c
drivers/mtd/ubi/scan.h [deleted file]
drivers/mtd/ubi/ubi-media.h
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/upd.c
drivers/mtd/ubi/vmt.c
drivers/mtd/ubi/vtbl.c
drivers/mtd/ubi/wl.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/usb/asix.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/carl9170/debug.c
drivers/net/wireless/libertas/debugfs.c
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/wl1251/acx.c
drivers/net/wireless/wl1251/acx.h
drivers/net/wireless/wl1251/boot.c
drivers/net/wireless/wl1251/cmd.c
drivers/net/wireless/wl1251/cmd.h
drivers/net/wireless/wl1251/debugfs.c
drivers/net/wireless/wl1251/event.c
drivers/net/wireless/wl1251/event.h
drivers/net/wireless/wl1251/init.c
drivers/net/wireless/wl1251/main.c
drivers/net/wireless/wl1251/ps.c
drivers/net/wireless/wl1251/rx.c
drivers/net/wireless/wl1251/sdio.c
drivers/net/wireless/wl1251/spi.c
drivers/net/wireless/wl1251/tx.c
drivers/net/wireless/wl1251/tx.h
drivers/net/wireless/wl1251/wl1251.h
drivers/power/bq27x00_battery.c
drivers/power/power_supply_leds.c
drivers/power/twl4030_charger.c
drivers/regulator/twl-regulator.c
drivers/rtc/interface.c
drivers/s390/block/dasd.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/android/Kconfig [new file with mode: 0644]
drivers/staging/android/Makefile [new file with mode: 0644]
drivers/staging/android/TODO [new file with mode: 0644]
drivers/staging/android/alarm-dev.c [new file with mode: 0644]
drivers/staging/android/alarm.c [new file with mode: 0644]
drivers/staging/android/android_alarm.h [new file with mode: 0644]
drivers/staging/android/ashmem.c [new file with mode: 0644]
drivers/staging/android/ashmem.h [new file with mode: 0644]
drivers/staging/android/binder.c [new file with mode: 0644]
drivers/staging/android/binder.h [new file with mode: 0644]
drivers/staging/android/logger.c [new file with mode: 0644]
drivers/staging/android/logger.h [new file with mode: 0644]
drivers/staging/android/lowmemorykiller.c [new file with mode: 0644]
drivers/staging/android/persistent_ram.c [new file with mode: 0644]
drivers/staging/android/persistent_ram.h [new file with mode: 0644]
drivers/staging/android/ram_console.c [new file with mode: 0644]
drivers/staging/android/ram_console.h [moved from arch/arm/mach-tegra/include/mach/vmalloc.h with 65% similarity]
drivers/staging/android/switch/Kconfig [new file with mode: 0644]
drivers/staging/android/switch/Makefile [new file with mode: 0644]
drivers/staging/android/switch/switch.h [new file with mode: 0644]
drivers/staging/android/switch/switch_class.c [new file with mode: 0644]
drivers/staging/android/switch/switch_gpio.c [new file with mode: 0644]
drivers/staging/android/timed_gpio.c [new file with mode: 0644]
drivers/staging/android/timed_gpio.h [moved from arch/arm/mach-msm/include/mach/vmalloc.h with 58% similarity]
drivers/staging/android/timed_output.c [new file with mode: 0644]
drivers/staging/android/timed_output.h [new file with mode: 0644]
drivers/staging/media/as102/as102_usb_drv.c
drivers/staging/omapdrm/Kconfig [new file with mode: 0644]
drivers/staging/omapdrm/Makefile [new file with mode: 0644]
drivers/staging/omapdrm/TODO [new file with mode: 0644]
drivers/staging/omapdrm/omap_connector.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_crtc.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_drm.h [new file with mode: 0644]
drivers/staging/omapdrm/omap_drv.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_drv.h [new file with mode: 0644]
drivers/staging/omapdrm/omap_encoder.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_fb.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_fbdev.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_gem.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_gem_helpers.c [new file with mode: 0644]
drivers/staging/tidspbridge/Kconfig
drivers/thermal/Kconfig
drivers/thermal/Makefile
drivers/thermal/omap3_thermal.c [new file with mode: 0644]
drivers/tty/serial/omap-serial.c
drivers/tty/vt/keyboard.c
drivers/usb/gadget/inode.c
drivers/usb/host/ehci-omap.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/omap2430.c
drivers/usb/otg/twl4030-usb.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/pandora_bl.c [new file with mode: 0644]
drivers/video/fbmem.c
drivers/video/logo/Kconfig
drivers/video/logo/Makefile
drivers/video/logo/logo.c
drivers/video/logo/logo_pandora_clut224.ppm [new file with mode: 0644]
drivers/video/omap2/displays/panel-acx565akm.c
drivers/video/omap2/displays/panel-generic-dpi.c
drivers/video/omap2/displays/panel-n8x0.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/display.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/dss.h
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/manager.c
drivers/video/omap2/dss/overlay.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/venc.c
drivers/video/omap2/omapfb/Kconfig
drivers/video/omap2/omapfb/omapfb-ioctl.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/omap2/omapfb/omapfb.h
drivers/watchdog/twl4030_wdt.c
fs/Kconfig
fs/Makefile
fs/aufs/Kconfig [new file with mode: 0644]
fs/aufs/Makefile [new file with mode: 0644]
fs/aufs/aufs.h [new file with mode: 0644]
fs/aufs/branch.c [new file with mode: 0644]
fs/aufs/branch.h [new file with mode: 0644]
fs/aufs/conf.mk [new file with mode: 0644]
fs/aufs/cpup.c [new file with mode: 0644]
fs/aufs/cpup.h [new file with mode: 0644]
fs/aufs/dbgaufs.c [new file with mode: 0644]
fs/aufs/dbgaufs.h [new file with mode: 0644]
fs/aufs/dcsub.c [new file with mode: 0644]
fs/aufs/dcsub.h [new file with mode: 0644]
fs/aufs/debug.c [new file with mode: 0644]
fs/aufs/debug.h [new file with mode: 0644]
fs/aufs/dentry.c [new file with mode: 0644]
fs/aufs/dentry.h [new file with mode: 0644]
fs/aufs/dinfo.c [new file with mode: 0644]
fs/aufs/dir.c [new file with mode: 0644]
fs/aufs/dir.h [new file with mode: 0644]
fs/aufs/dynop.c [new file with mode: 0644]
fs/aufs/dynop.h [new file with mode: 0644]
fs/aufs/export.c [new file with mode: 0644]
fs/aufs/f_op.c [new file with mode: 0644]
fs/aufs/f_op_sp.c [new file with mode: 0644]
fs/aufs/file.c [new file with mode: 0644]
fs/aufs/file.h [new file with mode: 0644]
fs/aufs/finfo.c [new file with mode: 0644]
fs/aufs/fstype.h [new file with mode: 0644]
fs/aufs/hfsnotify.c [new file with mode: 0644]
fs/aufs/hfsplus.c [new file with mode: 0644]
fs/aufs/hnotify.c [new file with mode: 0644]
fs/aufs/i_op.c [new file with mode: 0644]
fs/aufs/i_op_add.c [new file with mode: 0644]
fs/aufs/i_op_del.c [new file with mode: 0644]
fs/aufs/i_op_ren.c [new file with mode: 0644]
fs/aufs/iinfo.c [new file with mode: 0644]
fs/aufs/inode.c [new file with mode: 0644]
fs/aufs/inode.h [new file with mode: 0644]
fs/aufs/ioctl.c [new file with mode: 0644]
fs/aufs/loop.c [new file with mode: 0644]
fs/aufs/loop.h [new file with mode: 0644]
fs/aufs/magic.mk [new file with mode: 0644]
fs/aufs/module.c [new file with mode: 0644]
fs/aufs/module.h [new file with mode: 0644]
fs/aufs/opts.c [new file with mode: 0644]
fs/aufs/opts.h [new file with mode: 0644]
fs/aufs/plink.c [new file with mode: 0644]
fs/aufs/poll.c [new file with mode: 0644]
fs/aufs/procfs.c [new file with mode: 0644]
fs/aufs/rdu.c [new file with mode: 0644]
fs/aufs/rwsem.h [new file with mode: 0644]
fs/aufs/sbinfo.c [new file with mode: 0644]
fs/aufs/spl.h [new file with mode: 0644]
fs/aufs/super.c [new file with mode: 0644]
fs/aufs/super.h [new file with mode: 0644]
fs/aufs/sysaufs.c [new file with mode: 0644]
fs/aufs/sysaufs.h [new file with mode: 0644]
fs/aufs/sysfs.c [new file with mode: 0644]
fs/aufs/sysrq.c [new file with mode: 0644]
fs/aufs/vdir.c [new file with mode: 0644]
fs/aufs/vfsub.c [new file with mode: 0644]
fs/aufs/vfsub.h [new file with mode: 0644]
fs/aufs/wbr_policy.c [new file with mode: 0644]
fs/aufs/whout.c [new file with mode: 0644]
fs/aufs/whout.h [new file with mode: 0644]
fs/aufs/wkq.c [new file with mode: 0644]
fs/aufs/wkq.h [new file with mode: 0644]
fs/aufs/xino.c [new file with mode: 0644]
fs/autofs4/dev-ioctl.c
fs/binfmt_misc.c
fs/cifs/cifsfs.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/misc.c
fs/compat.c
fs/debugfs/file.c
fs/debugfs/inode.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/keystore.c
fs/ecryptfs/miscdev.c
fs/exec.c
fs/fat/dir.c
fs/fat/fat.h
fs/fat/inode.c
fs/fcntl.c
fs/file.c
fs/file_table.c
fs/inode.c
fs/internal.h
fs/namei.c
fs/namespace.c
fs/notify/group.c
fs/notify/mark.c
fs/ocfs2/cluster/netdebug.c
fs/open.c
fs/proc/base.c
fs/proc/internal.h
fs/proc/task_mmu.c
fs/splice.c
fs/ubifs/Kconfig
fs/ubifs/Makefile
fs/ubifs/budget.c
fs/ubifs/commit.c
fs/ubifs/compress.c
fs/ubifs/debug.c
fs/ubifs/debug.h
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/gc.c
fs/ubifs/io.c
fs/ubifs/journal.c
fs/ubifs/log.c
fs/ubifs/lprops.c
fs/ubifs/lpt.c
fs/ubifs/lpt_commit.c
fs/ubifs/master.c
fs/ubifs/orphan.c
fs/ubifs/recovery.c
fs/ubifs/replay.c
fs/ubifs/sb.c
fs/ubifs/scan.c
fs/ubifs/super.c
fs/ubifs/tnc.c
fs/ubifs/tnc_commit.c
fs/ubifs/tnc_misc.c
fs/ubifs/ubifs.h
fs/ubifs/xattr.c
include/asm-generic/dma-coherent.h
include/asm-generic/dma-contiguous.h [new file with mode: 0644]
include/asm-generic/pgtable.h
include/linux/Kbuild
include/linux/aufs_type.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/cpuidle.h
include/linux/debugfs.h
include/linux/device.h
include/linux/dma-attrs.h
include/linux/dma-contiguous.h [new file with mode: 0644]
include/linux/dma-mapping.h
include/linux/fdtable.h
include/linux/gfp.h
include/linux/huge_mm.h
include/linux/i2c/twl.h
include/linux/i2c/vsense.h [new file with mode: 0644]
include/linux/leds.h
include/linux/linux_logo.h
include/linux/memory.h
include/linux/mm.h
include/linux/mmc/card.h
include/linux/mmc/core.h
include/linux/mmc/host.h
include/linux/mmzone.h
include/linux/msdos_fs.h
include/linux/mtd/ubi.h
include/linux/namei.h
include/linux/node.h
include/linux/omapfb.h
include/linux/opp.h
include/linux/page-debug-flags.h
include/linux/page-isolation.h
include/linux/pagevec.h
include/linux/platform_data/omap_drm.h [new file with mode: 0644]
include/linux/relay.h
include/linux/slub_def.h
include/linux/splice.h
include/linux/swap.h
include/linux/sys_soc.h [new file with mode: 0644]
include/linux/usb/otg.h
include/linux/vmalloc.h
include/linux/writeback.h
include/mtd/ubi-user.h
include/net/bluetooth/hci_core.h
include/net/netfilter/xt_log.h
include/sound/pcm.h
include/trace/events/kmem.h
include/video/omapdss.h
kernel/exit.c
kernel/irq/pm.c
kernel/relay.c
kernel/sched.c
kernel/signal.c
kernel/trace/blktrace.c
kernel/trace/trace.c
kernel/trace/trace.h
lib/fault-inject.c
mm/Kconfig
mm/Kconfig.debug
mm/Makefile
mm/compaction.c
mm/failslab.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/madvise.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_isolation.c
mm/pgtable-generic.c
mm/readahead.c
mm/shmem.c
mm/slub.c
mm/swap.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/bluetooth/hci_conn.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_debugfs.c
net/mac80211/rc80211_minstrel_ht.c
scripts/dtc/dtc.c
scripts/dtc/flattree.c
security/commoncap.c
security/device_cgroup.c
security/security.c
sound/core/pcm_native.c
sound/soc/codecs/Makefile
sound/soc/codecs/twl4030.c
sound/soc/omap/omap-mcbsp.c
sound/soc/omap/omap-pcm.c
sound/soc/omap/omap3pandora.c
sound/usb/caiaq/device.c

diff --git a/Documentation/ABI/testing/debugfs-aufs b/Documentation/ABI/testing/debugfs-aufs
new file mode 100644 (file)
index 0000000..a58f0d0
--- /dev/null
@@ -0,0 +1,50 @@
+What:          /debug/aufs/si_<id>/
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               Under /debug/aufs, a directory named si_<id> is created
+               per aufs mount, where <id> is a unique id generated
+               internally.
+
+What:          /debug/aufs/si_<id>/plink
+Date:          Apr 2013
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It has three lines and shows the information about the
+               pseudo-link. The first line is a single number
+               representing a number of buckets. The second line is a
+               number of pseudo-links per buckets (separated by a
+               blank). The last line is a single number representing a
+               total number of pseudo-links.
+               When the aufs mount option 'noplink' is specified, it
+               will show "1\n0\n0\n".
+
+What:          /debug/aufs/si_<id>/xib
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It shows the consumed blocks by xib (External Inode Number
+               Bitmap), its block size and file size.
+               When the aufs mount option 'noxino' is specified, it
+               will be empty. About XINO files, see the aufs manual.
+
+What:          /debug/aufs/si_<id>/xino0, xino1 ... xinoN
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It shows the consumed blocks by xino (External Inode Number
+               Translation Table), its link count, block size and file
+               size.
+               When the aufs mount option 'noxino' is specified, it
+               will be empty. About XINO files, see the aufs manual.
+
+What:          /debug/aufs/si_<id>/xigen
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It shows the consumed blocks by xigen (External Inode
+               Generation Table), its block size and file size.
+               If CONFIG_AUFS_EXPORT is disabled, this entry will not
+               be created.
+               When the aufs mount option 'noxino' is specified, it
+               will be empty. About XINO files, see the aufs manual.
diff --git a/Documentation/ABI/testing/sysfs-aufs b/Documentation/ABI/testing/sysfs-aufs
new file mode 100644 (file)
index 0000000..066916d
--- /dev/null
@@ -0,0 +1,31 @@
+What:          /sys/fs/aufs/si_<id>/
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               Under /sys/fs/aufs, a directory named si_<id> is created
+               per aufs mount, where <id> is a unique id generated
+               internally.
+
+What:          /sys/fs/aufs/si_<id>/br0, br1 ... brN
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It shows the absolute path of a member directory (which
+               is called branch) in aufs, and its permission.
+
+What:          /sys/fs/aufs/si_<id>/brid0, brid1 ... bridN
+Date:          July 2013
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It shows the id of a member directory (which is called
+               branch) in aufs.
+
+What:          /sys/fs/aufs/si_<id>/xi_path
+Date:          March 2009
+Contact:       J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+               It shows the absolute path of XINO (External Inode Number
+               Bitmap, Translation Table and Generation Table) file
+               even if it is the default path.
+               When the aufs mount option 'noxino' is specified, it
+               will be empty. About XINO files, see the aufs manual.
index b768cc0..725580d 100644 (file)
@@ -31,3 +31,39 @@ may be weakly ordered, that is that reads and writes may pass each other.
 Since it is optional for platforms to implement DMA_ATTR_WEAK_ORDERING,
 those that do not will simply ignore the attribute and exhibit default
 behavior.
+
+DMA_ATTR_WRITE_COMBINE
+----------------------
+
+DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
+buffered to improve performance.
+
+Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE,
+those that do not will simply ignore the attribute and exhibit default
+behavior.
+
+DMA_ATTR_NON_CONSISTENT
+-----------------------
+
+DMA_ATTR_NON_CONSISTENT lets the platform choose to return either
+consistent or non-consistent memory as it sees fit.  By using this API,
+you are guaranteeing to the platform that you have all the correct and
+necessary sync points for this memory in the driver.
+
+DMA_ATTR_NO_KERNEL_MAPPING
+--------------------------
+
+DMA_ATTR_NO_KERNEL_MAPPING lets the platform avoid creating a kernel
+virtual mapping for the allocated buffer. On some architectures creating
+such a mapping is a non-trivial task and consumes very limited resources
+(like kernel virtual address space or dma consistent address space).
+Buffers allocated with this attribute can be only passed to user space
+by calling dma_mmap_attrs(). By using this API, you are guaranteeing
+that you won't dereference the pointer returned by dma_alloc_attrs(). You
+can treat it as a cookie that must be passed to dma_mmap_attrs() and
+dma_free_attrs(). Make sure that both of these also get this attribute
+set on each call.
+
+Since it is optional for platforms to implement
+DMA_ATTR_NO_KERNEL_MAPPING, those that do not will simply ignore the
+attribute and exhibit default behavior.
index 888ae7b..c3f3d25 100644 (file)
@@ -156,6 +156,7 @@ timings             Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
                "pal" and "ntsc"
 panel_name
 tear_elim      Tearing elimination 0=off, 1=on
+venc_type      Output type (video encoder only): "composite" or "svideo"
 
 There are also some debugfs files at <debugfs>/omapdss/ which show information
 about clocks and registers.
@@ -239,7 +240,10 @@ FB0 +-- GFX  ---- LCD ---- LCD
 Misc notes
 ----------
 
-OMAP FB allocates the framebuffer memory using the OMAP VRAM allocator.
+OMAP FB allocates the framebuffer memory using the standard dma allocator. You
+can enable the Contiguous Memory Allocator (CONFIG_CMA) to improve the dma
+allocator, and if CMA is enabled, you can use the "cma=" kernel parameter to
+increase the global memory area for CMA.
 
 Using DSI DPLL to generate pixel clock it is possible produce the pixel clock
 of 86.5MHz (max possible), and with that you get 1280x1024@57 output from DVI.
@@ -255,11 +259,6 @@ framebuffer parameters.
 Kernel boot arguments
 ---------------------
 
-vram=<size>[,<physaddr>]
-       - Amount of total VRAM to preallocate and optionally a physical start
-         memory address. For example, "10M". omapfb allocates memory for
-         framebuffers from VRAM.
-
 omapfb.mode=<display>:<mode>[,...]
        - Default video mode for specified displays. For example,
          "dvi:800x400MR-24@60".  See drivers/video/modedb.c.
@@ -293,6 +292,16 @@ omapfb.rotate=<angle>
 omapfb.mirror=<y|n>
        - Default mirror for all framebuffers. Only works with DMA rotation.
 
+omapfb.vram_cache=<y|n>
+       - Sets the framebuffer memory to be write-through cached. This may be
+         useful in the configurations where only CPU is allowed to write to
+         the framebuffer and eliminate the need for enabling shadow
+         framebuffer in Xorg DDX drivers such as xf86-video-fbdev and
+         xf86-video-omapfb. Enabling write-through cache is only useful
+         for ARM11 and Cortex-A8 processors. Cortex-A9 does not support
+         write-through cache well, see "Cortex-A9 behavior for Normal Memory
+         Cacheable memory regions" section in Cortex-A9 TRM for more details.
+
 omapdss.def_disp=<display>
        - Name of default display, to which all overlays will be connected.
          Common examples are "lcd" or "tv".
index 771d48d..208a2d4 100644 (file)
@@ -51,15 +51,14 @@ ffc00000    ffefffff        DMA memory mapping region.  Memory returned
 ff000000       ffbfffff        Reserved for future expansion of DMA
                                mapping region.
 
-VMALLOC_END    feffffff        Free for platform use, recommended.
-                               VMALLOC_END must be aligned to a 2MB
-                               boundary.
-
 VMALLOC_START  VMALLOC_END-1   vmalloc() / ioremap() space.
                                Memory returned by vmalloc/ioremap will
                                be dynamically placed in this region.
-                               VMALLOC_START may be based upon the value
-                               of the high_memory variable.
+                               Machine specific static mappings are also
+                               located here through iotable_init().
+                               VMALLOC_START is based upon the value
+                               of the high_memory variable, and VMALLOC_END
+                               is equal to 0xff000000.
 
 PAGE_OFFSET    high_memory-1   Kernel direct-mapped RAM region.
                                This maps the platforms RAM, and typically
index 50d7b16..9d28a34 100644 (file)
@@ -36,6 +36,7 @@ drwxr-xr-x 2 root root 0 Feb  8 10:42 state3
 /sys/devices/system/cpu/cpu0/cpuidle/state0:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -45,6 +46,7 @@ total 0
 /sys/devices/system/cpu/cpu0/cpuidle/state1:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -54,6 +56,7 @@ total 0
 /sys/devices/system/cpu/cpu0/cpuidle/state2:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -63,6 +66,7 @@ total 0
 /sys/devices/system/cpu/cpu0/cpuidle/state3:
 total 0
 -r--r--r-- 1 root root 4096 Feb  8 10:42 desc
+-rw-r--r-- 1 root root 4096 Feb  8 10:42 disable
 -r--r--r-- 1 root root 4096 Feb  8 10:42 latency
 -r--r--r-- 1 root root 4096 Feb  8 10:42 name
 -r--r--r-- 1 root root 4096 Feb  8 10:42 power
@@ -72,6 +76,7 @@ total 0
 
 
 * desc : Small description about the idle state (string)
+* disable : Option to disable this idle state (bool)
 * latency : Latency to exit out of this idle state (in microseconds)
 * name : Name of the idle state (string)
 * power : Power consumed while in this idle state (in milliwatts)
diff --git a/Documentation/filesystems/aufs/README b/Documentation/filesystems/aufs/README
new file mode 100644 (file)
index 0000000..51b4923
--- /dev/null
@@ -0,0 +1,338 @@
+
+Aufs3 -- advanced multi layered unification filesystem version 3.x
+http://aufs.sf.net
+Junjiro R. Okajima
+
+
+0. Introduction
+----------------------------------------
+In the early days, aufs was an entirely re-designed and re-implemented
+Unionfs Version 1.x series. After many original ideas, approaches,
+improvements and implementations, it became totally different from
+Unionfs while keeping the basic features.
+Recently, the Unionfs Version 2.x series began taking some of the same
+approaches as aufs1.
+Unionfs is being developed by Professor Erez Zadok at Stony Brook
+University and his team.
+
+Aufs3 supports linux-3.0 and later.
+If you want older kernel version support, try aufs2-2.6.git or
+aufs2-standalone.git repository, aufs1 from CVS on SourceForge.
+
+Note: it becomes clear that "Aufs was rejected. Let's give it up."
+According to Christoph Hellwig, linux rejects all union-type filesystems
+but UnionMount.
+<http://marc.info/?l=linux-kernel&m=123938533724484&w=2>
+
+
+1. Features
+----------------------------------------
+- unite several directories into a single virtual filesystem. The member
+  directory is called as a branch.
+- you can specify the permission flags to the branch, which are 'readonly',
+  'readwrite' and 'whiteout-able.'
+- by upper writable branch, internal copyup and whiteout, files/dirs on
+  readonly branch are modifiable logically.
+- dynamic branch manipulation, add, del.
+- etc...
+
+Also there are many enhancements in aufs1, such as:
+- readdir(3) in userspace.
+- keep inode number by external inode number table
+- keep the timestamps of file/dir in internal copyup operation
+- seekable directory, supporting NFS readdir.
+- whiteout is hardlinked in order to reduce the consumption of inodes
+  on branch
+- do not copyup, nor create a whiteout when it is unnecessary
+- revert a single systemcall when an error occurs in aufs
+- remount interface instead of ioctl
+- maintain /etc/mtab by an external command, /sbin/mount.aufs.
+- loopback mounted filesystem as a branch
+- kernel thread for removing the dir who has a plenty of whiteouts
+- support copyup sparse file (a file which has a 'hole' in it)
+- default permission flags for branches
+- selectable permission flags for ro branch, whether whiteout can
+  exist or not
+- export via NFS.
+- support <sysfs>/fs/aufs and <debugfs>/aufs.
+- support multiple writable branches, some policies to select one
+  among multiple writable branches.
+- a new semantics for link(2) and rename(2) to support multiple
+  writable branches.
+- no glibc changes are required.
+- pseudo hardlink (hardlink over branches)
+- allow a direct access manually to a file on branch, e.g. bypassing aufs.
+  including NFS or remote filesystem branch.
+- userspace wrapper for pathconf(3)/fpathconf(3) with _PC_LINK_MAX.
+- and more...
+
+Currently these features are dropped temporarily from aufs3.
+See design/08plan.txt for details.
+- test only the highest one for the directory permission (dirperm1)
+- copyup on open (coo=)
+- nested mount, i.e. aufs as readonly no-whiteout branch of another aufs
+  (robr)
+- statistics of aufs thread (/sys/fs/aufs/stat)
+- delegation mode (dlgt)
+  a delegation of the internal branch access to support task I/O
+  accounting, which also supports Linux Security Modules (LSM) mainly
+  for Suse AppArmor.
+- intent.open/create (file open in a single lookup)
+
+Features or just an idea in the future (see also design/*.txt),
+- reorder the branch index without del/re-add.
+- permanent xino files for NFSD
+- an option for refreshing the opened files after add/del branches
+- 'move' policy for copy-up between two writable branches, after
+  checking free space.
+- light version, without branch manipulation. (unnecessary?)
+- copyup in userspace
+- inotify in userspace
+- readv/writev
+- xattr, acl
+
+
+2. Download
+----------------------------------------
+There were three GIT trees for aufs3, aufs3-linux.git,
+aufs3-standalone.git, and aufs-util.git. Note that there is no "3" in
+"aufs-util.git."
+While the aufs-util is always necessary, you need either of aufs3-linux
+or aufs3-standalone.
+
+The aufs3-linux tree includes the whole linux mainline GIT tree,
+git://git.kernel.org/.../torvalds/linux.git.
+And you cannot select CONFIG_AUFS_FS=m for this version, eg. you cannot
+build aufs3 as an external kernel module.
+
+On the other hand, the aufs3-standalone tree has only aufs source files
+and necessary patches, and you can select CONFIG_AUFS_FS=m.
+
+You will find GIT branches whose name is in form of "aufs3.x" where "x"
+represents the linux kernel version, "linux-3.x". For instance,
+"aufs3.0" is for linux-3.0. For latest "linux-3.x-rcN", use
+"aufs3.x-rcN" branch.
+
+o aufs3-linux tree
+$ git clone --reference /your/linux/git/tree \
+       git://git.code.sf.net/p/aufs/aufs3-linux aufs-aufs3-linux \
+       aufs3-linux.git
+- if you don't have linux GIT tree, then remove "--reference ..."
+$ cd aufs3-linux.git
+$ git checkout origin/aufs3.0
+
+o aufs3-standalone tree
+$ git clone git://git.code.sf.net/p/aufs/aufs3-standalone \
+       aufs3-standalone.git
+$ cd aufs3-standalone.git
+$ git checkout origin/aufs3.0
+
+o aufs-util tree
+$ git clone git://git.code.sf.net/p/aufs/aufs-util \
+       aufs-util.git
+$ cd aufs-util.git
+$ git checkout origin/aufs3.0
+
+Note: The 3.x-rcN branch is to be used with `rc' kernel versions ONLY.
+The minor version number, 'x' in '3.x', of aufs may not always
+follow the minor version number of the kernel.
+Because changes in the kernel that cause the use of a new
+minor version number do not always require changes to aufs-util.
+
+Since aufs-util has its own minor version number, you may not be
+able to find a GIT branch in aufs-util for your kernel's
+exact minor version number.
+In this case, you should git-checkout the branch for the
+nearest lower number.
+
+For (an unreleased) example:
+If you are using "linux-3.10" and the "aufs3.10" branch
+does not exist in aufs-util repository, then "aufs3.9", "aufs3.8"
+or something numerically smaller is the branch for your kernel.
+
+Also you can view all branches by
+       $ git branch -a
+
+
+3. Configuration and Compilation
+----------------------------------------
+Make sure you have git-checkout'ed the correct branch.
+
+For aufs3-linux tree,
+- enable CONFIG_EXPERIMENTAL and CONFIG_AUFS_FS.
+- set other aufs configurations if necessary.
+
+For aufs3-standalone tree,
+There are several ways to build.
+
+1.
+- apply ./aufs3-kbuild.patch to your kernel source files.
+- apply ./aufs3-base.patch too.
+- apply ./aufs3-mmap.patch too.
+- apply ./aufs3-standalone.patch too, if you have a plan to set
+  CONFIG_AUFS_FS=m. otherwise you don't need ./aufs3-standalone.patch.
+- copy ./{Documentation,fs,include/linux/aufs_type.h} files to your
+  kernel source tree. Never copy $PWD/include/linux/Kbuild.
+- enable CONFIG_EXPERIMENTAL and CONFIG_AUFS_FS, you can select either
+  =m or =y.
+- and build your kernel as usual.
+- install the built kernel.
+- install the header files too by "make headers_install" to the
+  directory where you specify. By default, it is $PWD/usr.
+  "make help" shows a brief note for headers_install.
+- and reboot your system.
+
+2.
+- module only (CONFIG_AUFS_FS=m).
+- apply ./aufs3-base.patch to your kernel source files.
+- apply ./aufs3-mmap.patch too.
+- apply ./aufs3-standalone.patch too.
+- build your kernel, don't forget "make headers_install", and reboot.
+- edit ./config.mk and set other aufs configurations if necessary.
+  Note: You should read $PWD/fs/aufs/Kconfig carefully which describes
+  every aufs configurations.
+- build the module by simple "make".
+- you can specify ${KDIR} make variable which points to your kernel
+  source tree.
+- install the files
+  + run "make install" to install the aufs module, or copy the built
+    $PWD/aufs.ko to /lib/modules/... and run depmod -a (or reboot simply).
+  + run "make install_headers" (instead of headers_install) to install
+    the modified aufs header file (you can specify DESTDIR which is
+    available in aufs standalone version's Makefile only), or copy
+    $PWD/usr/include/linux/aufs_type.h to /usr/include/linux or wherever
+    you like manually. By default, the target directory is $PWD/usr.
+- no need to apply aufs3-kbuild.patch, nor copying source files to your
+  kernel source tree.
+
+Note: The header file aufs_type.h is necessary to build aufs-util
+      as well as "make headers_install" in the kernel source tree.
+      headers_install is subject to be forgotten, but it is essentially
+      necessary, not only for building aufs-util.
+      You may not meet problems without headers_install in some older
+      version though.
+
+And then,
+- read README in aufs-util, build and install it
+- note that your distribution may contain an obsoleted version of
+  aufs_type.h in /usr/include/linux or something. When you build aufs
+  utilities, make sure that your compiler refers the correct aufs header
+  file which is built by "make headers_install."
+- if you want to use readdir(3) in userspace or pathconf(3) wrapper,
+  then run "make install_ulib" too. And refer to the aufs manual in
+  detail.
+
+
+4. Usage
+----------------------------------------
+At first, make sure aufs-util are installed, and please read the aufs
+manual, aufs.5 in aufs-util.git tree.
+$ man -l aufs.5
+
+And then,
+$ mkdir /tmp/rw /tmp/aufs
+# mount -t aufs -o br=/tmp/rw:${HOME} none /tmp/aufs
+
+Here is another example. The result is equivalent.
+# mount -t aufs -o br=/tmp/rw=rw:${HOME}=ro none /tmp/aufs
+  Or
+# mount -t aufs -o br:/tmp/rw none /tmp/aufs
+# mount -o remount,append:${HOME} /tmp/aufs
+
+Then, you can see whole tree of your home dir through /tmp/aufs. If
+you modify a file under /tmp/aufs, the one on your home directory is
+not affected, instead the same named file will be newly created under
+/tmp/rw. And all of your modification to a file will be applied to
+the one under /tmp/rw. This is called the file based Copy on Write
+(COW) method.
+Aufs mount options are described in aufs.5.
+If you run chroot or something and make your aufs as a root directory,
+then you need to customize the shutdown script. See the aufs manual in
+detail.
+
+Additionally, there are some sample usages of aufs which are a
+diskless system with network booting, and LiveCD over NFS.
+See sample dir in CVS tree on SourceForge.
+
+
+5. Contact
+----------------------------------------
+When you have any problems or strange behaviour in aufs, please let me
+know with:
+- /proc/mounts (instead of the output of mount(8))
+- /sys/module/aufs/*
+- /sys/fs/aufs/* (if you have them)
+- /debug/aufs/* (if you have them)
+- linux kernel version
+  if your kernel is not plain, for example modified by distributor,
+  the url where i can download its source is necessary too.
+- aufs version which was printed at loading the module or booting the
+  system, instead of the date you downloaded.
+- configuration (define/undefine CONFIG_AUFS_xxx)
+- kernel configuration or /proc/config.gz (if you have it)
+- behaviour which you think to be incorrect
+- actual operation, reproducible one is better
+- mailto: aufs-users at lists.sourceforge.net
+
+Usually, I don't watch the Public Areas(Bugs, Support Requests, Patches,
+and Feature Requests) on SourceForge. Please join and write to
+aufs-users ML.
+
+
+6. Acknowledgements
+----------------------------------------
+Thanks to everyone who have tried and are using aufs, whoever
+have reported a bug or any feedback.
+
+Especially donators:
+Tomas Matejicek(slax.org) made a donation (much more than once).
+       Since Apr 2010, Tomas M (the author of Slax and Linux Live
+       scripts) is making "doubling" donations.
+       Unfortunately I cannot list all of the donators, but I really
+       appreciate.
+       It ends Aug 2010, but the ordinary donation URL is still available.
+       <http://sourceforge.net/donate/index.php?group_id=167503>
+Dai Itasaka made a donation (2007/8).
+Chuck Smith made a donation (2008/4, 10 and 12).
+Henk Schoneveld made a donation (2008/9).
+Chih-Wei Huang, ASUS, CTC donated Eee PC 4G (2008/10).
+Francois Dupoux made a donation (2008/11).
+Bruno Cesar Ribas and Luis Carlos Erpen de Bona, C3SL serves public
+       aufs2 GIT tree (2009/2).
+William Grant made a donation (2009/3).
+Patrick Lane made a donation (2009/4).
+The Mail Archive (mail-archive.com) made donations (2009/5).
+Nippy Networks (Ed Wildgoose) made a donation (2009/7).
+New Dream Network, LLC (www.dreamhost.com) made a donation (2009/11).
+Pavel Pronskiy made a donation (2011/2).
+Iridium and Inmarsat satellite phone retailer (www.mailasail.com), Nippy
+       Networks (Ed Wildgoose) made a donation for hardware (2011/3).
+Max Lekomcev (DOM-TV project) made a donation (2011/7, 12, 2012/3, 6 and
+11).
+Sam Liddicott made a donation (2011/9).
+Era Scarecrow made a donation (2013/4).
+Bor Ratajc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+POIRETTE Marc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+lauri kasvandik made a donation (2013/5).
+"pemasu from Finland" made a donation (2013/7).
+The Parted Magic Project made a donation (2013/9).
+Pavel Barta made a donation (2013/10).
+
+Thank you very much.
+Donations are always, including future donations, very important and
+helpful for me to keep on developing aufs.
+
+
+7.
+----------------------------------------
+If you are an experienced user, no explanation is needed. Aufs is
+just a linux filesystem.
+
+
+Enjoy!
+
+# Local variables: ;
+# mode: text;
+# End: ;
diff --git a/Documentation/filesystems/aufs/design/01intro.txt b/Documentation/filesystems/aufs/design/01intro.txt
new file mode 100644 (file)
index 0000000..e60f8c6
--- /dev/null
@@ -0,0 +1,162 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Introduction
+----------------------------------------
+
+aufs [ei ju: ef es] | [a u f s]
+1. abbrev. for "advanced multi-layered unification filesystem".
+2. abbrev. for "another unionfs".
+3. abbrev. for "auf das" in German which means "on the" in English.
+   Ex. "Butter aufs Brot"(G) means "butter onto bread"(E).
+   But "Filesystem aufs Filesystem" is hard to understand.
+
+AUFS is a filesystem with features:
+- multi layered stackable unification filesystem, the member directory
+  is called as a branch.
+- branch permission and attribute, 'readonly', 'real-readonly',
+  'readwrite', 'whiteout-able', 'link-able whiteout' and their
+  combination.
+- internal "file copy-on-write".
+- logical deletion, whiteout.
+- dynamic branch manipulation, adding, deleting and changing permission.
+- allow bypassing aufs, user's direct branch access.
+- external inode number translation table and bitmap which maintains the
+  persistent aufs inode number.
+- seekable directory, including NFS readdir.
+- file mapping, mmap and sharing pages.
+- pseudo-link, hardlink over branches.
+- loopback mounted filesystem as a branch.
+- several policies to select one among multiple writable branches.
+- revert a single systemcall when an error occurs in aufs.
+- and more...
+
+
+Multi Layered Stackable Unification Filesystem
+----------------------------------------------------------------------
+Most people already know what it is.
+It is a filesystem which unifies several directories and provides a
+merged single directory. When users access a file, the access will be
+passed/re-directed/converted (sorry, I am not sure which English word is
+correct) to the real file on the member filesystem. The member
+filesystem is called 'lower filesystem' or 'branch' and has a mode
+'readonly' and 'readwrite.' And the deletion for a file on the lower
+readonly branch is handled by creating 'whiteout' on the upper writable
+branch.
+
+On LKML, there have been discussions about UnionMount (Jan Blunck,
+Bharata B Rao and Valerie Aurora) and Unionfs (Erez Zadok). They took
+different approaches to implement the merged-view.
+The former tries putting it into VFS, and the latter implements as a
+separate filesystem.
+(If I misunderstand about these implementations, please let me know and
+I shall correct it. Because it is a long time ago when I read their
+source files last time).
+
+UnionMount's approach will be able to stay small, but it may be hard to
+share branches between several UnionMounts since the whiteout in it is
+implemented in the inode on the branch filesystem and is always
+shared. According to Bharata's post, readdir does not seem to be
+finished yet.
+There are several missing features known in this implementations such as
+- for users, the inode number may change silently. eg. copy-up.
+- link(2) may break by copy-up.
+- read(2) may get an obsoleted filedata (fstat(2) too).
+- fcntl(F_SETLK) may be broken by copy-up.
+- unnecessary copy-up may happen, for example mmap(MAP_PRIVATE) after
+  open(O_RDWR).
+
+Unionfs has a longer history. When I started implementing a stacking filesystem
+(Aug 2005), it already existed. It has virtual super_block, inode,
+dentry and file objects and they have an array pointing lower same kind
+objects. After contributing many patches for Unionfs, I re-started my
+project AUFS (Jun 2006).
+
+In AUFS, the structure of filesystem resembles to Unionfs, but I
+implemented my own ideas, approaches and enhancements and it became
+totally different one.
+
+Comparing DM snapshot and fs based implementation
+- the number of bytes to be copied between devices is much smaller.
+- the type of filesystem must be one and only.
+- the fs must be writable, no readonly fs, even for the lower original
+  device. so the compression fs will not be usable. but if we use
+  loopback mount, we may address this issue.
+  for instance,
+       mount /cdrom/squashfs.img /sq
+       losetup /sq/ext2.img
+       losetup /somewhere/cow
+       dmsetup "snapshot /dev/loop0 /dev/loop1 ..."
+- it will be difficult (or needs more operations) to extract the
+  difference between the original device and COW.
+- DM snapshot-merge may help a lot when users try merging. in the
+  fs-layer union, users will use rsync(1).
+
+
+Several characters/aspects of aufs
+----------------------------------------------------------------------
+
+Aufs has several characters or aspects.
+1. a filesystem, callee of VFS helper
+2. sub-VFS, caller of VFS helper for branches
+3. a virtual filesystem which maintains persistent inode number
+4. reader/writer of files on branches such like an application
+
+1. Callee of VFS Helper
+As an ordinary linux filesystem, aufs is a callee of VFS. For instance,
+unlink(2) from an application reaches sys_unlink() kernel function and
+then vfs_unlink() is called. vfs_unlink() is one of VFS helper and it
+calls filesystem specific unlink operation. Actually aufs implements the
+unlink operation but it behaves like a redirector.
+
+2. Caller of VFS Helper for Branches
+aufs_unlink() passes the unlink request to the branch filesystem as if
+it were called from VFS. So the called unlink operation of the branch
+filesystem acts as usual. As a caller of VFS helper, aufs should handle
+every necessary pre/post operation for the branch filesystem.
+- acquire the lock for the parent dir on a branch
+- lookup in a branch
+- revalidate dentry on a branch
+- mnt_want_write() for a branch
+- vfs_unlink() for a branch
+- mnt_drop_write() for a branch
+- release the lock on a branch
+
+3. Persistent Inode Number
+One of the most important issue for a filesystem is to maintain inode
+numbers. This is particularly important to support exporting a
+filesystem via NFS. Aufs is a virtual filesystem which doesn't have a
+backend block device for its own. But some storage is necessary to
+maintain inode number. It may be a large space and may not suit to keep
+in memory. Aufs rents some space from its first writable branch
+filesystem (by default) and creates file(s) on it. These files are
+created by aufs internally and removed soon (currently) keeping opened.
+Note: Because these files are removed, they are totally gone after
+      unmounting aufs. It means the inode numbers are not persistent
+      across unmount or reboot. I have a plan to make them really
+      persistent which will be important for aufs on NFS server.
+
+4. Read/Write Files Internally (copy-on-write)
+Because a branch can be readonly, when you write a file on it, aufs will
+"copy-up" it to the upper writable branch internally. And then write the
+originally requested thing to the file. Generally the kernel doesn't
+open/read/write files actively. In aufs, even a single write may cause an
+internal "file copy". This behaviour is very similar to the cp(1) command.
+
+Some people may think it is better to pass such work to user space
+helper, instead of doing in kernel space. Actually I am still thinking
+about it. But currently I have implemented it in kernel space.
diff --git a/Documentation/filesystems/aufs/design/02struct.txt b/Documentation/filesystems/aufs/design/02struct.txt
new file mode 100644 (file)
index 0000000..f54d654
--- /dev/null
@@ -0,0 +1,226 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Basic Aufs Internal Structure
+
+Superblock/Inode/Dentry/File Objects
+----------------------------------------------------------------------
+As like an ordinary filesystem, aufs has its own
+superblock/inode/dentry/file objects. All these objects have a
+dynamically allocated array and store the same kind of pointers to the
+lower filesystem, branch.
+For example, when you build a union with one readwrite branch and one
+readonly, mounted /au, /rw and /ro respectively.
+- /au = /rw + /ro
+- /ro/fileA exists but /rw/fileA does not
+
+Aufs lookup operation finds /ro/fileA and gets dentry for that. These
+pointers are stored in an aufs dentry. The array in the aufs dentry will be,
+- [0] = NULL
+- [1] = /ro/fileA
+
+This style of an array is essentially same to the aufs
+superblock/inode/dentry/file objects.
+
+Because aufs supports manipulating branches, ie. add/delete/change
+dynamically, these objects have their own generation. When branches are
+changed, the generation in the aufs superblock is incremented, and the
+generation in every other object is compared against it when that object
+is accessed.
+When the generation in another object becomes obsolete, aufs refreshes
+the internal array.
+
+
+Superblock
+----------------------------------------------------------------------
+Additionally aufs superblock has some data for policies to select one
+among multiple writable branches, XIB files, pseudo-links and kobject.
+See below in detail.
+About the policies which supports copy-down a directory, see policy.txt
+too.
+
+
+Branch and XINO(External Inode Number Translation Table)
+----------------------------------------------------------------------
+Every branch has its own xino (external inode number translation table)
+file. The xino file is created and unlinked by aufs internally. When two
+members of a union exist on the same filesystem, they share the single
+xino file.
+The struct of a xino file is simple, just a sequence of aufs inode
+numbers which is indexed by the lower inode number.
+In the above sample, assume the inode number of /ro/fileA is i111 and
+aufs assigns the inode number i999 for fileA. Then aufs writes 999 as
+4(8) bytes at 111 * 4(8) bytes offset in the xino file.
+
+When the inode numbers are not contiguous, the xino file will be sparse
+which has a hole in it and doesn't consume as much disk space as it
+might appear. If your branch filesystem consumes disk space for such
+holes, then you should specify 'xino=' option at mounting aufs.
+
+Also a writable branch has three kinds of "whiteout bases". All of
+these exist when the branch is joined to aufs, and their names are
+whiteout-ed doubly, so that users will never see their names in aufs
+hierarchy.
+1. a regular file which will be linked to all whiteouts.
+2. a directory to store a pseudo-link.
+3. a directory to store an "orphan-ed" file temporarily.
+
+1. Whiteout Base
+   When you remove a file on a readonly branch, aufs handles it as a
+   logical deletion and creates a whiteout on the upper writable branch
+   as a hardlink of this file in order not to consume inode on the
+   writable branch.
+2. Pseudo-link Dir
+   See below, Pseudo-link.
+3. Step-Parent Dir
+   When "fileC" exists on the lower readonly branch only and it is
+   opened and removed with its parent dir, and then user writes
+   something into it, then aufs copies-up fileC to this
+   directory. Because there is no other dir to store fileC. After
+   creating a file under this dir, the file is unlinked.
+
+Because aufs supports manipulating branches, ie. add/delete/change
+dynamically, a branch has its own id. When the branch order changes, aufs
+finds the new index by searching the branch id.
+
+
+Pseudo-link
+----------------------------------------------------------------------
+Assume "fileA" exists on the lower readonly branch only and it is
+hardlinked to "fileB" on the branch. When you write something to fileA,
+aufs copies-up it to the upper writable branch. Additionally aufs
+creates a hardlink under the Pseudo-link Directory of the writable
+branch. The inode of a pseudo-link is kept in aufs super_block as a
+simple list. If fileB is read after unlinking fileA, aufs returns
+filedata from the pseudo-link instead of the lower readonly
+branch. Because the pseudo-link is based upon the inode, to keep the
+inode number by xino (see above) is important.
+
+All the hardlinks under the Pseudo-link Directory of the writable branch
+should be restored in a proper location later. Aufs provides a utility
+to do this. The userspace helpers executed at remounting and unmounting
+aufs by default.
+During this utility is running, it puts aufs into the pseudo-link
+maintenance mode. In this mode, only the process which began the
+maintenance mode (and its child processes) is allowed to operate in
+aufs. Some other processes which are not related to the pseudo-link will
+be allowed to run too, but the rest have to return an error or wait
+until the maintenance mode ends. If a process already acquires an inode
+mutex (in VFS), it has to return an error.
+
+
+XIB(external inode number bitmap)
+----------------------------------------------------------------------
+In addition to the xino file per branch, aufs has an external inode number
+bitmap in a superblock object. It is also a file such like a xino file.
+It is a simple bitmap to mark whether the aufs inode number is in-use or
+not.
+To reduce the file I/O, aufs prepares a single memory page to cache xib.
+
+Aufs implements a feature to truncate/refresh both of xino and xib to
+reduce the number of consumed disk blocks for these files.
+
+
+Virtual or Vertical Dir, and Readdir in Userspace
+----------------------------------------------------------------------
+In order to support multiple layers (branches), aufs readdir operation
+constructs a virtual dir block on memory. For readdir, aufs calls
+vfs_readdir() internally for each dir on branches, merges their entries
+with eliminating the whiteout-ed ones, and sets it to file (dir)
+object. So the file object has its entry list until it is closed. The
+entry list will be updated when the file position is zero and becomes
+old. This decision is made in aufs automatically.
+
+The dynamically allocated memory block for the name of entries has a
+unit of 512 bytes (by default) and stores the names contiguously (no
+padding). Another block for each entry is handled by kmem_cache too.
+While building dir blocks, aufs creates a hash list and judges whether
+the entry is whiteouted by its upper branch or already listed.
+The merged result is cached in the corresponding inode object and
+maintained by a customizable life-time option.
+
+Some people may say this can be a security hole or invite a DoS attack,
+since an opened and once readdir-ed dir (file object) holds its entry
+list and puts pressure on system memory. But I'd say it is similar
+to files under /proc or /sys. The virtual files in them also hold a
+memory page (generally) while they are opened. When an idea to reduce
+memory for them is introduced, it will be applied to aufs too.
+For those who really hate this situation, I've developed readdir(3)
+library which performs this merging in userspace. You just need to set
+the LD_PRELOAD environment variable, and aufs will not consume any
+memory in kernel space for readdir(3).
+
+
+Workqueue
+----------------------------------------------------------------------
+Aufs sometimes requires privilege access to a branch. For instance,
+in copy-up/down operation. When a user process is going to make changes
+to a file which exists in the lower readonly branch only, and the mode
+of one of ancestor directories may not be writable by a user
+process. Here aufs copy-up the file with its ancestors and they may
+require privilege to set its owner/group/mode/etc.
+This is a typical case of the application character of aufs (see
+Introduction).
+
+Aufs uses workqueue synchronously for this case. It creates its own
+workqueue. The workqueue is a kernel thread and has privilege. Aufs
+passes the request to call mkdir or write (for example), and wait for
+its completion. This approach solves a problem of a signal handler
+simply.
+If aufs didn't adopt the workqueue and instead changed the privilege of
+the user process, and if the mkdir/write call raised SIGXFSZ or another
+signal, then the user process might gain a privilege, or the generated
+core file would be owned by a superuser.
+
+Also aufs uses the system global workqueue ("events" kernel thread) too
+for asynchronous tasks, such like handling inotify/fsnotify, re-creating a
+whiteout base and etc. This is unrelated to a privilege.
+Most of aufs operation tries acquiring a rw_semaphore for aufs
+superblock at the beginning, at the same time waits for the completion
+of all queued asynchronous tasks.
+
+
+Whiteout
+----------------------------------------------------------------------
+The whiteout in aufs is very similar to Unionfs's. That is represented
+by its filename. UnionMount takes an approach of a file mode, but I am
+afraid several utilities (find(1) or something) will have to support it.
+
+Basically the whiteout represents "logical deletion" which stops aufs
+from looking up further, but it also represents "dir is opaque" which
+likewise stops the lookup.
+
+In aufs, rmdir(2) and rename(2) for dir uses whiteout alternatively.
+In order to make several functions in a single systemcall to be
+revertible, aufs adopts an approach to rename a directory to a temporary
+unique whiteouted name.
+For example, in rename(2) dir where the target dir already existed, aufs
+renames the target dir to a temporary unique whiteouted name before the
+actual rename on a branch and then handles other actions (make it opaque,
+update the attributes, etc). If an error happens in these actions, aufs
+simply renames the whiteouted name back and returns an error. If all
+succeed, aufs registers a function to remove the whiteouted unique
+temporary name completely and asynchronously to the system global
+workqueue.
+
+
+Copy-up
+----------------------------------------------------------------------
+It is a well-known feature or concept.
+When user modifies a file on a readonly branch, aufs operate "copy-up"
+internally and makes change to the new file on the upper writable branch.
+When the trigger systemcall does not update the timestamps of the parent
+dir, aufs reverts it after copy-up.
diff --git a/Documentation/filesystems/aufs/design/03lookup.txt b/Documentation/filesystems/aufs/design/03lookup.txt
new file mode 100644 (file)
index 0000000..d3ca527
--- /dev/null
@@ -0,0 +1,106 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Lookup in a Branch
+----------------------------------------------------------------------
+Since aufs has a character of sub-VFS (see Introduction), it operates
+lookup for branches as VFS does. It may be a heavy work. Generally
+speaking struct nameidata is a bigger structure and includes many
+information. But almost all lookup operation in aufs is the simplest
+case, ie. lookup only an entry directly connected to its parent. Digging
+down the directory hierarchy is unnecessary.
+
+VFS has a function lookup_one_len() for that use, but it is not usable
+for a branch filesystem which requires struct nameidata. So aufs
+implements a simple lookup wrapper function. When a branch filesystem
+allows NULL as nameidata, it calls lookup_one_len(). Otherwise it builds
+a simplest nameidata and calls lookup_hash().
+Here aufs applies "a principle in NFSD", ie. if the filesystem supports
+NFS-export, then it has to support NULL as a nameidata parameter for
+->create(), ->lookup() and ->d_revalidate(). So the lookup wrapper in
+aufs tests if ->s_export_op in the branch is NULL or not.
+
+When a branch is a remote filesystem, aufs basically trusts its
+->d_revalidate(), also aufs forces the hardest revalidate tests for
+them.
+For d_revalidate, aufs implements three levels of revalidate tests. See
+"Revalidate Dentry and UDBA" in detail.
+
+
+Loopback Mount
+----------------------------------------------------------------------
+Basically aufs supports any type of filesystem and block device for a
+branch (actually there are some exceptions). But it is prohibited to add
+a loopback mounted one whose backend file exists in a filesystem which is
+already added to aufs. The reason is to protect aufs from a recursive
+lookup. If it was allowed, the aufs lookup operation might re-enter a
+lookup for the loopback mounted branch in the same context, and will
+cause a deadlock.
+
+
+Revalidate Dentry and UDBA (User's Direct Branch Access)
+----------------------------------------------------------------------
+Generally VFS helpers re-validate a dentry as a part of lookup.
+0. digging down the directory hierarchy.
+1. lock the parent dir by its i_mutex.
+2. lookup the final (child) entry.
+3. revalidate it.
+4. call the actual operation (create, unlink, etc.)
+5. unlock the parent dir
+
+If the filesystem implements its ->d_revalidate() (step 3), then it is
+called. Actually aufs implements it and checks the dentry on a branch is
+still valid.
+But it is not enough. Because aufs has to release the lock for the
+parent dir on a branch at the end of ->lookup() (step 2) and
+->d_revalidate() (step 3) while the i_mutex of the aufs dir is still
+held by VFS.
+If the file on a branch is changed directly, eg. bypassing aufs, after
+aufs released the lock, then the subsequent operation may cause
+something unpleasant result.
+
+This situation is a result of VFS architecture, ->lookup() and
+->d_revalidate() is separated. But I never say it is wrong. It is a good
+design from VFS's point of view. It is just not suitable for sub-VFS
+character in aufs.
+
+Aufs supports such case by three level of revalidation which is
+selectable by user.
+1. Simple Revalidate
+   Addition to the native flow in VFS's, confirm the child-parent
+   relationship on the branch just after locking the parent dir on the
+   branch in the "actual operation" (step 4). When this validation
+   fails, aufs returns EBUSY. ->d_revalidate() (step 3) in aufs still
+   checks the validation of the dentry on branches.
+2. Monitor Changes Internally by Inotify/Fsnotify
+   Addition to above, in the "actual operation" (step 4) aufs re-lookup
+   the dentry on the branch, and returns EBUSY if it finds different
+   dentry.
+   Additionally, aufs sets the inotify/fsnotify watch for every dir on branches
+   during it is in cache. When the event is notified, aufs registers a
+   function to kernel 'events' thread by schedule_work(). And the
+   function sets some special status to the cached aufs dentry and inode
+   private data. If they are not cached, then aufs has nothing to
+   do. When the same file is accessed through aufs (step 0-3) later,
+   aufs will detect the status and refresh all necessary data.
+   In this mode, aufs has to ignore the event which is fired by aufs
+   itself.
+3. No Extra Validation
+   This is the simplest test and doesn't add any additional revalidation
+   test, and skips the revalidation in step 4. It is useful and improves
+   aufs performance when the system surely hides the aufs branches from
+   users, by over-mounting something (or another method).
diff --git a/Documentation/filesystems/aufs/design/04branch.txt b/Documentation/filesystems/aufs/design/04branch.txt
new file mode 100644 (file)
index 0000000..f85f3a8
--- /dev/null
@@ -0,0 +1,76 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Branch Manipulation
+
+Since aufs supports dynamic branch manipulation, ie. add/remove a branch
+and changing its permission/attribute, there are a lot of works to do.
+
+
+Add a Branch
+----------------------------------------------------------------------
+o Confirm the adding dir exists outside of aufs, including loopback
+  mount.
+- and other various attributes...
+o Initialize the xino file and whiteout bases if necessary.
+  See struct.txt.
+
+o Check the owner/group/mode of the directory
+  When the owner/group/mode of the adding directory differs from the
+  existing branch, aufs issues a warning because it may impose a
+  security risk.
+  For example, when an upper writable branch has a world writable empty
+  top directory, a malicious user can create any files on the writable
+  branch directly, like copy-up and modify manually. If something like
+  /etc/{passwd,shadow} exists on the lower readonly branch but not on the
+  upper writable branch, and the writable branch is world-writable, then a
+  malicious guy may create /etc/passwd on the writable branch directly
+  and the infected file will be valid in aufs.
+  I am afraid it can be a security issue, but nothing to do except
+  producing a warning.
+
+
+Delete a Branch
+----------------------------------------------------------------------
+o Confirm the deleting branch is not busy
+  To be general, there is one merit to adopt "remount" interface to
+  manipulate branches. It is to discard caches. At deleting a branch,
+  aufs checks the still cached (and connected) dentries and inodes. If
+  there are any, then they are all in-use. An inode without its
+  corresponding dentry can be alive alone (for example, inotify/fsnotify case).
+
+  For the cached one, aufs checks whether the same named entry exists on
+  other branches.
+  If the cached one is a directory, because aufs provides a merged view
+  to users, as long as one dir is left on any branch aufs can show the
+  dir to users. In this case, the branch can be removed from aufs.
+  Otherwise aufs rejects deleting the branch.
+
+  If any file on the deleting branch is opened by aufs, then aufs
+  rejects deleting.
+
+
+Modify the Permission of a Branch
+----------------------------------------------------------------------
+o Re-initialize or remove the xino file and whiteout bases if necessary.
+  See struct.txt.
+
+o rw --> ro: Confirm the modifying branch is not busy
+  Aufs rejects the request if any of these conditions are true.
+  - a file on the branch is mmap-ed.
+  - a regular file on the branch is opened for write and there is no
+    same named entry on the upper branch.
diff --git a/Documentation/filesystems/aufs/design/05wbr_policy.txt b/Documentation/filesystems/aufs/design/05wbr_policy.txt
new file mode 100644 (file)
index 0000000..2bb8e58
--- /dev/null
@@ -0,0 +1,65 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Policies to Select One among Multiple Writable Branches
+----------------------------------------------------------------------
+When the number of writable branches is more than one, aufs has to decide
+the target branch for file creation or copy-up. By default, the highest
+writable branch which has the parent (or ancestor) dir of the target
+file is chosen (top-down-parent policy).
+By user's request, aufs implements some other policies to select the
+writable branch, for file creation two policies, round-robin and
+most-free-space policies. For copy-up three policies, top-down-parent,
+bottom-up-parent and bottom-up policies.
+
+As expected, the round-robin policy selects the branch in circular. When
+you have two writable branches and creates 10 new files, 5 files will be
+created for each branch. mkdir(2) systemcall is an exception. When you
+create 10 new directories, all will be created on the same branch.
+And the most-free-space policy selects the one which has most free
+space among the writable branches. The amount of free space will be
+checked by aufs internally, and users can specify its time interval.
+
+The policies for copy-up are simpler:
+top-down-parent is equivalent to the same named one in the create policy,
+bottom-up-parent selects the writable branch where the parent dir
+exists and the nearest upper one from the copyup-source,
+bottom-up selects the nearest upper writable branch from the
+copyup-source, regardless the existence of the parent dir.
+
+There are some rules or exceptions to apply these policies.
+- If there is a readonly branch above the policy-selected branch and
+  the parent dir is marked as opaque (a variation of whiteout), or the
+  target (creating) file is whiteout-ed on the upper readonly branch,
+  then the result of the policy is ignored and the target file will be
+  created on the nearest upper writable branch than the readonly branch.
+- If there is a writable branch above the policy-selected branch and
+  the parent dir is marked as opaque or the target file is whiteouted
+  on the branch, then the result of the policy is ignored and the target
+  file will be created on the highest one among the upper writable
+  branches who has diropq or whiteout. In case of whiteout, aufs removes
+  it as usual.
+- link(2) and rename(2) systemcalls are exceptions in every policy.
+  They try selecting the branch where the source exists as possible
+  since copyup a large file will take long time. If it can't be,
+  ie. the branch where the source exists is readonly, then they will
+  follow the copyup policy.
+- There is an exception for rename(2) when the target exists.
+  If the rename target exists, aufs compares the index of the branches
+  where the source and the target exists and selects the higher
+  one. If the selected branch is readonly, then aufs follows the
+  copyup policy.
diff --git a/Documentation/filesystems/aufs/design/06mmap.txt b/Documentation/filesystems/aufs/design/06mmap.txt
new file mode 100644 (file)
index 0000000..55524d6
--- /dev/null
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+mmap(2) -- File Memory Mapping
+----------------------------------------------------------------------
+In aufs, the file-mapped pages are handled by a branch fs directly, no
+interaction with aufs. It means aufs_mmap() calls the branch fs's
+->mmap().
+This approach is simple and good, but there is one problem.
+Under /proc, several entries show the mmap-ped files by its path (with
+device and inode number), and the printed path will be the path on the
+branch fs's instead of virtual aufs's.
+This is not a problem in most cases, but some utilities lsof(1) (and its
+user) may expect the path on aufs.
+
+To address this issue, aufs adds a new member called vm_prfile in struct
+vm_area_struct (and struct vm_region). The original vm_file points to
+the file on the branch fs in order to handle everything correctly as
+usual. The new vm_prfile points to a virtual file in aufs, and the
+show-functions in procfs refers to vm_prfile if it is set.
+Also we need to maintain several other places where touching vm_file
+such like
+- fork()/clone() copies vma and the reference count of vm_file is
+  incremented.
+- merging vma maintains the ref count too.
+
+This is not a good approach. It just fakes the printed path. But it
+leaves all behaviour around f_mapping unchanged. This is surely an
+advantage.
+Actually aufs had adopted another complicated approach which calls
+generic_file_mmap() and handles struct vm_operations_struct. In this
+approach, aufs met a hard problem and I could not solve it without
+switching the approach.
diff --git a/Documentation/filesystems/aufs/design/07export.txt b/Documentation/filesystems/aufs/design/07export.txt
new file mode 100644 (file)
index 0000000..ecf42a4
--- /dev/null
@@ -0,0 +1,59 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Export Aufs via NFS
+----------------------------------------------------------------------
+Here is an approach.
+- like xino/xib, add a new file 'xigen' which stores aufs inode
+  generation.
+- iget_locked(): initialize aufs inode generation for a new inode, and
+  store it in xigen file.
+- destroy_inode(): increment aufs inode generation and store it in xigen
+  file. it is necessary even if it is not unlinked, because any data of
+  inode may be changed by UDBA.
+- encode_fh(): for a root dir, simply return FILEID_ROOT. otherwise
+  build file handle by
+  + branch id (4 bytes)
+  + superblock generation (4 bytes)
+  + inode number (4 or 8 bytes)
+  + parent dir inode number (4 or 8 bytes)
+  + inode generation (4 bytes))
+  + return value of exportfs_encode_fh() for the parent on a branch (4
+    bytes)
+  + file handle for a branch (by exportfs_encode_fh())
+- fh_to_dentry():
+  + find the index of a branch from its id in handle, and check it is
+    still exist in aufs.
+  + 1st level: get the inode number from handle and search it in cache.
+  + 2nd level: if not found, get the parent inode number from handle and
+    search it in cache. and then open the parent dir, find the matching
+    inode number by vfs_readdir() and get its name, and call
+    lookup_one_len() for the target dentry.
+  + 3rd level: if the parent dir is not cached, call
+    exportfs_decode_fh() for a branch and get the parent on a branch,
+    build a pathname of it, convert it a pathname in aufs, call
+    path_lookup(). now aufs gets a parent dir dentry, then handle it as
+    the 2nd level.
+  + to open the dir, aufs needs struct vfsmount. aufs keeps vfsmount
+    for every branch, but not itself. to get this, (currently) aufs
+    searches in current->nsproxy->mnt_ns list. it may not be a good
+    idea, but I didn't get other approach.
+  + test the generation of the gotten inode.
+- every inode operation: they may get EBUSY due to UDBA. in this case,
+  convert it into ESTALE for NFSD.
+- readdir(): call lockdep_on/off() because filldir in NFSD calls
+  lookup_one_len(), vfs_getattr(), encode_fh() and others.
diff --git a/Documentation/filesystems/aufs/design/08shwh.txt b/Documentation/filesystems/aufs/design/08shwh.txt
new file mode 100644 (file)
index 0000000..18b889c
--- /dev/null
@@ -0,0 +1,53 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Show Whiteout Mode (shwh)
+----------------------------------------------------------------------
+Generally aufs hides the name of whiteouts. But in some cases, to show
+them is very useful for users. For instance, creating a new middle layer
+(branch) by merging existing layers.
+
+(borrowing aufs1 HOW-TO from a user, Michael Towers)
+When you have three branches,
+- Bottom: 'system', squashfs (underlying base system), read-only
+- Middle: 'mods', squashfs, read-only
+- Top: 'overlay', ram (tmpfs), read-write
+
+The top layer is loaded at boot time and saved at shutdown, to preserve
+the changes made to the system during the session.
+When larger changes have been made, or smaller changes have accumulated,
+the size of the saved top layer data grows. At this point, it would be
+nice to be able to merge the two overlay branches ('mods' and 'overlay')
+and rewrite the 'mods' squashfs, clearing the top layer and thus
+restoring save and load speed.
+
+This merging is simplified by the use of another aufs mount, of just the
+two overlay branches using the 'shwh' option.
+# mount -t aufs -o ro,shwh,br:/livesys/overlay=ro+wh:/livesys/mods=rr+wh \
+       aufs /livesys/merge_union
+
+A merged view of these two branches is then available at
+/livesys/merge_union, and the new feature is that the whiteouts are
+visible!
+Note that in 'shwh' mode the aufs mount must be 'ro', which will disable
+writing to all branches. Also the default mode for all branches is 'ro'.
+It is now possible to save the combined contents of the two overlay
+branches to a new squashfs, e.g.:
+# mksquashfs /livesys/merge_union /path/to/newmods.squash
+
+This new squashfs archive can be stored on the boot device and the
+initramfs will use it to replace the old one at the next boot.
diff --git a/Documentation/filesystems/aufs/design/10dynop.txt b/Documentation/filesystems/aufs/design/10dynop.txt
new file mode 100644 (file)
index 0000000..49e9a53
--- /dev/null
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2010-2013 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Dynamically customizable FS operations
+----------------------------------------------------------------------
+Generally FS operations (struct inode_operations, struct
+address_space_operations, struct file_operations, etc.) are defined as
+"static const", but that does not mean an FS has only one set of
+operations. Some FSs have multiple sets of them. For instance, ext2 has
+three sets, one for XIP, for NOBH, and for normal.
+Since aufs overrides and redirects these operations, sometimes aufs has
+to change its behaviour according to the branch FS type. More importantly
+VFS acts differently if a function (member in the struct) is set or
+not. It means aufs should have several sets of operations and select one
+among them according to the branch FS definition.
+
+In order to solve this problem and not to affect the behaviour of VFS,
+aufs defines these operations dynamically. For instance, aufs defines
+aio_read function for struct file_operations, but it may not be set to
+the file_operations. When the branch FS doesn't have it, aufs doesn't
+set it to its file_operations while the function definition itself is
+still alive. So the behaviour of io_submit(2) will not change, and it
+will return an error when aio_read is not defined.
+
+The lifetime of these dynamically generated operation object is
+maintained by aufs branch object. When the branch is removed from aufs,
+the reference counter of the object is decremented. When it reaches
+zero, the dynamically generated operation object will be freed.
+
+This approach is designed to support AIO (io_submit), Direct I/O and
+XIP mainly.
+Currently this approach is applied to file_operations and
+vm_operations_struct for regular files only.
diff --git a/Documentation/filesystems/aufs/design/99plan.txt b/Documentation/filesystems/aufs/design/99plan.txt
new file mode 100644 (file)
index 0000000..a21f133
--- /dev/null
@@ -0,0 +1,96 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Plan
+
+Restoring some features which were implemented in aufs1.
+They were dropped in aufs2 in order to make source files simpler and
+easier to be reviewed.
+
+
+Test Only the Highest One for the Directory Permission (dirperm1 option)
+----------------------------------------------------------------------
+Let's try case study.
+- aufs has two branches, upper readwrite and lower readonly.
+  /au = /rw + /ro
+- "dirA" exists under /ro, but not under /rw, and its mode is 0700.
+- user invoked "chmod a+rx /au/dirA"
+- then "dirA" becomes world readable?
+
+In this case, /ro/dirA is still 0700 since it exists in readonly branch,
+or it may be a natively readonly filesystem. If aufs respects the lower
+branch, it should not respond readdir request from other users. But user
+allowed it by chmod. Should aufs really reject showing the entries
+under /ro/dirA?
+
+To be honest, I don't have a best solution for this case. So I
+implemented 'dirperm1' and 'nodirperm1' option in aufs1, and leave it to
+users.
+When dirperm1 is specified, aufs checks only the highest one for the
+directory permission, and shows the entries. Otherwise, as usual, checks
+every dir existing on all branches and rejects the request.
+
+As a side effect, dirperm1 option improves the performance of aufs
+because the number of permission checks is reduced.
+
+
+Being Another Aufs's Readonly Branch (robr)
+----------------------------------------------------------------------
+Aufs1 allows aufs to be another aufs's readonly branch.
+This feature was developed by a user's request. But it may not be used
+currently.
+
+
+Copy-up on Open (coo=)
+----------------------------------------------------------------------
+By default the internal copy-up is executed when it is really necessary.
+It is not done when a file is opened for writing, but when write(2) is
+done. Users who have many (over 100) branches want to know and analyse
+when and what file is copied-up. To insert a new upper branch which
+contains such files only may improve the performance of aufs.
+
+Aufs1 implemented "coo=none | leaf | all" option.
+
+
+Refresh the Opened File (refrof)
+----------------------------------------------------------------------
+This option is implemented in aufs1 but incomplete.
+
+When user reads from a file, he expects to get its latest filedata
+generally. If the file is removed and a new same named file is created,
+the content he gets is unchanged, ie. the unlinked filedata.
+
+Let's try case study again.
+- aufs has two branches.
+  /au = /rw + /ro
+- "fileA" exists under /ro, but not under /rw.
+- user opened "/au/fileA".
+- he or someone else inserts a branch (/new) between /rw and /ro.
+  /au = /rw + /new + /ro
+- the new branch has "fileA".
+- user reads from the opened "fileA"
+- which filedata should aufs return, from /ro or /new?
+
+Some people say it has to be "from /ro" and that it is the semantics of Unix.
+The others say it should be "from /new" because the file is not removed
+and it is equivalent to the case of someone else modifies the file.
+
+Here again I don't have a best and final answer. I got an idea to
+implement 'refrof' and 'norefrof' option. When 'refrof' (REFResh the
+Opened File) is specified (by default), aufs returns the filedata from
+/new.
+Otherwise from /ro.
index 742cc06..9281a95 100644 (file)
@@ -35,7 +35,7 @@ described below will work.
 
 The most general way to create a file within a debugfs directory is with:
 
-    struct dentry *debugfs_create_file(const char *name, mode_t mode,
+    struct dentry *debugfs_create_file(const char *name, umode_t mode,
                                       struct dentry *parent, void *data,
                                       const struct file_operations *fops);
 
@@ -53,13 +53,13 @@ actually necessary; the debugfs code provides a number of helper functions
 for simple situations.  Files containing a single integer value can be
 created with any of:
 
-    struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u8(const char *name, umode_t mode,
                                     struct dentry *parent, u8 *value);
-    struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u16(const char *name, umode_t mode,
                                      struct dentry *parent, u16 *value);
-    struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u32(const char *name, umode_t mode,
                                      struct dentry *parent, u32 *value);
-    struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u64(const char *name, umode_t mode,
                                      struct dentry *parent, u64 *value);
 
 These files support both reading and writing the given value; if a specific
@@ -67,13 +67,13 @@ file should not be written to, simply set the mode bits accordingly.  The
 values in these files are in decimal; if hexadecimal is more appropriate,
 the following functions can be used instead:
 
-    struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x8(const char *name, umode_t mode,
                                     struct dentry *parent, u8 *value);
-    struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x16(const char *name, umode_t mode,
                                      struct dentry *parent, u16 *value);
-    struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x32(const char *name, umode_t mode,
                                      struct dentry *parent, u32 *value);
-    struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x64(const char *name, umode_t mode,
                                      struct dentry *parent, u64 *value);
 
 These functions are useful as long as the developer knows the size of the
@@ -81,7 +81,7 @@ value to be exported.  Some types can have different widths on different
 architectures, though, complicating the situation somewhat.  There is a
 function meant to help out in one special case:
 
-    struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+    struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
                                         struct dentry *parent, 
                                         size_t *value);
 
@@ -90,7 +90,7 @@ a variable of type size_t.
 
 Boolean values can be placed in debugfs with:
 
-    struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+    struct dentry *debugfs_create_bool(const char *name, umode_t mode,
                                       struct dentry *parent, u32 *value);
 
 A read on the resulting file will yield either Y (for non-zero values) or
@@ -104,7 +104,7 @@ Finally, a block of arbitrary binary data can be exported with:
        unsigned long size;
     };
 
-    struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+    struct dentry *debugfs_create_blob(const char *name, umode_t mode,
                                       struct dentry *parent,
                                       struct debugfs_blob_wrapper *blob);
 
index ac2facc..46dfc6b 100644 (file)
@@ -113,8 +113,8 @@ the fdtable structure -
        if (fd >= 0) {
                /* locate_fd() may have expanded fdtable, load the ptr */
                fdt = files_fdtable(files);
-               FD_SET(fd, fdt->open_fds);
-               FD_CLR(fd, fdt->close_on_exec);
+               __set_open_fd(fd, fdt);
+               __clear_close_on_exec(fd, fdt);
                spin_unlock(&files->file_lock);
        .....
 
index ac601c4..91ff71c 100644 (file)
@@ -503,6 +503,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Also note the kernel might malfunction if you disable
                        some critical bits.
 
+       cma=nn[MG]      [ARM,KNL]
+                       Sets the size of kernel global memory area for contiguous
+                       memory allocations. For more information, see
+                       include/linux/dma-contiguous.h
+
        cmo_free_hint=  [PPC] Format: { yes | no }
                        Specify whether pages are marked as being inactive
                        when they are freed.  This is used in CMO environments
@@ -510,6 +515,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        a hypervisor.
                        Default: yes
 
+       coherent_pool=nn[KMG]   [ARM,KNL]
+                       Sets the size of memory pool for coherent, atomic dma
+                       allocations, by default set to 256K.
+
        code_bytes      [X86] How many bytes of object code to print
                        in an oops report.
                        Range: 0 - 8192
@@ -625,6 +634,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        no_debug_objects
                        [KNL] Disable object debugging
 
+       debug_guardpage_minorder=
+                       [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+                       parameter allows control of the order of pages that will
+                       be intentionally kept free (and hence protected) by the
+                       buddy allocator. Bigger value increase the probability
+                       of catching random memory corruption, but reduce the
+                       amount of memory for normal system use. The maximum
+                       possible value is MAX_ORDER/2.  Setting this parameter
+                       to 1 or 2 should be enough to identify most random
+                       memory corruption problems caused by bugs in kernel or
+                       driver code when a CPU writes to (or reads from) a
+                       random memory location. Note that there exists a class
+                       of memory corruption problems caused by buggy H/W or
+                       F/W or by drivers badly programming DMA (basically when
+                       memory is written at bus level and the CPU MMU is
+                       bypassed) which are not detectable by
+                       CONFIG_DEBUG_PAGEALLOC, hence this option will not help
+                       tracking down these problems.
+
        debugpat        [X86] Enable PAT debugging
 
        decnet.addr=    [HW,NET]
index aa82ee4..1948004 100644 (file)
@@ -40,8 +40,8 @@ but the call_site can usually be used to extrapolate that information.
 ==================
 mm_page_alloc            page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
-mm_page_free_direct      page=%p pfn=%lu order=%d
-mm_pagevec_free                  page=%p pfn=%lu order=%d cold=%d
+mm_page_free             page=%p pfn=%lu order=%d
+mm_page_free_batched     page=%p pfn=%lu order=%d cold=%d
 
 These four events deal with page allocation and freeing. mm_page_alloc is
 a simple indicator of page allocator activity. Pages may be allocated from
@@ -53,13 +53,13 @@ amounts of activity imply high activity on the zone->lock. Taking this lock
 impairs performance by disabling interrupts, dirtying cache lines between
 CPUs and serialising many CPUs.
 
-When a page is freed directly by the caller, the mm_page_free_direct event
+When a page is freed directly by the caller, the mm_page_free event
 is triggered. Significant amounts of activity here could indicate that the
 callers should be batching their activities.
 
-When pages are freed using a pagevec, the mm_pagevec_free is
-triggered. Broadly speaking, pages are taken off the LRU lock in bulk and
-freed in batch with a pagevec. Significant amounts of activity here could
+When pages are freed in batch, the mm_page_free_batched event is triggered.
+Broadly speaking, pages are taken off the LRU lock in bulk and
+freed in batch with a page list. Significant amounts of activity here could
 indicate that the system is under memory pressure and can also indicate
 contention on the zone->lru_lock.
 
index 7df50e8..0a120aa 100644 (file)
@@ -17,8 +17,8 @@ use Getopt::Long;
 
 # Tracepoint events
 use constant MM_PAGE_ALLOC             => 1;
-use constant MM_PAGE_FREE_DIRECT       => 2;
-use constant MM_PAGEVEC_FREE           => 3;
+use constant MM_PAGE_FREE              => 2;
+use constant MM_PAGE_FREE_BATCHED      => 3;
 use constant MM_PAGE_PCPU_DRAIN                => 4;
 use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5;
 use constant MM_PAGE_ALLOC_EXTFRAG     => 6;
@@ -223,10 +223,10 @@ EVENT_PROCESS:
                # Perl Switch() sucks majorly
                if ($tracepoint eq "mm_page_alloc") {
                        $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++;
-               } elsif ($tracepoint eq "mm_page_free_direct") {
-                       $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}++;
-               } elsif ($tracepoint eq "mm_pagevec_free") {
-                       $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}++;
+               } elsif ($tracepoint eq "mm_page_free") {
+                       $perprocesspid{$process_pid}->{MM_PAGE_FREE}++
+               } elsif ($tracepoint eq "mm_page_free_batched") {
+                       $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED}++;
                } elsif ($tracepoint eq "mm_page_pcpu_drain") {
                        $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++;
                        $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++;
@@ -336,8 +336,8 @@ sub dump_stats {
                        $process_pid,
                        $stats{$process_pid}->{MM_PAGE_ALLOC},
                        $stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED},
-                       $stats{$process_pid}->{MM_PAGE_FREE_DIRECT},
-                       $stats{$process_pid}->{MM_PAGEVEC_FREE},
+                       $stats{$process_pid}->{MM_PAGE_FREE},
+                       $stats{$process_pid}->{MM_PAGE_FREE_BATCHED},
                        $stats{$process_pid}->{MM_PAGE_PCPU_DRAIN},
                        $stats{$process_pid}->{HIGH_PCPU_DRAINS},
                        $stats{$process_pid}->{HIGH_PCPU_REFILLS},
@@ -364,8 +364,8 @@ sub aggregate_perprocesspid() {
 
                $perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC};
                $perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED};
-               $perprocess{$process}->{MM_PAGE_FREE_DIRECT} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT};
-               $perprocess{$process}->{MM_PAGEVEC_FREE} += $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE};
+               $perprocess{$process}->{MM_PAGE_FREE} += $perprocesspid{$process_pid}->{MM_PAGE_FREE};
+               $perprocess{$process}->{MM_PAGE_FREE_BATCHED} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED};
                $perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN};
                $perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS};
                $perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS};
index 4b0669c..604f912 100644 (file)
@@ -124,6 +124,9 @@ config HAVE_ARCH_TRACEHOOK
 config HAVE_DMA_ATTRS
        bool
 
+config HAVE_DMA_CONTIGUOUS
+       bool
+
 config USE_GENERIC_SMP_HELPERS
        bool
 
@@ -181,4 +184,7 @@ config HAVE_RCU_TABLE_FREE
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
        bool
 
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       bool
+
 source "kernel/gcov/Kconfig"
index 082bd36..870794a 100644 (file)
@@ -4,6 +4,8 @@ config ARM
        select ARCH_SUPPORTS_ATOMIC_RMW
        select HAVE_DMA_API_DEBUG
        select HAVE_IDE if PCI || ISA || PCMCIA
+       select HAVE_DMA_ATTRS
+       select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
@@ -28,6 +30,7 @@ config ARM
        select HAVE_C_RECORDMCOUNT
        select HAVE_GENERIC_HARDIRQS
        select HAVE_SPARSE_IRQ
+       select HAVE_IOREMAP_PROT
        select GENERIC_IRQ_SHOW
        select CPU_PM if (SUSPEND || CPU_IDLE)
        help
@@ -41,6 +44,14 @@ config ARM
 config ARM_HAS_SG_CHAIN
        bool
 
+config NEED_SG_DMA_LENGTH
+       bool
+
+config ARM_DMA_USE_IOMMU
+       select NEED_SG_DMA_LENGTH
+       select ARM_HAS_SG_CHAIN
+       bool
+
 config HAVE_PWM
        bool
 
@@ -178,6 +189,9 @@ config ZONE_DMA
 config NEED_DMA_MAP_STATE
        def_bool y
 
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+       bool
+
 config GENERIC_ISA_DMA
        bool
 
@@ -518,6 +532,7 @@ config ARCH_IXP2000
 config ARCH_IXP4XX
        bool "IXP4xx-based"
        depends on MMU
+       select ARCH_HAS_DMA_SET_COHERENT_MASK
        select CLKSRC_MMIO
        select CPU_XSCALE
        select ARCH_REQUIRE_GPIOLIB
@@ -1704,6 +1719,14 @@ config HW_PERF_EVENTS
          Enable hardware performance counter support for perf events. If
          disabled, perf events will use software events only.
 
+config SYS_SUPPORTS_HUGETLBFS
+       def_bool y
+       depends on ARM_LPAE || (!CPU_USE_DOMAINS && !MEMORY_FAILURE)
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       def_bool y
+       depends on SYS_SUPPORTS_HUGETLBFS
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
@@ -1776,7 +1799,7 @@ config LEDS_CPU
          will overrule the CPU usage LED.
 
 config ALIGNMENT_TRAP
-       bool
+       bool "Enable alignment trap"
        depends on CPU_CP15_MMU
        default y if !ARCH_EBSA110
        select HAVE_PROC_CPU if PROC_FS
@@ -1838,6 +1861,11 @@ config DEPRECATED_PARAM_STRUCT
          This was deprecated in 2001 and announced to live on for 5 years.
          Some old boot loaders still use this way.
 
+config CPU_V7_SYSFS
+       bool
+       depends on CPU_V7 && SYSFS
+       default y
+
 endmenu
 
 menu "Boot options"
@@ -1982,7 +2010,7 @@ endchoice
 
 config XIP_KERNEL
        bool "Kernel Execute-In-Place from ROM"
-       depends on !ZBOOT_ROM
+       depends on !ZBOOT_ROM && !ARM_LPAE
        help
          Execute-In-Place allows the kernel to run from non-volatile storage
          directly addressable by the CPU, such as NOR flash. This saves RAM
@@ -2012,7 +2040,7 @@ config XIP_PHYS_ADDR
 
 config KEXEC
        bool "Kexec system call (EXPERIMENTAL)"
-       depends on EXPERIMENTAL
+       depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 4a93374..e9925e0 100644 (file)
@@ -99,6 +99,8 @@ tune-$(CONFIG_CPU_FEROCEON)   :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
 tune-$(CONFIG_CPU_V6)          :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 tune-$(CONFIG_CPU_V6K)         :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 
+tune-$(CONFIG_MACH_OMAP3_PANDORA) :=-mtune=cortex-a8
+
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI     :=-mabi=aapcs-linux -mno-thumb-interwork
 else
index 8c57359..db712ad 100644 (file)
@@ -661,6 +661,7 @@ __armv7_mmu_cache_on:
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
 #endif
+               mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mcr     p15, 0, r0, c1, c0, 0   @ load control register
                mrc     p15, 0, r0, c1, c0, 0   @ and read it back
                mov     r0, #0
index 595ecd2..aa07f59 100644 (file)
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
        read_lock_irqsave(&device_info->lock, flags);
 
        list_for_each_entry(b, &device_info->safe_buffers, node)
-               if (b->safe_dma_addr == safe_dma_addr) {
+               if (b->safe_dma_addr <= safe_dma_addr &&
+                   b->safe_dma_addr + b->size > safe_dma_addr) {
                        rb = b;
                        break;
                }
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
        if (buf == NULL) {
                dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                       __func__, ptr);
-               return ~0;
+               return DMA_ERROR_CODE;
        }
 
        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
 {
        dma_addr_t dma_addr;
        int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 
        ret = needs_bounce(dev, dma_addr, size);
        if (ret < 0)
-               return ~0;
+               return DMA_ERROR_CODE;
 
        if (ret == 0) {
-               __dma_page_cpu_to_dev(page, offset, size, dir);
+               arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
                return dma_addr;
        }
 
        if (PageHighMem(page)) {
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-               return ~0;
+               return DMA_ERROR_CODE;
        }
 
        return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        struct safe_buffer *buf;
 
@@ -352,31 +353,32 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
        buf = find_safe_buffer_dev(dev, dma_addr, __func__);
        if (!buf) {
-               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-                       dma_addr & ~PAGE_MASK, size, dir);
+               arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
                return;
        }
 
        unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-               unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+               size_t sz, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
+       unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;
 
+       off = addr - buf->safe_dma_addr;
+
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -388,24 +390,35 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
        }
        return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-               unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+               return;
+
+       arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+               size_t sz, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
+       unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;
 
+       off = addr - buf->safe_dma_addr;
+
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
        }
        return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+               return;
+
+       arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+       if (dev->archdata.dmabounce)
+               return 0;
+
+       return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+       .alloc                  = arm_dma_alloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .map_page               = dmabounce_map_page,
+       .unmap_page             = dmabounce_unmap_page,
+       .sync_single_for_cpu    = dmabounce_sync_for_cpu,
+       .sync_single_for_device = dmabounce_sync_for_device,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .set_dma_mask           = dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
        dev->archdata.dmabounce = device_info;
+       set_dma_ops(dev, &dmabounce_ops);
 
        dev_info(dev, "dmabounce: registered device\n");
 
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
        dev->archdata.dmabounce = NULL;
+       set_dma_ops(dev, NULL);
 
        if (!device_info) {
                dev_warn(dev,
diff --git a/arch/arm/configs/omap3_pandora_debug_defconfig b/arch/arm/configs/omap3_pandora_debug_defconfig
new file mode 100644 (file)
index 0000000..a8a11cd
--- /dev/null
@@ -0,0 +1,2421 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm 3.2.1 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_ARM_PATCH_PHYS_VIRT=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="pandora"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_FHANDLE is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_PERF is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+# CONFIG_RT_GROUP_SCHED is not set
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_SCHED_AUTOGROUP=y
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_HIGHBANK is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_PRIMA2 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PICOXCELL is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_ARCH_ZYNQ is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+
+#
+# TI OMAP Common Features
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_SMARTREFLEX is not set
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MBOX_FWK is not set
+CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+CONFIG_OMAP_PM_NOOP=y
+CONFIG_MACH_OMAP_GENERIC=y
+
+#
+# TI OMAP2/3/4 Specific Features
+#
+# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+CONFIG_SOC_OMAP3430=y
+# CONFIG_SOC_OMAPTI816X is not set
+CONFIG_OMAP_PACKAGE_CBB=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_DEVKIT8000 is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP3530_LV_SOM is not set
+# CONFIG_MACH_OMAP3_TORPEDO is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_CRANEBOARD is not set
+CONFIG_MACH_OMAP3_PANDORA=y
+# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_NOKIA_RM680 is not set
+# CONFIG_MACH_NOKIA_RX51 is not set
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_CM_T3517 is not set
+# CONFIG_MACH_IGEP0020 is not set
+# CONFIG_MACH_IGEP0030 is not set
+# CONFIG_MACH_SBC3530 is not set
+# CONFIG_MACH_OMAP_3630SDP is not set
+CONFIG_OMAP3_EMU=y
+CONFIG_OMAP3_SDRC_AC_TIMING=y
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+CONFIG_SWP_EMULATE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_USER_L2_PLE=y
+CONFIG_USER_PMON=y
+CONFIG_CPU_HAS_PMU=y
+CONFIG_ARM_ERRATA_430973=y
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_720789 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+# CONFIG_ARM_ERRATA_751472 is not set
+# CONFIG_ARM_ERRATA_754322 is not set
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+CONFIG_CPU_V7_SYSFS=y
+
+#
+# Boot options
+#
+CONFIG_USE_OF=y
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+# CONFIG_ARM_APPENDED_DTB is not set
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+
+#
+# ARM CPU frequency scaling drivers
+#
+CONFIG_ARM_OMAP2PLUS_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_CAN_PM_TRACE=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_HAS_OPP=y
+CONFIG_PM_OPP=y
+CONFIG_PM_CLK=y
+CONFIG_CPU_PM=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARM_CPU_SUSPEND=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+# CONFIG_BATMAN_ADV is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+# CONFIG_BT_RFCOMM_TTY is not set
+CONFIG_BT_BNEP=y
+# CONFIG_BT_BNEP_MC_FILTER is not set
+# CONFIG_BT_BNEP_PROTO_FILTER is not set
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_BCSP is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_OF_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+# CONFIG_MTD_BLKDEVS is not set
+# CONFIG_MTD_BLOCK is not set
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_DOCG3 is not set
+CONFIG_MTD_NAND_ECC=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_BCH is not set
+# CONFIG_MTD_SM_COMMON is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+# CONFIG_MTD_UBI_DEBUG is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+
+#
+# Device Tree and Open Firmware support
+#
+# CONFIG_PROC_DEVICETREE is not set
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_DEVICE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_I2C=y
+CONFIG_OF_NET=y
+CONFIG_OF_SPI=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_ATMEL_PWM is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_OMAP_OVERCLOCKING=y
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085 is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_IWMC3200TOP is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_TARGET_CORE is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_MII=y
+# CONFIG_MACVLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_ETHERNET=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_DM9000 is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_PHYLIB is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+CONFIG_USB_NET_CDCETHER=y
+# CONFIG_USB_NET_CDC_EEM is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+# CONFIG_USB_VL600 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTL8192CU is not set
+CONFIG_WL1251=m
+# CONFIG_WL1251_SPI is not set
+CONFIG_WL1251_SDIO=m
+# CONFIG_WL12XX_MENU is not set
+CONFIG_WL12XX_PLATFORM_DATA=y
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
+# CONFIG_INPUT_TWL4030_VIBRA is not set
+# CONFIG_INPUT_TWL6040_VIBRA is not set
+CONFIG_INPUT_VSENSE=y
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+# CONFIG_SERIAL_AMBA_PL011 is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_OC_TINY is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_PL022 is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_PL061 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+CONFIG_GPIO_TWL4030=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_BQ20Z75 is not set
+CONFIG_BATTERY_BQ27x00=y
+CONFIG_BATTERY_BQ27X00_I2C=y
+# CONFIG_BATTERY_BQ27X00_PLATFORM is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+CONFIG_CHARGER_TWL4030=y
+# CONFIG_CHARGER_GPIO is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TWL4030_MADC is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ARM_SP805_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_TWL4030_WATCHDOG=y
+# CONFIG_MAX63XX_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_MADC=y
+CONFIG_TWL4030_POWER=y
+CONFIG_MFD_TWL4030_AUDIO=y
+# CONFIG_TWL6030_PWM is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13XXX is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_WL1273_CORE is not set
+CONFIG_MFD_OMAP_USB_HOST=y
+# CONFIG_MFD_AAT2870_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_WMT_GE_ROPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+CONFIG_OMAP2_DSS_DPI=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET=y
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC_DPI is not set
+# CONFIG_PANEL_DVI is not set
+# CONFIG_PANEL_LGPHILIPS_LB035Q02 is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_NEC_NL8048HL11_01B is not set
+# CONFIG_PANEL_PICODLP is not set
+CONFIG_PANEL_TPO_TD043MTEA1=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PANDORA=y
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_LOGO_PANDORA_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_DEBUG=y
+# CONFIG_SND_DEBUG_VERBOSE is not set
+# CONFIG_SND_PCM_XRUN_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_SOC_CACHE_LZO is not set
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB_ARCH_HAS_XHCI is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_HCD_OMAP=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+# CONFIG_USB_MUSB_TUSB6010 is not set
+CONFIG_USB_MUSB_OMAP2PLUS=y
+# CONFIG_USB_MUSB_AM35X is not set
+# CONFIG_MUSB_PIO_ONLY is not set
+# CONFIG_USB_UX500_DMA is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_UAS is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+# CONFIG_USB_FUSB300 is not set
+# CONFIG_USB_OMAP is not set
+# CONFIG_USB_R8A66597 is not set
+CONFIG_USB_GADGET_MUSB_HDRC=y
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_USB_ULPI is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_TWL6030_USB is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_ARMMMCI is not set
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SDHCI_PXAV3 is not set
+# CONFIG_MMC_SDHCI_PXAV2 is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_DW is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+CONFIG_LEDS_TWL4030_PWM=y
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_RENESAS_TPU is not set
+CONFIG_LEDS_TRIGGERS=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_PL030 is not set
+# CONFIG_RTC_DRV_PL031 is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_MMIO=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_PM_DEVFREQ is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_XATTR is not set
+CONFIG_SQUASHFS_ZLIB=y
+# CONFIG_SQUASHFS_LZO is not set
+# CONFIG_SQUASHFS_XZ is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_AUFS_FS=y
+CONFIG_AUFS_BRANCH_MAX_127=y
+# CONFIG_AUFS_BRANCH_MAX_511 is not set
+# CONFIG_AUFS_BRANCH_MAX_1023 is not set
+# CONFIG_AUFS_BRANCH_MAX_32767 is not set
+CONFIG_AUFS_SBILIST=y
+# CONFIG_AUFS_HNOTIFY is not set
+# CONFIG_AUFS_RDU is not set
+# CONFIG_AUFS_PROC_MAP is not set
+# CONFIG_AUFS_SP_IATTR is not set
+# CONFIG_AUFS_SHWH is not set
+# CONFIG_AUFS_BR_RAMFS is not set
+CONFIG_AUFS_BDEV_LOOP=y
+# CONFIG_AUFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_CEPH_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+# CONFIG_PROVE_RCU is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_LOCKDEP=y
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_LOCKDEP is not set
+CONFIG_TRACE_IRQFLAGS=y
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_NONE=y
+# CONFIG_DEBUG_ICEDCC is not set
+CONFIG_EARLY_PRINTK=y
+CONFIG_OC_ETM=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+# CONFIG_LIBCRC32C is not set
+# CONFIG_CRC8 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
diff --git a/arch/arm/configs/omap3_pandora_defconfig b/arch/arm/configs/omap3_pandora_defconfig
new file mode 100644 (file)
index 0000000..87ba1ab
--- /dev/null
@@ -0,0 +1,3501 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm 3.2.80 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_ARM_PATCH_PHYS_VIRT=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="pandora"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_FHANDLE is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_FREEZER is not set
+CONFIG_CGROUP_DEVICE=y
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_PERF is not set
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_BLK_CGROUP is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_HIGHBANK is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_PRIMA2 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PICOXCELL is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_ARCH_ZYNQ is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+
+#
+# TI OMAP Common Features
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_SMARTREFLEX=y
+CONFIG_OMAP_SMARTREFLEX_CLASS3=y
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+# CONFIG_OMAP_MUX_WARNINGS is not set
+CONFIG_OMAP_MCBSP=y
+CONFIG_OMAP_MBOX_FWK=m
+CONFIG_OMAP_MBOX_KFIFO_SIZE=256
+CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+CONFIG_OMAP_PM_NOOP=y
+CONFIG_MACH_OMAP_GENERIC=y
+
+#
+# TI OMAP2/3/4 Specific Features
+#
+# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+CONFIG_SOC_OMAP3430=y
+# CONFIG_SOC_OMAPTI816X is not set
+CONFIG_OMAP_PACKAGE_CBB=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_DEVKIT8000 is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP3530_LV_SOM is not set
+# CONFIG_MACH_OMAP3_TORPEDO is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_CRANEBOARD is not set
+CONFIG_MACH_OMAP3_PANDORA=y
+# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_NOKIA_RM680 is not set
+# CONFIG_MACH_NOKIA_RX51 is not set
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_CM_T3517 is not set
+# CONFIG_MACH_IGEP0020 is not set
+# CONFIG_MACH_IGEP0030 is not set
+# CONFIG_MACH_SBC3530 is not set
+# CONFIG_MACH_OMAP_3630SDP is not set
+CONFIG_OMAP3_EMU=y
+CONFIG_OMAP3_SDRC_AC_TIMING=y
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_LPAE is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+CONFIG_SWP_EMULATE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_USER_L2_PLE=y
+CONFIG_USER_PMON=y
+CONFIG_CPU_HAS_PMU=y
+CONFIG_ARM_ERRATA_430973=y
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_720789 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+# CONFIG_ARM_ERRATA_751472 is not set
+# CONFIG_ARM_ERRATA_754322 is not set
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_SYS_SUPPORTS_HUGETLBFS=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TRANSPARENT_HUGEPAGE=y
+# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+CONFIG_UACCESS_WITH_MEMCPY=y
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+CONFIG_CPU_V7_SYSFS=y
+
+#
+# Boot options
+#
+CONFIG_USE_OF=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+# CONFIG_ARM_APPENDED_DTB is not set
+CONFIG_CMDLINE=" debug "
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
+# CONFIG_CMDLINE_EXTEND is not set
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+
+#
+# ARM CPU frequency scaling drivers
+#
+CONFIG_ARM_OMAP2PLUS_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=m
+
+#
+# Power management options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_HAS_OPP=y
+CONFIG_PM_OPP=y
+CONFIG_PM_CLK=y
+CONFIG_CPU_PM=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARM_CPU_SUSPEND=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+CONFIG_NF_CONNTRACK=m
+# CONFIG_NF_CONNTRACK_MARK is not set
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+CONFIG_NETFILTER_XTABLES=m
+
+#
+# Xtables combined modules
+#
+# CONFIG_NETFILTER_XT_MARK is not set
+# CONFIG_NETFILTER_XT_CONNMARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_STATE is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+# CONFIG_IP_NF_ARP_MANGLE is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_CONNTRACK_IPV6=m
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=m
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_MH is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_TARGET_LOG is not set
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+# CONFIG_IP6_NF_MANGLE is not set
+# CONFIG_IP6_NF_RAW is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+CONFIG_NET_CLS_RSVP=y
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_BCSP is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+CONFIG_BT_HCIUART_LL=y
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_BT_ATH3K is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_SOC_BUS=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+
+#
+# Default contiguous memory area size:
+#
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
+# CONFIG_CMA_SIZE_SEL_MIN is not set
+# CONFIG_CMA_SIZE_SEL_MAX is not set
+CONFIG_CMA_ALIGNMENT=9
+CONFIG_CMA_AREAS=7
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_OF_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+# CONFIG_MTD_BLKDEVS is not set
+# CONFIG_MTD_BLOCK is not set
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_DOCG3 is not set
+CONFIG_MTD_NAND_ECC=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_BCH is not set
+# CONFIG_MTD_SM_COMMON is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=2
+# CONFIG_MTD_UBI_GLUEBI is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+
+#
+# Device Tree and Open Firmware support
+#
+# CONFIG_PROC_DEVICETREE is not set
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_DEVICE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_I2C=y
+CONFIG_OF_NET=y
+CONFIG_OF_SPI=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_ATMEL_PWM is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_OMAP_OVERCLOCKING=y
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085 is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_IWMC3200TOP is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=m
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+# CONFIG_MD_LINEAR is not set
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+# CONFIG_MD_RAID456 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=m
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_TARGET_CORE is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_MII=y
+# CONFIG_MACVLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_ETHERNET=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_DM9000 is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_PHYLIB is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+# CONFIG_USB_NET_KALMIA is not set
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+CONFIG_AT76C50X_USB=m
+CONFIG_USB_ZD1201=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+# CONFIG_ATH9K is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
+# CONFIG_CARL9170_HWRNG is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+# CONFIG_LIBERTAS_SDIO is not set
+# CONFIG_LIBERTAS_SPI is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_MESH is not set
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+# CONFIG_P54_SPI is not set
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTL8192C_COMMON=m
+CONFIG_WL1251=m
+# CONFIG_WL1251_SPI is not set
+CONFIG_WL1251_SDIO=m
+# CONFIG_WL12XX_MENU is not set
+CONFIG_WL12XX_PLATFORM_DATA=y
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+CONFIG_JOYSTICK_XPAD=m
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+CONFIG_TABLET_USB_GTCO=m
+CONFIG_TABLET_USB_HANWANG=m
+CONFIG_TABLET_USB_KBTAB=m
+CONFIG_TABLET_USB_WACOM=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=m
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_TOUCHSCREEN_USB_EGALAX=y
+CONFIG_TOUCHSCREEN_USB_PANJIT=y
+CONFIG_TOUCHSCREEN_USB_3M=y
+CONFIG_TOUCHSCREEN_USB_ITM=y
+CONFIG_TOUCHSCREEN_USB_ETURBO=y
+CONFIG_TOUCHSCREEN_USB_GUNZE=y
+CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
+CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
+CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
+CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
+CONFIG_TOUCHSCREEN_USB_GOTOP=y
+CONFIG_TOUCHSCREEN_USB_JASTEC=y
+CONFIG_TOUCHSCREEN_USB_E2I=y
+CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
+CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
+CONFIG_TOUCHSCREEN_USB_NEXIO=y
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
+# CONFIG_INPUT_TWL4030_VIBRA is not set
+# CONFIG_INPUT_TWL6040_VIBRA is not set
+CONFIG_INPUT_VSENSE=y
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=m
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+# CONFIG_SERIAL_8250_DW is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+# CONFIG_SERIAL_AMBA_PL011 is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_OF_PLATFORM is not set
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_MUX_GPIO is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_OC_TINY is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_PL022 is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_PL061 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+CONFIG_GPIO_TWL4030=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_BQ20Z75 is not set
+CONFIG_BATTERY_BQ27x00=y
+CONFIG_BATTERY_BQ27X00_I2C=y
+CONFIG_BATTERY_BQ27X00_PLATFORM=y
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+CONFIG_CHARGER_TWL4030=y
+# CONFIG_CHARGER_GPIO is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+CONFIG_SENSORS_TWL4030_MADC=m
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_OMAP3_THERMAL=y
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ARM_SP805_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_TWL4030_WATCHDOG=m
+# CONFIG_MAX63XX_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_MADC=m
+CONFIG_TWL4030_POWER=y
+CONFIG_MFD_TWL4030_AUDIO=y
+# CONFIG_TWL6030_PWM is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13XXX is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_WL1273_CORE is not set
+CONFIG_MFD_OMAP_USB_HOST=y
+# CONFIG_MFD_AAT2870_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_MEDIA_SUPPORT=m
+
+#
+# Multimedia core support
+#
+# CONFIG_MEDIA_CONTROLLER is not set
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_DVB_CORE=m
+CONFIG_DVB_NET=y
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_RC_CORE=m
+CONFIG_LIRC=m
+CONFIG_RC_MAP=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_RC5_SZ_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_LIRC_CODEC=m
+# CONFIG_RC_ATI_REMOTE is not set
+# CONFIG_IR_IMON is not set
+# CONFIG_IR_MCEUSB is not set
+# CONFIG_IR_REDRAT3 is not set
+# CONFIG_IR_STREAMZAP is not set
+# CONFIG_RC_LOOPBACK is not set
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=m
+CONFIG_MEDIA_TUNER_CUSTOMISE=y
+
+#
+# Customize TV tuners
+#
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+CONFIG_VIDEO_IR_I2C=m
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+CONFIG_VIDEO_TVAUDIO=m
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+CONFIG_VIDEO_MSP3400=m
+# CONFIG_VIDEO_CS5345 is not set
+CONFIG_VIDEO_CS53L32A=m
+# CONFIG_VIDEO_TLV320AIC23B is not set
+CONFIG_VIDEO_WM8775=m
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+CONFIG_VIDEO_SAA711X=m
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+CONFIG_VIDEO_CX25840=m
+
+#
+# MPEG video encoders
+#
+CONFIG_VIDEO_CX2341X=m
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_TCM825X is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+
+#
+# Flash devices
+#
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_VPFE_CAPTURE is not set
+CONFIG_VIDEO_OMAP2_VOUT_VRFB=y
+CONFIG_VIDEO_OMAP2_VOUT=m
+CONFIG_VIDEO_CPIA2=m
+CONFIG_VIDEO_AU0828=m
+# CONFIG_SOC_CAMERA is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=y
+# CONFIG_VIDEO_TLG2300 is not set
+# CONFIG_VIDEO_CX231XX is not set
+# CONFIG_VIDEO_TM6000 is not set
+CONFIG_VIDEO_USBVISION=m
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_USB_SN9C102 is not set
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_I2C_SI4713 is not set
+# CONFIG_RADIO_SI4713 is not set
+CONFIG_USB_DSBR=m
+CONFIG_RADIO_SI470X=y
+CONFIG_USB_SI470X=m
+# CONFIG_I2C_SI470X is not set
+CONFIG_USB_MR800=m
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_SAA7706H is not set
+# CONFIG_RADIO_TEF6862 is not set
+# CONFIG_RADIO_WL1273 is not set
+
+#
+# Texas Instruments WL128x FM driver (ST based)
+#
+CONFIG_DVB_MAX_ADAPTERS=8
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+CONFIG_TTPCI_EEPROM=m
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_PCTV452E=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_AF9015=m
+CONFIG_DVB_USB_CE6230=m
+CONFIG_DVB_USB_FRIIO=m
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_LME2510=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_IT913X=m
+CONFIG_DVB_USB_MXL111SF=m
+CONFIG_SMS_SIANO_MDTV=m
+
+#
+# Siano module components
+#
+CONFIG_SMS_USB_DRV=m
+# CONFIG_SMS_SDIO_DRV is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
+
+#
+# Supported DVB Frontends
+#
+CONFIG_DVB_FE_CUSTOMISE=y
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV6110x is not set
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_TDA18271C2DD=m
+
+#
+# DVB-S (satellite) frontends
+#
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_TDA10071 is not set
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_S5H1432=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_DIB9000=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_AF9013=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_CXD2820R=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_LGDT3305 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_AU8522 is not set
+# CONFIG_DVB_S5H1411 is not set
+
+#
+# ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_S921=m
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_LNBP22=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_A8293=m
+CONFIG_DVB_LGS8GL5=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DVB_IX2505V=m
+CONFIG_DVB_IT913X_FE=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_DRM=m
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+CONFIG_FB_SYS_FILLRECT=m
+CONFIG_FB_SYS_COPYAREA=m
+CONFIG_FB_SYS_IMAGEBLIT=m
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=m
+# CONFIG_FB_WMT_GE_ROPS is not set
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_TMIO is not set
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+CONFIG_OMAP2_DSS_DPI=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET=y
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC_DPI is not set
+# CONFIG_PANEL_DVI is not set
+# CONFIG_PANEL_LGPHILIPS_LB035Q02 is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_NEC_NL8048HL11_01B is not set
+# CONFIG_PANEL_PICODLP is not set
+CONFIG_PANEL_TPO_TD043MTEA1=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PANDORA=y
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PANDORA_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+CONFIG_SND_ALOOP=m
+CONFIG_SND_VIRMIDI=m
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_6FIRE=m
+CONFIG_SND_SOC=y
+# CONFIG_SND_SOC_CACHE_LZO is not set
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+CONFIG_HID_ACRUX=m
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_HOLTEK=m
+CONFIG_HOLTEK_FF=y
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PRIMAX=m
+CONFIG_HID_QUANTA=m
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_ROCCAT_COMMON=m
+CONFIG_HID_ROCCAT_ARVO=m
+CONFIG_HID_ROCCAT_KONE=m
+CONFIG_HID_ROCCAT_KONEPLUS=m
+CONFIG_HID_ROCCAT_KOVAPLUS=m
+CONFIG_HID_ROCCAT_PYRA=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SPEEDLINK=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_WACOM=m
+CONFIG_HID_WACOM_POWER_SUPPLY=y
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_HID_ZYDACRON=m
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB_ARCH_HAS_XHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_DWC3 is not set
+CONFIG_USB_MON=m
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_HCD_OMAP=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_U132_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+# CONFIG_USB_MUSB_TUSB6010 is not set
+CONFIG_USB_MUSB_OMAP2PLUS=y
+# CONFIG_USB_MUSB_AM35X is not set
+# CONFIG_MUSB_PIO_ONLY is not set
+# CONFIG_USB_UX500_DMA is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIEMENS_MPI=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
+CONFIG_USB_SERIAL_ZIO=m
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYPRESS_CY7C63=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+# CONFIG_USB_SISUSBVGA_CON is not set
+CONFIG_USB_LD=m
+CONFIG_USB_TRANCEVIBRATOR=m
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+CONFIG_USB_ISIGHTFW=m
+CONFIG_USB_YUREX=m
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+# CONFIG_USB_FUSB300 is not set
+# CONFIG_USB_OMAP is not set
+# CONFIG_USB_R8A66597 is not set
+CONFIG_USB_GADGET_MUSB_HDRC=y
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_ZERO_HNPTEST is not set
+CONFIG_USB_AUDIO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_G_NCM=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
+CONFIG_USB_FUNCTIONFS_ETH=y
+CONFIG_USB_FUNCTIONFS_RNDIS=y
+# CONFIG_USB_FUNCTIONFS_GENERIC is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+CONFIG_USB_G_PRINTER=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_USB_G_ACM_MS=m
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+CONFIG_USB_G_HID=m
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_USB_ULPI is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_TWL6030_USB is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_ARMMMCI is not set
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SDHCI_PXAV3 is not set
+# CONFIG_MMC_SDHCI_PXAV2 is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_DW is not set
+# CONFIG_MMC_VUB300 is not set
+CONFIG_MMC_USHC=m
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+CONFIG_LEDS_TWL4030_PWM=y
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_RENESAS_TPU is not set
+CONFIG_LEDS_TRIGGERS=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_PL030 is not set
+# CONFIG_RTC_DRV_PL031 is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=m
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+CONFIG_STAGING=y
+CONFIG_USBIP_CORE=m
+CONFIG_USBIP_VHCI_HCD=m
+CONFIG_USBIP_HOST=m
+# CONFIG_USBIP_DEBUG is not set
+CONFIG_W35UND=m
+CONFIG_PRISM2_USB=m
+# CONFIG_ECHO is not set
+# CONFIG_ASUS_OLED is not set
+CONFIG_R8712U=m
+CONFIG_RTS5139=m
+# CONFIG_RTS5139_DEBUG is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_IIO is not set
+CONFIG_XVMALLOC=y
+CONFIG_ZRAM=m
+# CONFIG_ZRAM_DEBUG is not set
+# CONFIG_FB_SM7XX is not set
+CONFIG_TIDSPBRIDGE=m
+CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE=0x600000
+# CONFIG_TIDSPBRIDGE_DEBUG is not set
+CONFIG_TIDSPBRIDGE_RECOVERY=y
+# CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK is not set
+# CONFIG_TIDSPBRIDGE_WDT3 is not set
+# CONFIG_TIDSPBRIDGE_NTFY_PWRERR is not set
+CONFIG_TIDSPBRIDGE_BACKTRACE=y
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+CONFIG_FT1000=m
+CONFIG_FT1000_USB=m
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+CONFIG_STAGING_MEDIA=y
+CONFIG_DVB_AS102=m
+CONFIG_EASYCAP=m
+# CONFIG_EASYCAP_DEBUG is not set
+# CONFIG_LIRC_STAGING is not set
+# CONFIG_DRM_OMAP is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=m
+CONFIG_ASHMEM=m
+CONFIG_ANDROID_LOGGER=m
+CONFIG_ANDROID_PERSISTENT_RAM=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+# CONFIG_ANDROID_TIMED_OUTPUT is not set
+CONFIG_ANDROID_LOW_MEMORY_KILLER=m
+# CONFIG_ANDROID_SWITCH is not set
+# CONFIG_ANDROID_INTF_ALARM is not set
+CONFIG_CLKDEV_LOOKUP=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_MMIO=y
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_OMAP_IOMMU=y
+CONFIG_OMAP_IOVMM=m
+CONFIG_OMAP_IOMMU_DEBUG=m
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND is not set
+# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set
+# CONFIG_DEVFREQ_GOV_POWERSAVE is not set
+# CONFIG_DEVFREQ_GOV_USERSPACE is not set
+
+#
+# DEVFREQ Drivers
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_SECURITY is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_XFS_FS=m
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+CONFIG_BTRFS_FS=m
+# CONFIG_BTRFS_FS_POSIX_ACL is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=m
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+CONFIG_FSCACHE=m
+# CONFIG_FSCACHE_STATS is not set
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_ADFS_FS=m
+CONFIG_ADFS_FS_RW=y
+CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_XATTR is not set
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+CONFIG_SQUASHFS_EMBEDDED=y
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+CONFIG_UFS_FS=m
+CONFIG_UFS_FS_WRITE=y
+# CONFIG_UFS_DEBUG is not set
+CONFIG_AUFS_FS=m
+CONFIG_AUFS_BRANCH_MAX_127=y
+# CONFIG_AUFS_BRANCH_MAX_511 is not set
+# CONFIG_AUFS_BRANCH_MAX_1023 is not set
+# CONFIG_AUFS_BRANCH_MAX_32767 is not set
+CONFIG_AUFS_SBILIST=y
+# CONFIG_AUFS_HNOTIFY is not set
+# CONFIG_AUFS_EXPORT is not set
+CONFIG_AUFS_RDU=y
+# CONFIG_AUFS_PROC_MAP is not set
+# CONFIG_AUFS_SP_IATTR is not set
+# CONFIG_AUFS_SHWH is not set
+# CONFIG_AUFS_BR_RAMFS is not set
+CONFIG_AUFS_BR_FUSE=y
+CONFIG_AUFS_POLL=y
+CONFIG_AUFS_BR_HFSPLUS=y
+CONFIG_AUFS_BDEV_LOOP=y
+# CONFIG_AUFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_FSCACHE is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+# CONFIG_CEPH_FS is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
+# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_CIFS_ACL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+CONFIG_NLS_CODEPAGE_932=m
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1072
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_LL is not set
+CONFIG_OC_ETM=y
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=m
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_OMAP_SHAM is not set
+# CONFIG_CRYPTO_DEV_OMAP_AES is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+# CONFIG_CRC8 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
index b7c5d5d..a279ea7 100644 (file)
 #define ALT_UP_B(label) b label
 #endif
 
+/*
+ * Instruction barrier
+ */
+       .macro  instr_sync
+#if __LINUX_ARM_ARCH__ >= 7
+       isb
+#elif __LINUX_ARM_ARCH__ == 6
+       mcr     p15, 0, r0, c7, c5, 4
+#endif
+       .endm
+
 /*
  * SMP data memory barrier
  */
index 86976d0..b835147 100644 (file)
@@ -147,6 +147,32 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        : "cc");
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int oldval, newval;
+       unsigned long tmp;
+
+       smp_mb();
+
+       __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:    ldrex   %0, [%4]\n"
+"      teq     %0, %5\n"
+"      beq     2f\n"
+"      add     %1, %0, %6\n"
+"      strex   %2, %1, [%4]\n"
+"      teq     %2, #0\n"
+"      bne     1b\n"
+"2:"
+       : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "r" (u), "r" (a)
+       : "cc");
+
+       if (oldval != u)
+               smp_mb();
+
+       return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -204,10 +230,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        raw_local_irq_restore(flags);
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
@@ -218,6 +240,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)          atomic_add(1, v)
 #define atomic_dec(v)          atomic_sub(1, v)
 
@@ -241,7 +267,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
        u64 result;
 
index b1e0e07..752cbd4 100644 (file)
@@ -101,7 +101,7 @@ struct cpu_cache_fns {
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
        void (*coherent_kern_range)(unsigned long, unsigned long);
-       void (*coherent_user_range)(unsigned long, unsigned long);
+       int  (*coherent_user_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_area)(void *, size_t);
 
        void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
@@ -250,8 +250,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(start,end) \
-       __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e)    __cpuc_coherent_user_range(s,e)
 
 /*
  * Perform necessary cache operations to ensure that data previously
index 7aa3680..b69c0d3 100644 (file)
@@ -7,12 +7,16 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+       struct dma_map_ops      *dma_ops;
 #ifdef CONFIG_DMABOUNCE
        struct dmabounce_device_info *dmabounce;
 #endif
 #ifdef CONFIG_IOMMU_API
        void *iommu; /* private IOMMU data */
 #endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+       struct dma_iommu_mapping        *mapping;
+#endif
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644 (file)
index 0000000..3ed37b4
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644 (file)
index 0000000..799b094
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+       /* iommu specific data */
+       struct iommu_domain     *domain;
+
+       void                    *bitmap;
+       size_t                  bits;
+       unsigned int            order;
+       dma_addr_t              base;
+
+       spinlock_t              lock;
+       struct kref             kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+                                       struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
index cb3b7c9..80777d8 100644 (file)
@@ -5,11 +5,35 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#define DMA_ERROR_CODE (~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+       if (dev && dev->archdata.dma_ops)
+               return dev->archdata.dma_ops;
+       return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+       BUG_ON(!dev);
+       dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+       return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
@@ -61,69 +85,12 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
-/*
- * The DMA API is built upon the notion of "buffer ownership".  A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device.  These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches.  We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       extern void ___dma_single_cpu_to_dev(const void *, size_t,
-               enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       extern void ___dma_single_dev_to_cpu(const void *, size_t,
-               enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-       size_t size, enum dma_data_direction dir)
-{
-       extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-               size_t, enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-       size_t size, enum dma_data_direction dir)
-{
-       extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-               size_t, enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return dma_addr == ~0;
+       return dma_addr == DMA_ERROR_CODE;
 }
 
 /*
@@ -141,79 +108,126 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: required memory size
  * @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
  *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA.  This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
  */
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                          gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag,
+                                      struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       void *cpu_addr;
+       BUG_ON(!ops);
+
+       cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+       return cpu_addr;
+}
 
 /**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: size of memory originally requested in dma_alloc_coherent
  * @cpu_addr: CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
  *
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                        dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_handle,
+                                    struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+
+       debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
 
 /**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @vma: vm_area_struct describing requested user mapping
  * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
  * @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-               void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                       void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                       struct dma_attrs *attrs);
 
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
 
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
-               gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size, struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+       return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
 
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
-       dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_handle)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
 
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
-               void *, dma_addr_t, size_t);
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
 
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
  * memory allocator is initialised, i.e. before any core_initcall.
  */
-extern void __init init_consistent_dma_size(unsigned long size);
-
+static inline void init_consistent_dma_size(unsigned long size) { }
 
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c.  See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-               unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-               enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-               size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-               size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-       unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       return 1;
-}
 
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-       unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       __dma_page_cpu_to_dev(page, offset, size, dir);
-       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-               handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-               size_t size, enum dma_data_direction dir)
-{
-       unsigned long offset;
-       struct page *page;
-       dma_addr_t addr;
-
-       BUG_ON(!virt_addr_valid(cpu_addr));
-       BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-       BUG_ON(!valid_dma_direction(dir));
-
-       page = virt_to_page(cpu_addr);
-       offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-       addr = __dma_map_page(dev, page, offset, size, dir);
-       debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-       return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       dma_addr_t addr;
-
-       BUG_ON(!valid_dma_direction(dir));
-
-       addr = __dma_map_page(dev, page, offset, size, dir);
-       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-       return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       debug_dma_unmap_page(dev, handle, size, dir, true);
-       __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       debug_dma_unmap_page(dev, handle, size, dir, false);
-       __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-               dma_addr_t handle, unsigned long offset, size_t size,
-               enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-
-       debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-       if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-               return;
-
-       __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-               dma_addr_t handle, unsigned long offset, size_t size,
-               enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-
-       debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-       if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-               return;
-
-       __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
 
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
-               enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+               enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+               enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-               enum dma_data_direction);
-
 
 #endif /* __KERNEL__ */
 #endif
index f89515a..2bb8cac 100644 (file)
@@ -45,7 +45,7 @@ void *return_address(unsigned int);
 
 #else
 
-extern inline void *return_address(unsigned int level)
+static inline void *return_address(unsigned int level)
 {
        return NULL;
 }
diff --git a/arch/arm/include/asm/hugetlb-2level.h b/arch/arm/include/asm/hugetlb-2level.h
new file mode 100644 (file)
index 0000000..b08353a
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * arch/arm/include/asm/hugetlb-2level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_2LEVEL_H
+#define _ASM_ARM_HUGETLB_2LEVEL_H
+
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       pmd_t pmd =  *((pmd_t *)ptep);
+       u32 pmdval = pmd_val(pmd);
+       pte_t retval;
+
+       if (!pmd_val(pmd))
+               return __pte(0);
+
+       retval = __pte((pteval_t) (pmd_val(pmd) & HPAGE_MASK)
+                       | (pmdval & 0x0c) | ((pmdval >> 8) & 0x10)
+                       | L_PTE_PRESENT | L_PTE_USER | L_PTE_VALID);
+
+       if (pmd_exec(pmd))
+               retval = pte_mkexec(retval);
+       else
+               retval = pte_mknexec(retval);
+
+       if (pmd_young(pmd))
+               retval = pte_mkyoung(retval);
+       else
+               retval = pte_mkold(retval);
+
+       if (pmd_dirty(pmd))
+               retval = pte_mkdirty(retval);
+       else
+               retval = pte_mkclean(retval);
+
+       if (pmd_write(pmd))
+               retval = pte_mkwrite(retval);
+       else
+               retval = pte_wrprotect(retval);
+
+       return retval;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       pmdval_t pmdval = (pmdval_t) pte_val(pte);
+       pmd_t *pmdp = (pmd_t *) ptep;
+
+       /* take the target address bits from the pte only */
+       pmdval &= HPAGE_MASK;
+
+       /*
+        * now use pmd_modify to translate the permission bits from the pte
+        * and set the memory type information.
+        */
+       pmdval = pmd_val(pmd_modify(__pmd(pmdval), __pgprot(pte_val(pte))));
+
+       __sync_icache_dcache(pte);
+
+       set_pmd_at(mm, addr, pmdp, __pmd(pmdval));
+}
+
+static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+       pmd_t *pmdp = (pmd_t *)ptep;
+       pmd_clear(pmdp);
+       flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       pmd_t *pmdp = (pmd_t *) ptep;
+       set_pmd_at(mm, addr, pmdp, pmd_wrprotect(*pmdp));
+}
+
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       pmd_t *pmdp = (pmd_t *)ptep;
+       pte_t pte = huge_ptep_get(ptep);
+       pmd_clear(pmdp);
+
+       return pte;
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       int changed = !pte_same(huge_ptep_get(ptep), pte);
+       if (changed) {
+               set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+               flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+       }
+
+       return changed;
+}
+
+#endif /* _ASM_ARM_HUGETLB_2LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644 (file)
index 0000000..4868064
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+       ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644 (file)
index 0000000..1e92975
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/hugetlb-3level.h>
+#else
+#include <asm/hugetlb-2level.h>
+#endif
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr, unsigned long len)
+{
+       return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+                                        unsigned long addr, unsigned long len)
+{
+       struct hstate *h = hstate_file(file);
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (addr & ~huge_page_mask(h))
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+       clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
new file mode 100644 (file)
index 0000000..bf863ed
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __ASM_IDMAP_H
+#define __ASM_IDMAP_H
+
+#include <linux/compiler.h>
+#include <asm/pgtable.h>
+
+/* Tag a function as requiring to be executed via an identity mapping. */
+#define __idmap __section(.idmap.text) noinline notrace
+
+extern pgd_t *idmap_pgd;
+
+void setup_mm_for_reboot(void);
+
+#endif /* __ASM_IDMAP_H */
index 065d100..cb81b3b 100644 (file)
@@ -83,6 +83,9 @@ extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
 
+extern void __iomem *ioremap_prot(resource_size_t, unsigned long size,
+       unsigned long prot_val);
+
 /*
  * Bad read/write accesses...
  */
index b36f365..a6efcdd 100644 (file)
@@ -30,6 +30,7 @@ struct map_desc {
 #define MT_MEMORY_DTCM         12
 #define MT_MEMORY_ITCM         13
 #define MT_MEMORY_SO           14
+#define MT_MEMORY_DMA_READY    15
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
new file mode 100644 (file)
index 0000000..c0efdd6
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ *  arch/arm/include/asm/opcodes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL   0
+#define ARM_OPCODE_CONDTEST_PASS   1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+#endif /* __ASM_ARM_OPCODES_H */
index ca94653..97b440c 100644 (file)
@@ -151,7 +151,11 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)       memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-types.h>
+#else
 #include <asm/pgtable-2level-types.h>
+#endif
 
 #endif /* CONFIG_MMU */
 
index 3e08fd3..943504f 100644 (file)
 #define _PAGE_USER_TABLE       (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
 #define _PAGE_KERNEL_TABLE     (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
 
+#ifdef CONFIG_ARM_LPAE
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+       return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+       BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+       free_page((unsigned long)pmd);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+}
+
+#else  /* !CONFIG_ARM_LPAE */
+
 /*
  * Since we have only two-level page tables, these are trivial
  */
 #define pmd_alloc_one(mm,addr)         ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd)              do { } while (0)
-#define pgd_populate(mm,pmd,pte)       BUG()
+#define pud_populate(mm,pmd,pte)       BUG()
+
+#endif /* CONFIG_ARM_LPAE */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -109,7 +131,9 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 {
        pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
        pmdp[0] = __pmd(pmdval);
+#ifndef CONFIG_ARM_LPAE
        pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+#endif
        flush_pmd_entry(pmdp);
 }
 
index 66cb5b0..93ce673 100644 (file)
@@ -64,4 +64,6 @@ typedef pteval_t pgprot_t;
 
 #endif /* STRICT_MM_TYPECHECKS */
 
+#define pte_pgprot(pte)        __pgprot(pte_val(pte))
+
 #endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */
index 1cb80c4..1300888 100644 (file)
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID            (_AT(pteval_t, 1) << 0)         /* Valid */
 #define L_PTE_PRESENT          (_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG            (_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE             (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
 #define L_PTE_MT_VECTORS       (_AT(pteval_t, 0x0f) << 2)      /* 1111 */
 #define L_PTE_MT_MASK          (_AT(pteval_t, 0x0f) << 2)
 
+#ifndef __ASSEMBLY__
+
+/*
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
+ */
+#define pud_none(pud)          (0)
+#define pud_bad(pud)           (0)
+#define pud_present(pud)       (1)
+#define pud_clear(pudp)                do { } while (0)
+#define set_pud(pud,pudp)      do { } while (0)
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+       return (pmd_t *)pud;
+}
+
+#define pmd_bad(pmd)           (pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps)          \
+       do {                            \
+               pmdpd[0] = pmdps[0];    \
+               pmdpd[1] = pmdps[1];    \
+               flush_pmd_entry(pmdpd); \
+       } while (0)
+
+#define pmd_clear(pmdp)                        \
+       do {                            \
+               pmdp[0] = __pmd(0);     \
+               pmdp[1] = __pmd(0);     \
+               clean_pmd_entry(pmdp);  \
+       } while (0)
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
+#define pmd_present(pmd)        ((pmd_val(pmd) & PMD_TYPE_MASK) != PMD_TYPE_FAULT)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+
+#ifdef CONFIG_SYS_SUPPORTS_HUGETLBFS
+
+/*
+ * now follows some of the definitions to allow huge page support, we can't put
+ * these in the hugetlb source files as they are also required for transparent
+ * hugepage support.
+ */
+
+#define HPAGE_SHIFT             PMD_SHIFT
+#define HPAGE_SIZE              (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_LINUX_PTE_COUNT       (PAGE_OFFSET >> HPAGE_SHIFT)
+#define HUGE_LINUX_PTE_SIZE        (HUGE_LINUX_PTE_COUNT * sizeof(pte_t *))
+#define HUGE_LINUX_PTE_INDEX(addr) (addr >> HPAGE_SHIFT)
+
+/*
+ *  We re-purpose the following domain bits in the section descriptor
+ */
+#define PMD_DOMAIN_MASK                (_AT(pmdval_t, 0xF) << 5)
+#define PMD_DSECT_DIRTY                (_AT(pmdval_t, 1) << 5)
+#define PMD_DSECT_AF           (_AT(pmdval_t, 1) << 6)
+#define PMD_DSECT_SPLITTING    (_AT(pmdval_t, 1) << 7)
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                               pmd_t *pmdp, pmd_t pmd)
+{
+       /*
+        * we can sometimes be passed a pmd pointing to a level 2 descriptor
+        * from collapse_huge_page.
+        */
+       if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
+               pmdp[0] = __pmd(pmd_val(pmd));
+               pmdp[1] = __pmd(pmd_val(pmd) + 256 * sizeof(pte_t));
+       } else {
+               pmdp[0] = __pmd(pmd_val(pmd));                  /* first 1M section  */
+               pmdp[1] = __pmd(pmd_val(pmd) + SECTION_SIZE);   /* second 1M section */
+       }
+
+       flush_pmd_entry(pmdp);
+}
+
+#define pmd_mkhuge(pmd)                (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+PMD_BIT_FUNC(mkold, &= ~PMD_DSECT_AF);
+PMD_BIT_FUNC(mkdirty, |= PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkclean, &= ~PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_DSECT_AF);
+PMD_BIT_FUNC(mkwrite, |= PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(wrprotect,        &= ~PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+PMD_BIT_FUNC(mkexec,   &= ~PMD_SECT_XN);
+PMD_BIT_FUNC(mknexec,  |= PMD_SECT_XN);
+
+#define pmd_young(pmd)                 (pmd_val(pmd) & PMD_DSECT_AF)
+#define pmd_write(pmd)                 (pmd_val(pmd) & PMD_SECT_AP_WRITE)
+#define pmd_exec(pmd)                  (!(pmd_val(pmd) & PMD_SECT_XN))
+#define pmd_dirty(pmd)                 (pmd_val(pmd) & PMD_DSECT_DIRTY)
+
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pmd_modify(pmd, prot)                                                  \
+({                                                                             \
+       pmd_t pmdret = __pmd(pmd_val(pmd) & (PMD_MASK | PMD_DOMAIN_MASK));      \
+       pgprot_t inprot = prot;                                                 \
+       u32 inprotval = pgprot_val(inprot);                                     \
+       pte_t newprot = __pte(inprotval);                                       \
+                                                                               \
+       if (pte_dirty(newprot))                                                 \
+               pmdret = pmd_mkdirty(pmdret);                                   \
+       else                                                                    \
+               pmdret = pmd_mkclean(pmdret);                                   \
+                                                                               \
+       if (pte_exec(newprot))                                                  \
+               pmdret = pmd_mkexec(pmdret);                                    \
+       else                                                                    \
+               pmdret = pmd_mknexec(pmdret);                                   \
+                                                                               \
+       if (pte_write(newprot))                                                 \
+               pmdret = pmd_mkwrite(pmdret);                                   \
+       else                                                                    \
+               pmdret = pmd_wrprotect(pmdret);                                 \
+                                                                               \
+       if (pte_young(newprot))                                                 \
+               pmdret = pmd_mkyoung(pmdret);                                   \
+       else                                                                    \
+               pmdret = pmd_mkold(pmdret);                                     \
+       pmdret = __pmd(pmd_val(pmdret) | (inprotval & 0x0c)                     \
+                       | ((inprotval << 8) & 0x1000)                           \
+                       | PMD_TYPE_SECT | PMD_SECT_AP_WRITE                     \
+                       | PMD_SECT_AP_READ | PMD_SECT_nG);                      \
+                                                                               \
+       pmdret;                                                                 \
+})
+
+#define pmd_hugewillfault(pmd) (       !pmd_young(pmd) ||      \
+                                       !pmd_write(pmd) ||      \
+                                       !pmd_dirty(pmd) )
+#define pmd_thp_or_huge(pmd)           ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#else
+#define HPAGE_SIZE 0
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd)   (0)
+#endif /* CONFIG_SYS_SUPPORTS_HUGETLBFS */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_mkhuge(pmd)                (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+PMD_BIT_FUNC(mksplitting, |= PMD_DSECT_SPLITTING);
+#define pmd_trans_splitting(pmd)       (pmd_val(pmd) & PMD_DSECT_SPLITTING)
+#define pmd_trans_huge(pmd)            ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+       /*
+        * for a section, we need to mask off more of the pmd
+        * before looking up the pfn
+        */
+       if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+               return __phys_to_pfn(pmd_val(pmd) & HPAGE_MASK);
+       else
+               return __phys_to_pfn(pmd_val(pmd) & PHYS_MASK);
+}
+
+#define pfn_pmd(pfn,prot) pmd_modify(__pmd(__pfn_to_phys(pfn)),prot);
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot);
+
+static inline int has_transparent_hugepage(void)
+{
+       return 1;
+}
+
+#define _PMD_HUGE(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#define _PMD_HPAGE(pmd) (phys_to_page(pmd_val(pmd) & HPAGE_MASK))
+#else
+#define _PMD_HUGE(pmd) (0)
+#define _PMD_HPAGE(pmd) (0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+       /*
+        * for a section, we need to mask off more of the pmd
+        * before looking up the page as it is a section descriptor.
+        */
+       if (_PMD_HUGE(pmd))
+               return _PMD_HPAGE(pmd);
+
+       return phys_to_page(pmd_val(pmd) & PHYS_MASK);
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
new file mode 100644 (file)
index 0000000..53c7f67
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-hwdef.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
+#define _ASM_PGTABLE_3LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1/2 descriptor
+ *   - common
+ */
+#define PMD_TYPE_MASK          (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT         (_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE         (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_SECT          (_AT(pmdval_t, 1) << 0)
+#define PMD_BIT4               (_AT(pmdval_t, 0))
+#define PMD_DOMAIN(x)          (_AT(pmdval_t, 0))
+
+/*
+ *   - section
+ */
+#define PMD_SECT_BUFFERABLE    (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE     (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER          (_AT(pmdval_t, 1) << 6)         /* AP[1] */
+#define PMD_SECT_RDONLY                (_AT(pmdval_t, 1) << 7)         /* AP[2] */
+#define PMD_SECT_S             (_AT(pmdval_t, 3) << 8)
+#define PMD_SECT_AF            (_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_nG            (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_XN            (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_AP_WRITE      (_AT(pmdval_t, 0))
+#define PMD_SECT_AP_READ       (_AT(pmdval_t, 0))
+#define PMD_SECT_TEX(x)                (_AT(pmdval_t, 0))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_SECT_UNCACHED      (_AT(pmdval_t, 0) << 2) /* strongly ordered */
+#define PMD_SECT_BUFFERED      (_AT(pmdval_t, 1) << 2) /* normal non-cacheable */
+#define PMD_SECT_WT            (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
+#define PMD_SECT_WB            (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
+#define PMD_SECT_WBWA          (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+
+/*
+ * + Level 3 descriptor (PTE)
+ */
+#define PTE_TYPE_MASK          (_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT         (_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_PAGE          (_AT(pteval_t, 3) << 0)
+#define PTE_BUFFERABLE         (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
+#define PTE_CACHEABLE          (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
+#define PTE_EXT_SHARED         (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+#define PTE_EXT_AF             (_AT(pteval_t, 1) << 10)        /* Access Flag */
+#define PTE_EXT_NG             (_AT(pteval_t, 1) << 11)        /* nG */
+#define PTE_EXT_XN             (_AT(pteval_t, 1) << 54)        /* XN */
+
+/*
+ * 40-bit physical address supported.
+ */
+#define PHYS_MASK_SHIFT                (40)
+#define PHYS_MASK              ((1ULL << PHYS_MASK_SHIFT) - 1)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
new file mode 100644 (file)
index 0000000..921aa30
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-types.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
+#define _ASM_PGTABLE_3LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x)      ((x).pte)
+#define pmd_val(x)      ((x).pmd)
+#define pgd_val(x)     ((x).pgd)
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) } )
+#define __pmd(x)        ((pmd_t) { (x) } )
+#define __pgd(x)       ((pgd_t) { (x) } )
+#define __pgprot(x)     ((pgprot_t) { (x) } )
+
+#else  /* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pgdval_t pgd_t;
+typedef pteval_t pgprot_t;
+
+#define pte_val(x)     (x)
+#define pmd_val(x)     (x)
+#define pgd_val(x)     (x)
+#define pgprot_val(x)  (x)
+
+#define __pte(x)       (x)
+#define __pmd(x)       (x)
+#define __pgd(x)       (x)
+#define __pgprot(x)    (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* _ASM_PGTABLE_3LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
new file mode 100644 (file)
index 0000000..de446f9
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * arch/arm/include/asm/pgtable-3level.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_H
+#define _ASM_PGTABLE_3LEVEL_H
+
+/*
+ * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
+ * 8 bytes each, occupying a 4K page. The first level table covers a range of
+ * 512GB, each entry representing 1GB. Since we are limited to 4GB input
+ * address range, only 4 entries in the PGD are used.
+ *
+ * There are enough spare bits in a page table entry for the kernel specific
+ * state.
+ */
+#define PTRS_PER_PTE           512
+#define PTRS_PER_PMD           512
+#define PTRS_PER_PGD           4
+
+#define PTE_HWTABLE_PTRS       (PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF                (0)
+#define PTE_HWTABLE_SIZE       (PTRS_PER_PTE * sizeof(u64))
+
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map.
+ */
+#define PGDIR_SHIFT            30
+
+/*
+ * PMD_SHIFT determines the size a middle-level page table entry can map.
+ */
+#define PMD_SHIFT              21
+
+#define PMD_SIZE               (1UL << PMD_SHIFT)
+#define PMD_MASK               (~(PMD_SIZE-1))
+#define PGDIR_SIZE             (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK             (~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT          21
+#define SECTION_SIZE           (1UL << SECTION_SHIFT)
+#define SECTION_MASK           (~(SECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD      (PAGE_OFFSET / PGDIR_SIZE)
+
+/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT            PMD_SHIFT
+#define HPAGE_SIZE             (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK             (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
+ * "Linux" PTE definitions for LPAE.
+ *
+ * These bits overlap with the hardware bits but the naming is preserved for
+ * consistency with the classic page table format.
+ */
+#define L_PTE_VALID            (_AT(pteval_t, 1) << 0)         /* Valid */
+#define L_PTE_PRESENT          (_AT(pteval_t, 3) << 0)         /* Present */
+#define L_PTE_FILE             (_AT(pteval_t, 1) << 2)         /* only when !PRESENT */
+#define L_PTE_BUFFERABLE       (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
+#define L_PTE_CACHEABLE                (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
+#define L_PTE_USER             (_AT(pteval_t, 1) << 6)         /* AP[1] */
+#define L_PTE_RDONLY           (_AT(pteval_t, 1) << 7)         /* AP[2] */
+#define L_PTE_SHARED           (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+#define L_PTE_YOUNG            (_AT(pteval_t, 1) << 10)        /* AF */
+#define L_PTE_XN               (_AT(pteval_t, 1) << 54)        /* XN */
+#define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)        /* unused */
+#define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)        /* unused */
+
+#define PMD_SECT_DIRTY         (_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING     (_AT(pmdval_t, 1) << 57)
+
+/*
+ * To be used in assembly code with the upper page attributes.
+ */
+#define L_PTE_XN_HIGH          (1 << (54 - 32))
+#define L_PTE_DIRTY_HIGH       (1 << (55 - 32))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define L_PTE_MT_UNCACHED      (_AT(pteval_t, 0) << 2) /* strongly ordered */
+#define L_PTE_MT_BUFFERABLE    (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
+#define L_PTE_MT_WRITETHROUGH  (_AT(pteval_t, 2) << 2) /* normal inner write-through */
+#define L_PTE_MT_WRITEBACK     (_AT(pteval_t, 3) << 2) /* normal inner write-back */
+#define L_PTE_MT_WRITEALLOC    (_AT(pteval_t, 7) << 2) /* normal inner write-alloc */
+#define L_PTE_MT_DEV_SHARED    (_AT(pteval_t, 4) << 2) /* device */
+#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 4) << 2) /* device */
+#define L_PTE_MT_DEV_WC                (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
+#define L_PTE_MT_DEV_CACHED    (_AT(pteval_t, 3) << 2) /* normal inner write-back */
+#define L_PTE_MT_MASK          (_AT(pteval_t, 7) << 2)
+
+/*
+ * Software PGD flags.
+ */
+#define L_PGD_SWAPPER          (_AT(pgdval_t, 1) << 55)        /* swapper_pg_dir entry */
+
+#ifndef __ASSEMBLY__
+
+#define pud_none(pud)          (!pud_val(pud))
+#define pud_bad(pud)           (!(pud_val(pud) & 2))
+#define pud_present(pud)       (pud_val(pud))
+
+#define pud_clear(pudp)                        \
+       do {                            \
+               *pudp = __pud(0);       \
+               clean_pmd_entry(pudp);  \
+       } while (0)
+
+#define set_pud(pudp, pud)             \
+       do {                            \
+               *pudp = pud;            \
+               flush_pmd_entry(pudp);  \
+       } while (0)
+
+static inline pmd_t *pud_page_vaddr(pud_t pud)
+{
+       return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+/* Find an entry in the second-level page table.. */
+#define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+#define pmd_bad(pmd)           (!(pmd_val(pmd) & 2))
+
+#define copy_pmd(pmdpd,pmdps)          \
+       do {                            \
+               *pmdpd = *pmdps;        \
+               flush_pmd_entry(pmdpd); \
+       } while (0)
+
+#define pmd_clear(pmdp)                        \
+       do {                            \
+               *pmdp = __pmd(0);       \
+               clean_pmd_entry(pmdp);  \
+       } while (0)
+
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison test fails erroneously leading ultimately to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b)  ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG     \
+                                       : pte_val(pte_a))                               \
+                               == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG   \
+                                       : pte_val(pte_b)))
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+
+#define pte_huge(pte)          ((pte_val(pte) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+
+#define pte_mkhuge(pte)                (__pte((pte_val(pte) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+
+#define pmd_present(pmd)       ((pmd_val(pmd) & PMD_TYPE_MASK) != PMD_TYPE_FAULT)
+#define pmd_young(pmd)         (pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)         (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#define pmd_hugewillfault(pmd) ( !pmd_young(pmd) || !pmd_write(pmd) )
+#define pmd_thp_or_huge(pmd)   ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd)    ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect,        |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold,    &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+
+#define pmd_mkhuge(pmd)                (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+#define pmd_pfn(pmd)           (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot)      (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
+
+#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+       const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY;
+       pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+       return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                             pmd_t *pmdp, pmd_t pmd)
+{
+       BUG_ON(addr >= TASK_SIZE);
+       *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+       flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+       return 1;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_3LEVEL_H */
index 1831111..8426229 100644 (file)
 #ifndef _ASMARM_PGTABLE_HWDEF_H
 #define _ASMARM_PGTABLE_HWDEF_H
 
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-hwdef.h>
+#else
 #include <asm/pgtable-2level-hwdef.h>
+#endif
 
 #endif
index fcbac3c..1ce99a7 100644 (file)
 #define _ASMARM_PGTABLE_H
 
 #include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
 
 #ifndef CONFIG_MMU
 
+#include <asm-generic/4level-fixup.h>
 #include "pgtable-nommu.h"
 
 #else
 
+#include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
-#include <mach/vmalloc.h>
 #include <asm/pgtable-hwdef.h>
 
+
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
 #include <asm/pgtable-2level.h>
+#endif
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
  */
-#ifndef VMALLOC_START
 #define VMALLOC_OFFSET         (8*1024*1024)
 #define VMALLOC_START          (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END            0xff000000UL
 
 #define LIBRARY_TEXT_START     0x0c000000
 
@@ -61,6 +63,12 @@ extern void __pgd_error(const char *file, int line, pgd_t);
  */
 #define FIRST_USER_ADDRESS     PAGE_SIZE
 
+/* OMAP hack - high vector CPUs, so don't limit it */
+#ifdef CONFIG_ARCH_OMAP
+#undef FIRST_USER_ADDRESS
+#define FIRST_USER_ADDRESS     0
+#endif
+
 /*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
@@ -104,6 +112,9 @@ extern pgprot_t             pgprot_kernel;
 #define pgprot_stronglyordered(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 
+#define pgprot_writethrough(prot) \
+       __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITETHROUGH)
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
@@ -163,51 +174,18 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)          (0)
-#define pgd_bad(pgd)           (0)
-#define pgd_present(pgd)       (1)
-#define pgd_clear(pgdp)                do { } while (0)
-#define set_pgd(pgd,pgdp)      do { } while (0)
-#define set_pud(pud,pudp)      do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)  ((pmd_t *)(dir))
-
 #define pmd_none(pmd)          (!pmd_val(pmd))
-#define pmd_present(pmd)       (pmd_val(pmd))
-#define pmd_bad(pmd)           (pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps)          \
-       do {                            \
-               pmdpd[0] = pmdps[0];    \
-               pmdpd[1] = pmdps[1];    \
-               flush_pmd_entry(pmdpd); \
-       } while (0)
-
-#define pmd_clear(pmdp)                        \
-       do {                            \
-               pmdp[0] = __pmd(0);     \
-               pmdp[1] = __pmd(0);     \
-               clean_pmd_entry(pmdp);  \
-       } while (0)
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
+#ifdef SYS_SUPPORTS_HUGETLBFS
+       if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+               return __va(pmd_val(pmd) & HPAGE_MASK);
+#endif
+
        return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
-#define pmd_page(pmd)          pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end) (end)
-
-
 #ifndef CONFIG_HIGHPTE
 #define __pte_map(pmd)         pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte)       do { } while (0)
@@ -229,7 +207,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_page(pte)          pfn_to_page(pte_pfn(pte))
 #define mk_pte(page,prot)      pfn_pte(page_to_pfn(page), prot)
 
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 #define pte_clear(mm,addr,ptep)        set_pte_ext(ptep, __pte(0), 0)
 
 #define pte_none(pte)          (!pte_val(pte))
@@ -240,9 +217,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)          (!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)       (0)
 
-#define pte_present_user(pte) \
-       ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-        (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -274,6 +249,8 @@ PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
 PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
 PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
 PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
+PTE_BIT_FUNC(mkexec,   &= ~L_PTE_XN);
+PTE_BIT_FUNC(mknexec,  |= L_PTE_XN);
 
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
@@ -348,9 +325,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pgtable_cache_init() do { } while (0)
 
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
index 9e92cb2..f3628fb 100644 (file)
@@ -65,7 +65,11 @@ extern struct processor {
         * Set a possibly extended PTE.  Non-extended PTEs should
         * ignore 'ext'.
         */
+#ifdef CONFIG_ARM_LPAE
+       void (*set_pte_ext)(pte_t *ptep, pte_t pte);
+#else
        void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
 
        /* Suspend/resume */
        unsigned int suspend_size;
@@ -79,7 +83,11 @@ extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
 extern void cpu_dcache_clean_area(void *, int);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+#ifdef CONFIG_ARM_LPAE
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 
 /* These three are private to arch/arm/kernel/suspend.c */
@@ -107,6 +115,18 @@ extern void cpu_resume(void);
 
 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
 
+#ifdef CONFIG_ARM_LPAE
+#define cpu_get_pgd()  \
+       ({                                              \
+               unsigned long pg, pg2;                  \
+               __asm__("mrrc   p15, 0, %0, %1, c2"     \
+                       : "=r" (pg), "=r" (pg2)         \
+                       :                               \
+                       : "cc");                        \
+               pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);  \
+               (pgd_t *)phys_to_virt(pg);              \
+       })
+#else
 #define cpu_get_pgd()  \
        ({                                              \
                unsigned long pg;                       \
@@ -115,6 +135,7 @@ extern void cpu_resume(void);
                pg &= ~0x3fff;                          \
                (pgd_t *)phys_to_virt(pg);              \
        })
+#endif
 
 #endif
 
index 3352451..b81b61c 100644 (file)
@@ -106,9 +106,7 @@ static inline void prefetch(const void *ptr)
 {
        __asm__ __volatile__(
                "pld\t%a0"
-               :
-               : "p" (ptr)
-               : "cc");
+               :: "p" (ptr));
 }
 
 #define ARCH_HAS_PREFETCHW
index c8e6ddf..e3f7572 100644 (file)
@@ -8,113 +8,7 @@
 #ifndef ASM_SCHED_CLOCK
 #define ASM_SCHED_CLOCK
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct clock_data {
-       u64 epoch_ns;
-       u32 epoch_cyc;
-       u32 epoch_cyc_copy;
-       u32 mult;
-       u32 shift;
-};
-
-#define DEFINE_CLOCK_DATA(name)        struct clock_data name
-
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
-{
-       return (cyc * mult) >> shift;
-}
-
-/*
- * Atomically update the sched_clock epoch.  Your update callback will
- * be called from a timer before the counter wraps - read the current
- * counter value, and call this function to safely move the epochs
- * forward.  Only use this from the update callback.
- */
-static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
-{
-       unsigned long flags;
-       u64 ns = cd->epoch_ns +
-               cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
-
-       /*
-        * Write epoch_cyc and epoch_ns in a way that the update is
-        * detectable in cyc_to_fixed_sched_clock().
-        */
-       raw_local_irq_save(flags);
-       cd->epoch_cyc = cyc;
-       smp_wmb();
-       cd->epoch_ns = ns;
-       smp_wmb();
-       cd->epoch_cyc_copy = cyc;
-       raw_local_irq_restore(flags);
-}
-
-/*
- * If your clock rate is known at compile time, using this will allow
- * you to optimize the mult/shift loads away.  This is paired with
- * init_fixed_sched_clock() to ensure that your mult/shift are correct.
- */
-static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
-       u32 cyc, u32 mask, u32 mult, u32 shift)
-{
-       u64 epoch_ns;
-       u32 epoch_cyc;
-
-       /*
-        * Load the epoch_cyc and epoch_ns atomically.  We do this by
-        * ensuring that we always write epoch_cyc, epoch_ns and
-        * epoch_cyc_copy in strict order, and read them in strict order.
-        * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-        * the middle of an update, and we should repeat the load.
-        */
-       do {
-               epoch_cyc = cd->epoch_cyc;
-               smp_rmb();
-               epoch_ns = cd->epoch_ns;
-               smp_rmb();
-       } while (epoch_cyc != cd->epoch_cyc_copy);
-
-       return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
-}
-
-/*
- * Otherwise, you need to use this, which will obtain the mult/shift
- * from the clock_data structure.  Use init_sched_clock() with this.
- */
-static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
-       u32 cyc, u32 mask)
-{
-       return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
-}
-
-/*
- * Initialize the clock data - calculate the appropriate multiplier
- * and shift.  Also setup a timer to ensure that the epoch is refreshed
- * at the appropriate time interval, which will call your update
- * handler.
- */
-void init_sched_clock(struct clock_data *, void (*)(void),
-       unsigned int, unsigned long);
-
-/*
- * Use this initialization function rather than init_sched_clock() if
- * you're using cyc_to_fixed_sched_clock, which will warn if your
- * constants are incorrect.
- */
-static inline void init_fixed_sched_clock(struct clock_data *cd,
-       void (*update)(void), unsigned int bits, unsigned long rate,
-       u32 mult, u32 shift)
-{
-       init_sched_clock(cd, update, bits, rate);
-       if (cd->mult != mult || cd->shift != shift) {
-               pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
-                       "sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
-                       mult, shift, cd->mult, cd->shift);
-       }
-}
-
 extern void sched_clock_postinit(void);
+extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
 
 #endif
index 984014b..1344f5f 100644 (file)
@@ -80,6 +80,14 @@ struct siginfo;
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
                unsigned long err, unsigned long trap);
 
+#ifdef CONFIG_ARM_LPAE
+#define FAULT_CODE_ALIGNMENT   33
+#define FAULT_CODE_DEBUG       34
+#else
+#define FAULT_CODE_ALIGNMENT   1
+#define FAULT_CODE_DEBUG       2
+#endif
+
 void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
                                       struct pt_regs *),
                     int sig, int code, const char *name);
@@ -101,6 +109,7 @@ extern int __pure cpu_architecture(void);
 extern void cpu_init(void);
 
 void arm_machine_restart(char mode, const char *cmd);
+void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(char str, const char *cmd);
 
 #define UDBG_UNDEFINED (1 << 0)
@@ -108,6 +117,7 @@ extern void (*arm_pm_restart)(char str, const char *cmd);
 #define UDBG_BADABORT  (1 << 2)
 #define UDBG_SEGV      (1 << 3)
 #define UDBG_BUS       (1 << 4)
+#define UDBG_SEGV_SHORT        (1 << 8)
 
 extern unsigned int user_debug;
 
index 7b5cc8d..f45f2ef 100644 (file)
@@ -58,8 +58,10 @@ struct thread_info {
        struct cpu_context_save cpu_context;    /* cpu context */
        __u32                   syscall;        /* syscall number */
        __u8                    used_cp[16];    /* thread used copro */
-       unsigned long           tp_value;
+       unsigned long           tp_value[2];    /* TLS registers */
+#ifdef CONFIG_CRUNCH
        struct crunch_state     crunchstate;
+#endif
        union fp_state          fpstate __attribute__((aligned(8)));
        union vfp_state         vfpstate;
 #ifdef CONFIG_ARM_THUMBEE
index 265f908..5ff07f6 100644 (file)
@@ -92,10 +92,16 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
 {
        if (!tlb->fullmm) {
+               unsigned long size = PAGE_SIZE;
+
                if (addr < tlb->range_start)
                        tlb->range_start = addr;
-               if (addr + PAGE_SIZE > tlb->range_end)
-                       tlb->range_end = addr + PAGE_SIZE;
+
+               if (tlb->vma && is_vm_hugetlb_page(tlb->vma))
+                       size = HPAGE_SIZE;
+
+               if (addr + size > tlb->range_end)
+                       tlb->range_end = addr + size;
        }
 }
 
@@ -202,8 +208,24 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        tlb_remove_page(tlb, pte);
 }
 
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+                                 unsigned long addr)
+{
+#ifdef CONFIG_ARM_LPAE
+       tlb_add_flush(tlb, addr);
+       tlb_remove_page(tlb, virt_to_page(pmdp));
+#endif
+}
+
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+       tlb_add_flush(tlb, addr);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)  __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)  pmd_free((tlb)->mm, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr)  __pmd_free_tlb(tlb, pmdp, addr)
+#define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
 
 #define tlb_migrate_finish(mm)         do { } while (0)
 
index 02b2f82..c86b09a 100644 (file)
@@ -542,6 +542,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 }
 #endif
 
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif
 
 #endif /* CONFIG_MMU */
index 73409e6..83259b8 100644 (file)
@@ -2,27 +2,30 @@
 #define __ASMARM_TLS_H
 
 #ifdef __ASSEMBLY__
-       .macro set_tls_none, tp, tmp1, tmp2
+#include <asm/asm-offsets.h>
+       .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
        .endm
 
-       .macro set_tls_v6k, tp, tmp1, tmp2
+       .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
+       mrc     p15, 0, \tmp2, c13, c0, 2       @ get the user r/w register
        mcr     p15, 0, \tp, c13, c0, 3         @ set TLS register
-       mov     \tmp1, #0
-       mcr     p15, 0, \tmp1, c13, c0, 2       @ clear user r/w TLS register
+       mcr     p15, 0, \tpuser, c13, c0, 2     @ and the user r/w register
+       str     \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
        .endm
 
-       .macro set_tls_v6, tp, tmp1, tmp2
+       .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
        ldr     \tmp1, =elf_hwcap
        ldr     \tmp1, [\tmp1, #0]
        mov     \tmp2, #0xffff0fff
        tst     \tmp1, #HWCAP_TLS               @ hardware TLS available?
-       mcrne   p15, 0, \tp, c13, c0, 3         @ yes, set TLS register
-       movne   \tmp1, #0
-       mcrne   p15, 0, \tmp1, c13, c0, 2       @ clear user r/w TLS register
        streq   \tp, [\tmp2, #-15]              @ set TLS value at 0xffff0ff0
+       mrcne   p15, 0, \tmp2, c13, c0, 2       @ get the user r/w register
+       mcrne   p15, 0, \tp, c13, c0, 3         @ yes, set TLS register
+       mcrne   p15, 0, \tpuser, c13, c0, 2     @ set user r/w register
+       strne   \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
        .endm
 
-       .macro set_tls_software, tp, tmp1, tmp2
+       .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
        mov     \tmp1, #0xffff0fff
        str     \tp, [\tmp1, #-15]              @ set TLS value at 0xffff0ff0
        .endm
 #ifdef CONFIG_TLS_REG_EMUL
 #define tls_emu                1
 #define has_tls_reg            1
-#define set_tls                set_tls_none
+#define switch_tls     switch_tls_none
 #elif defined(CONFIG_CPU_V6)
 #define tls_emu                0
 #define has_tls_reg            (elf_hwcap & HWCAP_TLS)
-#define set_tls                set_tls_v6
+#define switch_tls     switch_tls_v6
 #elif defined(CONFIG_CPU_32v6K)
 #define tls_emu                0
 #define has_tls_reg            1
-#define set_tls                set_tls_v6k
+#define switch_tls     switch_tls_v6k
 #else
 #define tls_emu                0
 #define has_tls_reg            0
-#define set_tls                set_tls_software
+#define switch_tls     switch_tls_software
 #endif
 
+#ifndef __ASSEMBLY__
+static inline unsigned long get_tpuser(void)
+{
+       unsigned long reg = 0;
+
+       if (has_tls_reg && !tls_emu)
+               __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
+
+       return reg;
+}
+#endif
 #endif /* __ASMARM_TLS_H */
index 16eed6a..59ce92f 100644 (file)
@@ -13,7 +13,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
 
-obj-y          := elf.o entry-armv.o entry-common.o irq.o \
+obj-y          := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
                   process.o ptrace.o return_address.o setup.o signal.o \
                   sys_arm.o stacktrace.o time.o traps.o
 
@@ -61,6 +61,7 @@ obj-$(CONFIG_CRASH_DUMP)      += crash_dump.o
 obj-$(CONFIG_SWP_EMULATE)      += swp_emulate.o
 CFLAGS_swp_emulate.o           := -Wa,-march=armv7-a
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += hw_breakpoint.o
+obj-$(CONFIG_CPU_V7_SYSFS)     += sysfs_v7.o
 
 obj-$(CONFIG_CRUNCH)           += crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o           := -Wa,-mcpu=ep9312
index 1429d89..c985b48 100644 (file)
@@ -59,10 +59,12 @@ int main(void)
   DEFINE(TI_USED_CP,           offsetof(struct thread_info, used_cp));
   DEFINE(TI_TP_VALUE,          offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,           offsetof(struct thread_info, fpstate));
+#ifdef CONFIG_VFP
   DEFINE(TI_VFPSTATE,          offsetof(struct thread_info, vfpstate));
 #ifdef CONFIG_SMP
   DEFINE(VFP_CPU,              offsetof(union vfp_state, hard.cpu));
 #endif
+#endif
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE,     offsetof(struct thread_info, thumbee_state));
 #endif
index ece0996..89892d5 100644 (file)
@@ -729,15 +729,16 @@ ENTRY(__switch_to)
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        add     ip, r1, #TI_CPU_SAVE
-       ldr     r3, [r2, #TI_TP_VALUE]
  ARM(  stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
  THUMB(        stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
  THUMB(        str     sp, [ip], #4               )
  THUMB(        str     lr, [ip], #4               )
+       ldr     r4, [r2, #TI_TP_VALUE]
+       ldr     r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
        ldr     r6, [r2, #TI_CPU_DOMAIN]
 #endif
-       set_tls r3, r4, r5
+       switch_tls r1, r4, r5, r3, r7
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        ldr     r7, [r2, #TI_TASK]
        ldr     r8, =__stack_chk_guard
index b2a27b6..1df0380 100644 (file)
@@ -405,7 +405,7 @@ ENTRY(vector_swi)
 
 #endif
 
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 7
        ldr     ip, __cr_alignment
        ldr     ip, [ip]
        mcr     p15, 0, ip, c1, c0              @ update control register
@@ -485,7 +485,7 @@ __sys_trace_return:
        b       ret_slow_syscall
 
        .align  5
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 7
        .type   __cr_alignment, #object
 __cr_alignment:
        .word   cr_alignment
index 9d95a46..de87bf2 100644 (file)
@@ -37,7 +37,7 @@
        .endm
 
        .macro  alignment_trap, rtemp
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 7
        ldr     \rtemp, .LCcralign
        ldr     \rtemp, [\rtemp]
        mcr     p15, 0, \rtemp, c1, c0
index 3606e85..67eeef7 100644 (file)
 #error KERNEL_RAM_VADDR must start at 0xXXXX8000
 #endif
 
+#ifdef CONFIG_ARM_LPAE
+       /* LPAE requires an additional page for the PGD */
+#define PG_DIR_SIZE    0x5000
+#define PMD_ORDER      3
+#else
 #define PG_DIR_SIZE    0x4000
 #define PMD_ORDER      2
+#endif
 
        .globl  swapper_pg_dir
        .equ    swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
@@ -93,6 +99,14 @@ ENTRY(stext)
  THUMB( it     eq )            @ force fixup-able long branch encoding
        beq     __error_p                       @ yes, error 'p'
 
+#ifdef CONFIG_ARM_LPAE
+       mrc     p15, 0, r3, c0, c1, 4           @ read ID_MMFR0
+       and     r3, r3, #0xf                    @ extract VMSA support
+       cmp     r3, #5                          @ long-descriptor translation table format?
+ THUMB( it     lo )                            @ force fixup-able long branch encoding
+       blo     __error_p                       @ only classic page table format
+#endif
+
 #ifndef CONFIG_XIP_KERNEL
        adr     r3, 2f
        ldmia   r3, {r4, r8}
@@ -164,17 +178,36 @@ __create_page_tables:
        teq     r0, r6
        bne     1b
 
+#ifdef CONFIG_ARM_LPAE
+       /*
+        * Build the PGD table (first level) to point to the PMD table. A PGD
+        * entry is 64-bit wide.
+        */
+       mov     r0, r4
+       add     r3, r4, #0x1000                 @ first PMD table address
+       orr     r3, r3, #3                      @ PGD block type
+       mov     r6, #4                          @ PTRS_PER_PGD
+       mov     r7, #1 << (55 - 32)             @ L_PGD_SWAPPER
+1:     str     r3, [r0], #4                    @ set bottom PGD entry bits
+       str     r7, [r0], #4                    @ set top PGD entry bits
+       add     r3, r3, #0x1000                 @ next PMD table
+       subs    r6, r6, #1
+       bne     1b
+
+       add     r4, r4, #0x1000                 @ point to the PMD tables
+#endif
+
        ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
 
        /*
         * Create identity mapping to cater for __enable_mmu.
         * This identity mapping will be removed by paging_init().
         */
-       adr     r0, __enable_mmu_loc
+       adr     r0, __turn_mmu_on_loc
        ldmia   r0, {r3, r5, r6}
        sub     r0, r0, r3                      @ virt->phys offset
-       add     r5, r5, r0                      @ phys __enable_mmu
-       add     r6, r6, r0                      @ phys __enable_mmu_end
+       add     r5, r5, r0                      @ phys __turn_mmu_on
+       add     r6, r6, r0                      @ phys __turn_mmu_on_end
        mov     r5, r5, lsr #SECTION_SHIFT
        mov     r6, r6, lsr #SECTION_SHIFT
 
@@ -219,8 +252,8 @@ __create_page_tables:
 #endif
 
        /*
-        * Then map boot params address in r2 or
-        * the first 1MB of ram if boot params address is not specified.
+        * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
+        * of ram if boot params address is not specified.
         * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
         */
        mov     r0, r2, lsr #SECTION_SHIFT
@@ -254,7 +287,15 @@ __create_page_tables:
        mov     r3, r7, lsr #SECTION_SHIFT
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
        orr     r3, r7, r3, lsl #SECTION_SHIFT
+#ifdef CONFIG_ARM_LPAE
+       mov     r7, #1 << (54 - 32)             @ XN
+#else
+       orr     r3, r3, #PMD_SECT_XN
+#endif
 1:     str     r3, [r0], #4
+#ifdef CONFIG_ARM_LPAE
+       str     r7, [r0], #4
+#endif
        add     r3, r3, #1 << SECTION_SHIFT
        cmp     r0, r6
        blo     1b
@@ -285,15 +326,18 @@ __create_page_tables:
        add     r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0]
 #endif
+#endif
+#ifdef CONFIG_ARM_LPAE
+       sub     r4, r4, #0x1000         @ point to the PGD table
 #endif
        mov     pc, lr
 ENDPROC(__create_page_tables)
        .ltorg
        .align
-__enable_mmu_loc:
+__turn_mmu_on_loc:
        .long   .
-       .long   __enable_mmu
-       .long   __enable_mmu_end
+       .long   __turn_mmu_on
+       .long   __turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
        __CPUINIT
@@ -377,12 +421,29 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
 #endif
+#ifdef CONFIG_USER_L2_PLE
+       mov     r5, #3
+       mcr     p15, 0, r5, c11, c1, 0
+#endif
+#ifdef CONFIG_ARM_LPAE
+       mov     r5, #0
+       mcrr    p15, 0, r4, r5, c2              @ load TTBR0
+#else
+#ifndef        CONFIG_SYS_SUPPORTS_HUGETLBFS
        mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+#else
+       @ set ourselves as the client in all domains
+       @ this allows us to then use the 4 domain bits in the
+       @ section descriptors in our transparent huge pages
+       ldr     r5, =0x55555555
+#endif /* CONFIG_SYS_SUPPORTS_HUGETLBFS */
+
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
+#endif /* CONFIG_ARM_LPAE */
        b       __turn_mmu_on
 ENDPROC(__enable_mmu)
 
@@ -401,15 +462,19 @@ ENDPROC(__enable_mmu)
  * other registers depend on the function called upon completion
  */
        .align  5
-__turn_mmu_on:
+       .pushsection    .idmap.text, "ax"
+ENTRY(__turn_mmu_on)
        mov     r0, r0
+       instr_sync
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
        mrc     p15, 0, r3, c0, c0, 0           @ read id reg
+       instr_sync
        mov     r3, r3
        mov     r3, r13
        mov     pc, r3
-__enable_mmu_end:
+__turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
+       .popsection
 
 
 #ifdef CONFIG_SMP_ON_UP
index 2bc1a8e..c1193ff 100644 (file)
@@ -1041,10 +1041,10 @@ static int __init arch_hw_breakpoint_init(void)
        }
 
        /* Register debug fault handler. */
-       hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
-                       "watchpoint debug exception");
-       hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
-                       "breakpoint debug exception");
+       hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+                       TRAP_HWBKPT, "watchpoint debug exception");
+       hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+                       TRAP_HWBKPT, "breakpoint debug exception");
 
        /* Register hotplug notifier. */
        register_cpu_notifier(&dbg_reset_nb);
index e59bbd4..764bd45 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/system.h>
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
 
-extern void setup_mm_for_reboot(char mode);
-
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
 extern unsigned long kexec_mach_type;
@@ -111,14 +110,6 @@ void machine_kexec(struct kimage *image)
 
        if (kexec_reinit)
                kexec_reinit();
-       local_irq_disable();
-       local_fiq_disable();
-       setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
-       flush_cache_all();
-       outer_flush_all();
-       outer_disable();
-       cpu_proc_fin();
-       outer_inv_all();
-       flush_cache_all();
-       cpu_reset(reboot_code_buffer_phys);
+
+       soft_restart(reboot_code_buffer_phys);
 }
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
new file mode 100644 (file)
index 0000000..f8179c6
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *  linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+       0xF0F0,                 /* EQ == Z set            */
+       0x0F0F,                 /* NE                     */
+       0xCCCC,                 /* CS == C set            */
+       0x3333,                 /* CC                     */
+       0xFF00,                 /* MI == N set            */
+       0x00FF,                 /* PL                     */
+       0xAAAA,                 /* VS == V set            */
+       0x5555,                 /* VC                     */
+       0x0C0C,                 /* HI == C set && Z clear */
+       0xF3F3,                 /* LS == C clear || Z set */
+       0xAA55,                 /* GE == (N==V)           */
+       0x55AA,                 /* LT == (N!=V)           */
+       0x0A05,                 /* GT == (!Z && (N==V))   */
+       0xF5FA,                 /* LE == (Z || (N!=V))    */
+       0xFFFF,                 /* AL always              */
+       0                       /* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
+{
+       u32 cc_bits  = opcode >> 28;
+       u32 psr_cond = psr >> 28;
+       unsigned int ret;
+
+       if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+               if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+                       ret = ARM_OPCODE_CONDTEST_PASS;
+               else
+                       ret = ARM_OPCODE_CONDTEST_FAIL;
+       } else {
+               ret = ARM_OPCODE_CONDTEST_UNCOND;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
index 4a2db48..40dbe11 100644 (file)
@@ -177,8 +177,14 @@ armpmu_event_set_period(struct perf_event *event,
                ret = 1;
        }
 
-       if (left > (s64)armpmu->max_period)
-               left = armpmu->max_period;
+       /*
+        * Limit the maximum period to prevent the counter value
+        * from overtaking the one we are about to program. In
+        * effect we are reducing max_period to account for
+        * interrupt latency (and we are being very conservative).
+        */
+       if (left > (armpmu->max_period >> 1))
+               left = armpmu->max_period >> 1;
 
        local64_set(&hwc->prev_count, (u64)-left);
 
@@ -778,11 +784,16 @@ user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
 {
        struct frame_tail buftail;
+       unsigned long err;
 
-       /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
-       if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+       pagefault_disable();
+       err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+       pagefault_enable();
+
+       if (err)
                return NULL;
 
        perf_callchain_store(entry, buftail.lr);
@@ -804,6 +815,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 
        perf_callchain_store(entry, regs->ARM_pc);
+
+       if (!current->mm)
+               return;
+
        tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
index 0ad3c6f..b7e0c25 100644 (file)
@@ -65,13 +65,15 @@ enum armv6_counters {
  * accesses/misses in hardware.
  */
 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6_PERFCTR_INSTR_EXEC,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6_PERFCTR_BR_MISPREDICT,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6_PERFCTR_INSTR_EXEC,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]            = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6_PERFCTR_BR_EXEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6_PERFCTR_BR_MISPREDICT,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -104,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
  * accesses/misses in hardware.
  */
 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]            = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6MPCORE_PERFCTR_BR_EXEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -255,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
index 510456d..3086184 100644 (file)
@@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu;
  * they are not available.
  */
 enum armv7_perf_types {
-       ARMV7_PERFCTR_PMNC_SW_INCR              = 0x00,
-       ARMV7_PERFCTR_IFETCH_MISS               = 0x01,
-       ARMV7_PERFCTR_ITLB_MISS                 = 0x02,
-       ARMV7_PERFCTR_DCACHE_REFILL             = 0x03, /* L1 */
-       ARMV7_PERFCTR_DCACHE_ACCESS             = 0x04, /* L1 */
-       ARMV7_PERFCTR_DTLB_REFILL               = 0x05,
-       ARMV7_PERFCTR_DREAD                     = 0x06,
-       ARMV7_PERFCTR_DWRITE                    = 0x07,
-       ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,
-       ARMV7_PERFCTR_EXC_TAKEN                 = 0x09,
-       ARMV7_PERFCTR_EXC_EXECUTED              = 0x0A,
-       ARMV7_PERFCTR_CID_WRITE                 = 0x0B,
-       /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+       ARMV7_PERFCTR_PMNC_SW_INCR                      = 0x00,
+       ARMV7_PERFCTR_L1_ICACHE_REFILL                  = 0x01,
+       ARMV7_PERFCTR_ITLB_REFILL                       = 0x02,
+       ARMV7_PERFCTR_L1_DCACHE_REFILL                  = 0x03,
+       ARMV7_PERFCTR_L1_DCACHE_ACCESS                  = 0x04,
+       ARMV7_PERFCTR_DTLB_REFILL                       = 0x05,
+       ARMV7_PERFCTR_MEM_READ                          = 0x06,
+       ARMV7_PERFCTR_MEM_WRITE                         = 0x07,
+       ARMV7_PERFCTR_INSTR_EXECUTED                    = 0x08,
+       ARMV7_PERFCTR_EXC_TAKEN                         = 0x09,
+       ARMV7_PERFCTR_EXC_EXECUTED                      = 0x0A,
+       ARMV7_PERFCTR_CID_WRITE                         = 0x0B,
+
+       /*
+        * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
         * It counts:
-        *  - all branch instructions,
+        *  - all (taken) branch instructions,
         *  - instructions that explicitly write the PC,
         *  - exception generating instructions.
         */
-       ARMV7_PERFCTR_PC_WRITE                  = 0x0C,
-       ARMV7_PERFCTR_PC_IMM_BRANCH             = 0x0D,
-       ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,
-       ARMV7_PERFCTR_UNALIGNED_ACCESS          = 0x0F,
+       ARMV7_PERFCTR_PC_WRITE                          = 0x0C,
+       ARMV7_PERFCTR_PC_IMM_BRANCH                     = 0x0D,
+       ARMV7_PERFCTR_PC_PROC_RETURN                    = 0x0E,
+       ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS              = 0x0F,
+       ARMV7_PERFCTR_PC_BRANCH_MIS_PRED                = 0x10,
+       ARMV7_PERFCTR_CLOCK_CYCLES                      = 0x11,
+       ARMV7_PERFCTR_PC_BRANCH_PRED                    = 0x12,
 
        /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
-       ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        = 0x10,
-       ARMV7_PERFCTR_CLOCK_CYCLES              = 0x11,
-       ARMV7_PERFCTR_PC_BRANCH_PRED            = 0x12,
-       ARMV7_PERFCTR_MEM_ACCESS                = 0x13,
-       ARMV7_PERFCTR_L1_ICACHE_ACCESS          = 0x14,
-       ARMV7_PERFCTR_L1_DCACHE_WB              = 0x15,
-       ARMV7_PERFCTR_L2_DCACHE_ACCESS          = 0x16,
-       ARMV7_PERFCTR_L2_DCACHE_REFILL          = 0x17,
-       ARMV7_PERFCTR_L2_DCACHE_WB              = 0x18,
-       ARMV7_PERFCTR_BUS_ACCESS                = 0x19,
-       ARMV7_PERFCTR_MEMORY_ERROR              = 0x1A,
-       ARMV7_PERFCTR_INSTR_SPEC                = 0x1B,
-       ARMV7_PERFCTR_TTBR_WRITE                = 0x1C,
-       ARMV7_PERFCTR_BUS_CYCLES                = 0x1D,
-
-       ARMV7_PERFCTR_CPU_CYCLES                = 0xFF
+       ARMV7_PERFCTR_MEM_ACCESS                        = 0x13,
+       ARMV7_PERFCTR_L1_ICACHE_ACCESS                  = 0x14,
+       ARMV7_PERFCTR_L1_DCACHE_WB                      = 0x15,
+       ARMV7_PERFCTR_L2_CACHE_ACCESS                   = 0x16,
+       ARMV7_PERFCTR_L2_CACHE_REFILL                   = 0x17,
+       ARMV7_PERFCTR_L2_CACHE_WB                       = 0x18,
+       ARMV7_PERFCTR_BUS_ACCESS                        = 0x19,
+       ARMV7_PERFCTR_MEM_ERROR                         = 0x1A,
+       ARMV7_PERFCTR_INSTR_SPEC                        = 0x1B,
+       ARMV7_PERFCTR_TTBR_WRITE                        = 0x1C,
+       ARMV7_PERFCTR_BUS_CYCLES                        = 0x1D,
+
+       ARMV7_PERFCTR_CPU_CYCLES                        = 0xFF
 };
 
 /* ARMv7 Cortex-A8 specific event types */
 enum armv7_a8_perf_types {
-       ARMV7_PERFCTR_WRITE_BUFFER_FULL         = 0x40,
-       ARMV7_PERFCTR_L2_STORE_MERGED           = 0x41,
-       ARMV7_PERFCTR_L2_STORE_BUFF             = 0x42,
-       ARMV7_PERFCTR_L2_ACCESS                 = 0x43,
-       ARMV7_PERFCTR_L2_CACH_MISS              = 0x44,
-       ARMV7_PERFCTR_AXI_READ_CYCLES           = 0x45,
-       ARMV7_PERFCTR_AXI_WRITE_CYCLES          = 0x46,
-       ARMV7_PERFCTR_MEMORY_REPLAY             = 0x47,
-       ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY   = 0x48,
-       ARMV7_PERFCTR_L1_DATA_MISS              = 0x49,
-       ARMV7_PERFCTR_L1_INST_MISS              = 0x4A,
-       ARMV7_PERFCTR_L1_DATA_COLORING          = 0x4B,
-       ARMV7_PERFCTR_L1_NEON_DATA              = 0x4C,
-       ARMV7_PERFCTR_L1_NEON_CACH_DATA         = 0x4D,
-       ARMV7_PERFCTR_L2_NEON                   = 0x4E,
-       ARMV7_PERFCTR_L2_NEON_HIT               = 0x4F,
-       ARMV7_PERFCTR_L1_INST                   = 0x50,
-       ARMV7_PERFCTR_PC_RETURN_MIS_PRED        = 0x51,
-       ARMV7_PERFCTR_PC_BRANCH_FAILED          = 0x52,
-       ARMV7_PERFCTR_PC_BRANCH_TAKEN           = 0x53,
-       ARMV7_PERFCTR_PC_BRANCH_EXECUTED        = 0x54,
-       ARMV7_PERFCTR_OP_EXECUTED               = 0x55,
-       ARMV7_PERFCTR_CYCLES_INST_STALL         = 0x56,
-       ARMV7_PERFCTR_CYCLES_INST               = 0x57,
-       ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL    = 0x58,
-       ARMV7_PERFCTR_CYCLES_NEON_INST_STALL    = 0x59,
-       ARMV7_PERFCTR_NEON_CYCLES               = 0x5A,
-
-       ARMV7_PERFCTR_PMU0_EVENTS               = 0x70,
-       ARMV7_PERFCTR_PMU1_EVENTS               = 0x71,
-       ARMV7_PERFCTR_PMU_EVENTS                = 0x72,
+       ARMV7_A8_PERFCTR_L2_CACHE_ACCESS                = 0x43,
+       ARMV7_A8_PERFCTR_L2_CACHE_REFILL                = 0x44,
+       ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS               = 0x50,
+       ARMV7_A8_PERFCTR_STALL_ISIDE                    = 0x56,
 };
 
 /* ARMv7 Cortex-A9 specific event types */
 enum armv7_a9_perf_types {
-       ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC     = 0x40,
-       ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC     = 0x41,
-       ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC       = 0x42,
-
-       ARMV7_PERFCTR_COHERENT_LINE_MISS        = 0x50,
-       ARMV7_PERFCTR_COHERENT_LINE_HIT         = 0x51,
-
-       ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES   = 0x60,
-       ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES   = 0x61,
-       ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
-       ARMV7_PERFCTR_STREX_EXECUTED_PASSED     = 0x63,
-       ARMV7_PERFCTR_STREX_EXECUTED_FAILED     = 0x64,
-       ARMV7_PERFCTR_DATA_EVICTION             = 0x65,
-       ARMV7_PERFCTR_ISSUE_STAGE_NO_INST       = 0x66,
-       ARMV7_PERFCTR_ISSUE_STAGE_EMPTY         = 0x67,
-       ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE  = 0x68,
-
-       ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
-
-       ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST   = 0x70,
-       ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
-       ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST  = 0x72,
-       ARMV7_PERFCTR_FP_EXECUTED_INST          = 0x73,
-       ARMV7_PERFCTR_NEON_EXECUTED_INST        = 0x74,
-
-       ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
-       ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES  = 0x81,
-       ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES        = 0x82,
-       ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES        = 0x83,
-       ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES  = 0x84,
-       ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES  = 0x85,
-       ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES      = 0x86,
-
-       ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES  = 0x8A,
-       ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
-
-       ARMV7_PERFCTR_ISB_INST                  = 0x90,
-       ARMV7_PERFCTR_DSB_INST                  = 0x91,
-       ARMV7_PERFCTR_DMB_INST                  = 0x92,
-       ARMV7_PERFCTR_EXT_INTERRUPTS            = 0x93,
-
-       ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED     = 0xA0,
-       ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED       = 0xA1,
-       ARMV7_PERFCTR_PLE_FIFO_FLUSH            = 0xA2,
-       ARMV7_PERFCTR_PLE_RQST_COMPLETED        = 0xA3,
-       ARMV7_PERFCTR_PLE_FIFO_OVERFLOW         = 0xA4,
-       ARMV7_PERFCTR_PLE_RQST_PROG             = 0xA5
+       ARMV7_A9_PERFCTR_INSTR_CORE_RENAME              = 0x68,
+       ARMV7_A9_PERFCTR_STALL_ICACHE                   = 0x60,
+       ARMV7_A9_PERFCTR_STALL_DISPATCH                 = 0x66,
 };
 
 /* ARMv7 Cortex-A5 specific event types */
 enum armv7_a5_perf_types {
-       ARMV7_PERFCTR_IRQ_TAKEN                 = 0x86,
-       ARMV7_PERFCTR_FIQ_TAKEN                 = 0x87,
-
-       ARMV7_PERFCTR_EXT_MEM_RQST              = 0xc0,
-       ARMV7_PERFCTR_NC_EXT_MEM_RQST           = 0xc1,
-       ARMV7_PERFCTR_PREFETCH_LINEFILL         = 0xc2,
-       ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP    = 0xc3,
-       ARMV7_PERFCTR_ENTER_READ_ALLOC          = 0xc4,
-       ARMV7_PERFCTR_READ_ALLOC                = 0xc5,
-
-       ARMV7_PERFCTR_STALL_SB_FULL             = 0xc9,
+       ARMV7_A5_PERFCTR_PREFETCH_LINEFILL              = 0xc2,
+       ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP         = 0xc3,
 };
 
 /* ARMv7 Cortex-A15 specific event types */
 enum armv7_a15_perf_types {
-       ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS     = 0x40,
-       ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS    = 0x41,
-       ARMV7_PERFCTR_L1_DCACHE_READ_REFILL     = 0x42,
-       ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL    = 0x43,
+       ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ         = 0x40,
+       ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE        = 0x41,
+       ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ         = 0x42,
+       ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE        = 0x43,
 
-       ARMV7_PERFCTR_L1_DTLB_READ_REFILL       = 0x4C,
-       ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL      = 0x4D,
+       ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ           = 0x4C,
+       ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE          = 0x4D,
 
-       ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS     = 0x50,
-       ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS    = 0x51,
-       ARMV7_PERFCTR_L2_DCACHE_READ_REFILL     = 0x52,
-       ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL    = 0x53,
+       ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ          = 0x50,
+       ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE         = 0x51,
+       ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ          = 0x52,
+       ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE         = 0x53,
 
-       ARMV7_PERFCTR_SPEC_PC_WRITE             = 0x76,
+       ARMV7_A15_PERFCTR_PC_WRITE_SPEC                 = 0x76,
 };
 
 /*
@@ -197,13 +119,15 @@ enum armv7_a15_perf_types {
  * accesses/misses in hardware.
  */
 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+       [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 * combined.
                 */
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
+                       [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        },
        [C(LL)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
+                       [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
+                       [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  * Cortex-A9 HW events mapping
  */
 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        =
-                                       ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_DCACHE_ACCESS,
-       [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_DCACHE_REFILL,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+       [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV7_A9_PERFCTR_STALL_DISPATCH,
 };
 
 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 * combined.
                 */
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  * Cortex-A5 HW events mapping
  */
 static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+       [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_DCACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_DCACHE_ACCESS,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_DCACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+                       [C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+                       [C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                /*
                 * The prefetch counters don't differentiate between the I
                 * side and the D side.
                 */
                [C(OP_PREFETCH)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+                       [C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+                       [C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
                },
        },
        [C(LL)] = {
@@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -543,13 +458,25 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -562,13 +489,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  * Cortex-A15 HW events mapping
  */
 static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_BUS_CYCLES,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+       [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -576,16 +505,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
+                       [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+                       [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -601,11 +526,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -614,16 +539,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        },
        [C(LL)] = {
                [C(OP_READ)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
+                       [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]
-                                       = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
+                       [C(RESULT_ACCESS)]      = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
+                       [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -633,13 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
+                       [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
+                       [C(RESULT_MISS)]        = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -649,11 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -663,13 +582,25 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
-                       [C(RESULT_MISS)]
-                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
index 9fc2c95..a9bf92b 100644 (file)
@@ -48,13 +48,15 @@ enum xscale_counters {
 };
 
 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = XSCALE_PERFCTR_CCNT,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = XSCALE_PERFCTR_INSTRUCTION,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = XSCALE_PERFCTR_BRANCH_MISS,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = XSCALE_PERFCTR_CCNT,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = XSCALE_PERFCTR_INSTRUCTION,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]            = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = XSCALE_PERFCTR_BRANCH,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = XSCALE_PERFCTR_BRANCH_MISS,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
 };
 
 static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -81,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
index d9e3c61..cda4286 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
 #include <asm/mach/time.h>
+#include <asm/tls.h>
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
@@ -57,7 +58,7 @@ static const char *isa_modes[] = {
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-extern void setup_mm_for_reboot(char mode);
+extern void setup_mm_for_reboot(void);
 
 static volatile int hlt_counter;
 
@@ -92,18 +93,24 @@ static int __init hlt_setup(char *__unused)
 __setup("nohlt", nohlt_setup);
 __setup("hlt", hlt_setup);
 
-void arm_machine_restart(char mode, const char *cmd)
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+typedef void (*phys_reset_t)(unsigned long);
+
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
 {
-       /* Disable interrupts first */
-       local_irq_disable();
-       local_fiq_disable();
+       phys_reset_t phys_reset;
 
-       /*
-        * Tell the mm system that we are going to reboot -
-        * we may need it to insert some 1:1 mappings so that
-        * soft boot works.
-        */
-       setup_mm_for_reboot(mode);
+       /* Take out a flat memory mapping. */
+       setup_mm_for_reboot();
 
        /* Clean and invalidate caches */
        flush_cache_all();
@@ -114,19 +121,41 @@ void arm_machine_restart(char mode, const char *cmd)
        /* Push out any further dirty data, and ensure cache is empty */
        flush_cache_all();
 
-       /*
-        * Now call the architecture specific reboot code.
-        */
-       arch_reset(mode, cmd);
+       /* Switch to the identity mapping. */
+       phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+       phys_reset((unsigned long)addr);
 
-       /*
-        * Whoops - the architecture was unable to reboot.
-        * Tell the user!
-        */
-       mdelay(1000);
-       printk("Reboot failed -- System halted\n");
+       /* Should never get here. */
+       BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+       u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+       /* Disable interrupts first */
        local_irq_disable();
-       while (1);
+       local_fiq_disable();
+
+       /* Disable the L2 if we're the last man standing. */
+       if (num_online_cpus() == 1)
+               outer_disable();
+
+       /* Change to the new stack and continue with the reset. */
+       call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+       /* Should never get here. */
+       BUG();
+}
+
+void arm_machine_restart(char mode, const char *cmd)
+{
+       /* Disable interrupts first */
+       local_irq_disable();
+       local_fiq_disable();
+
+       /* Call the architecture specific reboot code. */
+       arch_reset(mode, cmd);
 }
 
 /*
@@ -255,7 +284,16 @@ void machine_power_off(void)
 void machine_restart(char *cmd)
 {
        machine_shutdown();
+
        arm_pm_restart(reboot_mode, cmd);
+
+       /* Give a grace period for failure to restart of 1s */
+       mdelay(1000);
+
+       /* Whoops - the platform was unable to reboot. Tell the user! */
+       printk("Reboot failed -- System halted\n");
+       local_irq_disable();
+       while (1);
 }
 
 void __show_regs(struct pt_regs *regs)
@@ -377,7 +415,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
        clear_ptrace_hw_breakpoint(p);
 
        if (clone_flags & CLONE_SETTLS)
-               thread->tp_value = regs->ARM_r3;
+               thread->tp_value[0] = regs->ARM_r3;
+       thread->tp_value[1] = get_tpuser();
 
        thread_notify(THREAD_NOTIFY_COPY, thread);
 
index 90fa8b3..7ded864 100644 (file)
@@ -842,7 +842,7 @@ long arch_ptrace(struct task_struct *child, long request,
 #endif
 
                case PTRACE_GET_THREAD_AREA:
-                       ret = put_user(task_thread_info(child)->tp_value,
+                       ret = put_user(task_thread_info(child)->tp_value[0],
                                       datap);
                        break;
 
index 8085417..2bd1926 100644 (file)
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
        struct return_address_data *data = d;
 
        if (!data->level) {
-               data->addr = (void *)frame->lr;
+               data->addr = (void *)frame->pc;
 
                return 1;
        } else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
        struct stackframe frame;
        register unsigned long current_sp asm ("sp");
 
-       data.level = level + 1;
+       data.level = level + 2;
+       data.addr = NULL;
 
        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.sp = current_sp;
@@ -56,17 +57,6 @@ void *return_address(unsigned int level)
                return NULL;
 }
 
-#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
-#if defined(CONFIG_ARM_UNWIND)
-#warning "TODO: return_address should use unwind tables"
-#endif
-
-void *return_address(unsigned int level)
-{
-       return NULL;
-}
-
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
 
 EXPORT_SYMBOL_GPL(return_address);
index 9a46370..5416c7c 100644 (file)
 
 #include <asm/sched_clock.h>
 
+struct clock_data {
+       u64 epoch_ns;
+       u32 epoch_cyc;
+       u32 epoch_cyc_copy;
+       u32 mult;
+       u32 shift;
+};
+
 static void sched_clock_poll(unsigned long wrap_ticks);
 static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
-static void (*sched_clock_update_fn)(void);
+
+static struct clock_data cd = {
+       .mult   = NSEC_PER_SEC / HZ,
+};
+
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
+
+static u32 notrace jiffy_sched_clock_read(void)
+{
+       return (u32)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+       return (cyc * mult) >> shift;
+}
+
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+{
+       u64 epoch_ns;
+       u32 epoch_cyc;
+
+       /*
+        * Load the epoch_cyc and epoch_ns atomically.  We do this by
+        * ensuring that we always write epoch_cyc, epoch_ns and
+        * epoch_cyc_copy in strict order, and read them in strict order.
+        * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+        * the middle of an update, and we should repeat the load.
+        */
+       do {
+               epoch_cyc = cd.epoch_cyc;
+               smp_rmb();
+               epoch_ns = cd.epoch_ns;
+               smp_rmb();
+       } while (epoch_cyc != cd.epoch_cyc_copy);
+
+       return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+}
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+       unsigned long flags;
+       u32 cyc;
+       u64 ns;
+
+       cyc = read_sched_clock();
+       ns = cd.epoch_ns +
+               cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+                         cd.mult, cd.shift);
+       /*
+        * Write epoch_cyc and epoch_ns in a way that the update is
+        * detectable in cyc_to_sched_clock().
+        */
+       raw_local_irq_save(flags);
+       cd.epoch_cyc = cyc;
+       smp_wmb();
+       cd.epoch_ns = ns;
+       smp_wmb();
+       cd.epoch_cyc_copy = cyc;
+       raw_local_irq_restore(flags);
+}
 
 static void sched_clock_poll(unsigned long wrap_ticks)
 {
        mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
-       sched_clock_update_fn();
+       update_sched_clock();
 }
 
-void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
-       unsigned int clock_bits, unsigned long rate)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
        unsigned long r, w;
        u64 res, wrap;
        char r_unit;
 
-       sched_clock_update_fn = update;
+       BUG_ON(bits > 32);
+       WARN_ON(!irqs_disabled());
+       WARN_ON(read_sched_clock != jiffy_sched_clock_read);
+       read_sched_clock = read;
+       sched_clock_mask = (1 << bits) - 1;
 
        /* calculate the mult/shift to convert counter ticks to ns. */
-       clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
+       clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
 
        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
-       } else {
+       } else if (r >= 1000) {
                r /= 1000;
                r_unit = 'k';
-       }
+       } else
+               r_unit = ' ';
 
        /* calculate how many ns until we wrap */
-       wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
+       wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
        do_div(wrap, NSEC_PER_MSEC);
        w = wrap;
 
        /* calculate the ns resolution of this counter */
-       res = cyc_to_ns(1ULL, cd->mult, cd->shift);
+       res = cyc_to_ns(1ULL, cd.mult, cd.shift);
        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
-               clock_bits, r, r_unit, res, w);
+               bits, r, r_unit, res, w);
 
        /*
         * Start the timer to keep sched_clock() properly updated and
         * sets the initial epoch.
         */
        sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
-       update();
+       update_sched_clock();
 
        /*
         * Ensure that sched_clock() starts off at 0ns
         */
-       cd->epoch_ns = 0;
+       cd.epoch_ns = 0;
+
+       pr_debug("Registered %pF as sched_clock source\n", read);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+       u32 cyc = read_sched_clock();
+       return cyc_to_sched_clock(cyc, sched_clock_mask);
 }
 
 void __init sched_clock_postinit(void)
 {
+       /*
+        * If no sched_clock function has been provided at that point,
+        * make it the final one.
+        */
+       if (read_sched_clock == jiffy_sched_clock_read)
+               setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+
        sched_clock_poll(sched_clock_timer.data);
 }
index 8fc2c8f..e482ef9 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/memblock.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/sort.h>
 
 #include <asm/unified.h>
 #include <asm/cpu.h>
@@ -78,6 +79,7 @@ __setup("fpe=", fpe_setup);
 extern void paging_init(struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -890,6 +892,12 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
        return mdesc;
 }
 
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+       const struct membank *a = _a, *b = _b;
+       long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+       return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
 
 void __init setup_arch(char **cmdline_p)
 {
@@ -902,12 +910,8 @@ void __init setup_arch(char **cmdline_p)
        machine_desc = mdesc;
        machine_name = mdesc->name;
 
-#ifdef CONFIG_ZONE_DMA
-       if (mdesc->dma_zone_size) {
-               extern unsigned long arm_dma_zone_size;
-               arm_dma_zone_size = mdesc->dma_zone_size;
-       }
-#endif
+       setup_dma_zone(mdesc);
+
        if (mdesc->soft_reboot)
                reboot_setup("s");
 
@@ -922,6 +926,7 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
+       sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);
 
index 020e99c..1f268bd 100644 (file)
@@ -54,14 +54,18 @@ ENDPROC(cpu_suspend_abort)
  * r0 = control register value
  */
        .align  5
+       .pushsection    .idmap.text,"ax"
 ENTRY(cpu_resume_mmu)
        ldr     r3, =cpu_resume_after_mmu
+       instr_sync
        mcr     p15, 0, r0, c1, c0, 0   @ turn on MMU, I-cache, etc
        mrc     p15, 0, r0, c0, c0, 0   @ read id reg
+       instr_sync
        mov     r0, r0
        mov     r0, r0
        mov     pc, r3                  @ jump to virtual address
 ENDPROC(cpu_resume_mmu)
+       .popsection
 cpu_resume_after_mmu:
        bl      cpu_init                @ restore the und/abt/irq banked regs
        mov     r0, #0                  @ return zero on success
index bfa0eeb..250106d 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/exception.h>
+#include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 {
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
-       pgd_t *pgd;
        int ret;
 
        /*
@@ -83,30 +83,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
                init_idle(idle, cpu);
        }
 
-       /*
-        * Allocate initial page tables to allow the new CPU to
-        * enable the MMU safely.  This essentially means a set
-        * of our "standard" page tables, with the addition of
-        * a 1:1 mapping for the physical address of the kernel.
-        */
-       pgd = pgd_alloc(&init_mm);
-       if (!pgd)
-               return -ENOMEM;
-
-       if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-               identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-               identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
-               identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
-       }
-
        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-       secondary_data.pgdir = virt_to_phys(pgd);
+       secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;
 
-       if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-               identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-               identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
-               identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
-       }
-
-       pgd_free(&init_mm, pgd);
-
        return ret;
 }
 
@@ -554,6 +526,10 @@ static void ipi_cpu_stop(unsigned int cpu)
        local_fiq_disable();
        local_irq_disable();
 
+#ifdef CONFIG_HOTPLUG_CPU
+       platform_cpu_kill(cpu);
+#endif
+
        while (1)
                cpu_relax();
 }
index 93a22d2..1794cc3 100644 (file)
@@ -1,13 +1,12 @@
 #include <linux/init.h>
 
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-static pgd_t *suspend_pgd;
-
 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
 extern void cpu_resume_mmu(void);
 
@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
        *save_ptr = virt_to_phys(ptr);
 
        /* This must correspond to the LDM in cpu_resume() assembly */
-       *ptr++ = virt_to_phys(suspend_pgd);
+       *ptr++ = virt_to_phys(idmap_pgd);
        *ptr++ = sp;
        *ptr++ = virt_to_phys(cpu_do_resume);
 
@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        struct mm_struct *mm = current->active_mm;
        int ret;
 
-       if (!suspend_pgd)
+       if (!idmap_pgd)
                return -EINVAL;
 
        /*
@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 
        return ret;
 }
-
-static int __init cpu_suspend_init(void)
-{
-       suspend_pgd = pgd_alloc(&init_mm);
-       if (suspend_pgd) {
-               unsigned long addr = virt_to_phys(cpu_resume_mmu);
-               identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
-       }
-       return suspend_pgd ? 0 : -ENOMEM;
-}
-core_initcall(cpu_suspend_init);
index 5d9b1ee..ab1017b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/syscalls.h>
 #include <linux/perf_event.h>
 
+#include <asm/opcodes.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
@@ -187,6 +188,21 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
 
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
+       res = arm_check_condition(instr, regs->ARM_cpsr);
+       switch (res) {
+       case ARM_OPCODE_CONDTEST_PASS:
+               break;
+       case ARM_OPCODE_CONDTEST_FAIL:
+               /* Condition failed - return to next instruction */
+               regs->ARM_pc += 4;
+               return 0;
+       case ARM_OPCODE_CONDTEST_UNCOND:
+               /* If unconditional encoding - not a SWP, undef */
+               return -EFAULT;
+       default:
+               return -EINVAL;
+       }
+
        if (current->pid != previous_pid) {
                pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
                         current->comm, (unsigned long)current->pid);
diff --git a/arch/arm/kernel/sysfs_v7.c b/arch/arm/kernel/sysfs_v7.c
new file mode 100644 (file)
index 0000000..fcafe95
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ *  linux/arch/arm/kernel/sysfs_v7.c
+ *
+ *  Copyright (C) 2008 Mans Rullgard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/sysdev.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+#define SETBITS(val, bits, new)                        \
+       do {                                    \
+               val &= ~bits;                   \
+               val |= new & bits;              \
+       } while (0)
+
+#define SHOW_REG(name, opc1, crn, crm, opc2)                           \
+static ssize_t name##_show(struct sys_device *dev,                     \
+                          struct sysdev_attribute *attr,               \
+                          char *buf)                                   \
+{                                                                      \
+       unsigned val;                                                   \
+       asm ("mrc p15,"#opc1", %0,"#crn","#crm","#opc2 : "=r"(val));    \
+       return snprintf(buf, PAGE_SIZE, "%08x\n", val);                 \
+}
+
+#define STORE_REG(name, opc1, crn, crm, opc2, bits)                    \
+static ssize_t name##_store(struct sys_device *dev,                    \
+                           struct sysdev_attribute *attr,              \
+                           const char *buf, size_t size)               \
+{                                                                      \
+       char *end;                                                      \
+       unsigned new = simple_strtoul(buf, &end, 0);                    \
+       unsigned val;                                                   \
+                                                                       \
+       if (end == buf)                                                 \
+               return -EINVAL;                                         \
+                                                                       \
+       asm ("mrc p15,"#opc1", %0,"#crn","#crm","#opc2 : "=r"(val));    \
+       SETBITS(val, bits, new);                                        \
+       asm ("mcr p15,"#opc1", %0,"#crn","#crm","#opc2 :: "r"(val));    \
+                                                                       \
+       return end - buf;                                               \
+}
+
+#define RD_REG(name, opc1, crn, crm, opc2)                             \
+       SHOW_REG(name, opc1, crn, crm, opc2)                            \
+       static SYSDEV_ATTR(name, S_IRUGO|S_IWUSR, name##_show, NULL)
+
+#define RDWR_REG(name, opc1, crn, crm, opc2, bits)                     \
+       SHOW_REG(name, opc1, crn, crm, opc2)                            \
+       STORE_REG(name, opc1, crn, crm, opc2, bits)                     \
+       static SYSDEV_ATTR(name, S_IRUGO|S_IWUSR, name##_show, name##_store)
+
+RDWR_REG(control, 0, c1, c0, 0, 0x802);
+
+SHOW_REG(aux_ctl, 0, c1, c0, 1)
+
+#ifdef CONFIG_ARCH_OMAP34XX
+static ssize_t aux_ctl_store(struct sys_device *dev,
+                            struct sysdev_attribute *attr,
+                            const char *buf, size_t size)
+{
+       char *end;
+       unsigned new = simple_strtoul(buf, &end, 0);
+       unsigned val;
+
+       if (end == buf)
+               return -EINVAL;
+
+       asm ("mrc p15, 0, %0, c1, c0, 1" : "=r"(val));
+       SETBITS(val, 0xff8, new);
+       val &= ~2;
+       asm ("mov r0,  %0       \n\t"
+            "mov r12, #3       \n\t"
+            "smc #0            \n\t"
+            :: "r"(val) : "r0", "r12");
+
+       return end - buf;
+}
+#define AUX_WR S_IWUSR
+#else
+#define aux_ctl_store NULL
+#define AUX_WR 0
+#endif
+
+static SYSDEV_ATTR(aux_control, S_IRUGO|AUX_WR, aux_ctl_show, aux_ctl_store);
+
+SHOW_REG(l2_aux_ctl, 1, c9, c0, 2)
+
+#ifdef CONFIG_ARCH_OMAP34XX
+static ssize_t l2_aux_ctl_store(struct sys_device *dev,
+                               struct sysdev_attribute *attr,
+                               const char *buf, size_t size)
+{
+       char *end;
+       unsigned new = simple_strtoul(buf, &end, 0);
+       unsigned val;
+
+       if (end == buf)
+               return -EINVAL;
+
+       asm ("mrc p15, 1, %0, c9, c0, 2" : "=r"(val));
+       SETBITS(val, 0xbc00000, new);
+       asm ("mov r0,  %0       \n\t"
+            "mov r12, #2       \n\t"
+            "smc #0            \n\t"
+            :: "r"(val) : "r0", "r12");
+
+       return end - buf;
+}
+#define L2AUX_WR S_IWUSR
+#else
+#define l2_aux_ctl_store NULL
+#define L2AUX_WR 0
+#endif
+
+static SYSDEV_ATTR(l2_aux_control, S_IRUGO|L2AUX_WR,
+                  l2_aux_ctl_show, l2_aux_ctl_store);
+
+RDWR_REG(pmon_pmnc,   0, c9, c12, 0, 0x3f)
+RDWR_REG(pmon_cntens, 0, c9, c12, 1, 0x8000000f)
+RDWR_REG(pmon_cntenc, 0, c9, c12, 2, 0x8000000f)
+RDWR_REG(pmon_ccnt,   0, c9, c13, 0, 0xffffffff)
+RDWR_REG(pmon_useren, 0, c9, c14, 0, 1)
+
+#define REG_ATTR(sysdev, name)                                         \
+       do {                                                            \
+               int err = sysfs_create_file(&sysdev->kobj, &name.attr); \
+               WARN_ON(err != 0);                                      \
+       } while (0)
+
+static int __init cpu_sysfs_init(void)
+{
+       struct sys_device *sysdev;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               sysdev = get_cpu_sysdev(cpu);
+               REG_ATTR(sysdev, attr_control);
+               REG_ATTR(sysdev, attr_aux_control);
+               REG_ATTR(sysdev, attr_l2_aux_control);
+               REG_ATTR(sysdev, attr_pmon_pmnc);
+               REG_ATTR(sysdev, attr_pmon_cntens);
+               REG_ATTR(sysdev, attr_pmon_cntenc);
+               REG_ATTR(sysdev, attr_pmon_ccnt);
+               REG_ATTR(sysdev, attr_pmon_useren);
+       }
+
+       return 0;
+}
+device_initcall(cpu_sysfs_init);
index d45fd22..071772c 100644 (file)
@@ -48,7 +48,7 @@ static const char *handler[]= {
 void *vectors_page;
 
 #ifdef CONFIG_DEBUG_USER
-unsigned int user_debug;
+unsigned int user_debug = UDBG_SEGV_SHORT;
 
 static int __init user_debug_setup(char *str)
 {
@@ -472,14 +472,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
        return regs->ARM_r0;
 }
 
-static inline void
+static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
 
        if (end < start || flags)
-               return;
+               return -EINVAL;
 
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
@@ -490,10 +490,10 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
                        end = vma->vm_end;
 
                up_read(&mm->mmap_sem);
-               flush_cache_user_range(start, end);
-               return;
+               return flush_cache_user_range(start, end);
        }
        up_read(&mm->mmap_sem);
+       return -EINVAL;
 }
 
 /*
@@ -539,8 +539,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
         * the specified region).
         */
        case NR(cacheflush):
-               do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
-               return 0;
+               return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
 
        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
@@ -555,7 +554,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
                return regs->ARM_r0;
 
        case NR(set_tls):
-               thread->tp_value = regs->ARM_r0;
+               thread->tp_value[0] = regs->ARM_r0;
                if (tls_emu)
                        return 0;
                if (has_tls_reg) {
@@ -673,7 +672,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
-       regs->uregs[reg] = current_thread_info()->tp_value;
+       regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
 }
index 20b3041..1077e4f 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
        *(.proc.info.init)                                              \
        VMLINUX_SYMBOL(__proc_info_end) = .;
 
+#define IDMAP_TEXT                                                     \
+       ALIGN_FUNCTION();                                               \
+       VMLINUX_SYMBOL(__idmap_text_start) = .;                         \
+       *(.idmap.text)                                                  \
+       VMLINUX_SYMBOL(__idmap_text_end) = .;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
 #define ARM_CPU_KEEP(x)                x
@@ -92,6 +99,7 @@ SECTIONS
                        SCHED_TEXT
                        LOCK_TEXT
                        KPROBES_TEXT
+                       IDMAP_TEXT
 #ifdef CONFIG_MMU
                        *(.fixup)
 #endif
@@ -174,7 +182,7 @@ SECTIONS
        }
 #endif
 
-       PERCPU_SECTION(32)
+       PERCPU_SECTION(L1_CACHE_BYTES)
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
@@ -205,8 +213,8 @@ SECTIONS
 #endif
 
                NOSAVE_DATA
-               CACHELINE_ALIGNED_DATA(32)
-               READ_MOSTLY_DATA(32)
+               CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+               READ_MOSTLY_DATA(L1_CACHE_BYTES)
 
                /*
                 * The exception fixup table (might need resorting at runtime)
index cf73a7f..0ade0ac 100644 (file)
@@ -13,7 +13,8 @@ lib-y         := backtrace.o changebit.o csumipv6.o csumpartial.o   \
                   testchangebit.o testclearbit.o testsetbit.o        \
                   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
                   ucmpdi2.o lib1funcs.o div64.o                      \
-                  io-readsb.o io-writesb.o io-readsl.o io-writesl.o
+                  io-readsb.o io-writesb.o io-readsl.o io-writesl.o  \
+                  call_with_stack.o
 
 mmu-y  := clear_user.o copy_page.o getuser.o putuser.o
 
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
new file mode 100644 (file)
index 0000000..916c80f
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * arch/arm/lib/call_with_stack.S
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Written by Will Deacon <will.deacon@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * void call_with_stack(void (*fn)(void *), void *arg, void *sp)
+ *
+ * Change the stack to that pointed at by sp, then invoke fn(arg) with
+ * the new stack.
+ */
+ENTRY(call_with_stack)
+       str     sp, [r2, #-4]!
+       str     lr, [r2, #-4]!
+
+       mov     sp, r2
+       mov     r2, r0
+       mov     r0, r1
+
+       adr     lr, BSYM(1f)
+       mov     pc, r2
+
+1:     ldr     lr, [sp]
+       ldr     sp, [sp, #4]
+       mov     pc, lr
+ENDPROC(call_with_stack)
index 31d3cb3..ba9e754 100644 (file)
@@ -125,6 +125,7 @@ ENTRY(csum_partial)
                adcs    sum, sum, td1
                adcs    sum, sum, td2
                adcs    sum, sum, td3
+       PLD(    pld     [buf, #128]             )
                sub     ip, ip, #32
                teq     ip, #0
                bne     2b
index 3c9a05c..0fc77ec 100644 (file)
@@ -43,6 +43,8 @@ ENTRY(__const_udelay)                         @ 0 <= r0 <= 0x7fffff06
  * Oh, if only we had a cycle counter...
  */
 
+.align 3
+
 @ Delay routine
 ENTRY(__delay)
                subs    r0, r0, #1
index 025f742..ae9fde2 100644 (file)
@@ -40,7 +40,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
                return 0;
 
        pmd = pmd_offset(pud, addr);
-       if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+       if (unlikely(pmd_none(*pmd)))
+               return 0;
+
+       /*
+        * A pmd can be bad if it refers to a HugeTLB or THP page.
+        *
+        * Both THP and HugeTLB pages have the same pmd layout
+        * and should not be manipulated by the pte functions.
+        * 
+        * Lock the page table for the destination and check
+        * to see that it's still huge and whether or not we will
+        * need to fault on write, or if we have a splitting THP.
+        */
+       if (unlikely(pmd_thp_or_huge(*pmd))) {
+               ptl = &current->mm->page_table_lock;
+               spin_lock(ptl);
+               if (unlikely( !pmd_thp_or_huge(*pmd)
+                       || pmd_hugewillfault(*pmd)
+                       || pmd_trans_splitting(*pmd))) {
+                       spin_unlock(ptl);
+                       return 0;
+               }
+
+               *ptep = NULL;
+               *ptlp = ptl;
+               return 1;
+       }
+
+       if (unlikely(pmd_bad(*pmd)))
                return 0;
 
        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +122,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
                from += tocopy;
                n -= tocopy;
 
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap_unlock(pte, ptl);
+               else
+                       spin_unlock(ptl);
        }
        if (!atomic)
                up_read(&current->mm->mmap_sem);
@@ -147,7 +178,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
                addr += tocopy;
                n -= tocopy;
 
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap_unlock(pte, ptl);
+               else
+                       spin_unlock(ptl);
        }
        up_read(&current->mm->mmap_sem);
 
diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/arch/arm/mach-at91/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 8e4a1bd..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#include <mach/hardware.h>
-
-#define VMALLOC_END            (AT91_VIRT_BASE & PGDIR_MASK)
-
-#endif
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 7397bd7..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-/*
- * Move VMALLOC_END to 0xf0000000 so that the vm space can range from
- * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
- * larger physical memory designs better.
- */
-#define VMALLOC_END       0xf0000000UL
index f916cd7..6c11993 100644 (file)
@@ -34,7 +34,7 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       soft_restart(0);
 }
 
 #endif
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 467b961..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-clps711x/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 1dd231d..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright 2000 Russell King.
- * Copyright 2003 ARM Limited
- * Copyright 2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
-#define VMALLOC_END            0xd8000000UL
diff --git a/arch/arm/mach-davinci/include/mach/vmalloc.h b/arch/arm/mach-davinci/include/mach/vmalloc.h
deleted file mode 100644 (file)
index d49646a..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * DaVinci vmalloc definitions
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <mach/hardware.h>
-
-/* Allow vmalloc range until the IO virtual range minus a 2M "hole" */
-#define VMALLOC_END      (IO_VIRT - (2<<20))
diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h
deleted file mode 100644 (file)
index a28792c..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfd800000UL
index 9a26245..0d5df72 100644 (file)
@@ -34,6 +34,6 @@ static inline void arch_idle(void)
        asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
 }
 
-#define arch_reset(mode, cmd)  cpu_reset(0x80000000)
+#define arch_reset(mode, cmd)  soft_restart(0x80000000)
 
 #endif
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
deleted file mode 100644 (file)
index ea141b7..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  arch/arm/mach-ebsa110/include/mach/vmalloc.h
- *
- *  Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END       0xdf000000UL
diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 1b3f25d..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfe800000UL
diff --git a/arch/arm/mach-exynos/include/mach/vmalloc.h b/arch/arm/mach-exynos/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 284330e..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/vmalloc.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Based on arch/arm/mach-s5p6440/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * EXYNOS4 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H __FILE__
-
-#define VMALLOC_END    0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
index 0b29315..249f895 100644 (file)
@@ -24,7 +24,7 @@ static inline void arch_reset(char mode, const char *cmd)
                /*
                 * Jump into the ROM
                 */
-               cpu_reset(0x41000000);
+               soft_restart(0x41000000);
        } else {
                if (machine_is_netwinder()) {
                        /* open up the SuperIO chip
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 40ba78e..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  arch/arm/mach-footbridge/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#define VMALLOC_END       0xf0000000UL
diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 45371eb..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#define VMALLOC_END    0xf0000000UL
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 8520b4a..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-h720x/include/mach/vmalloc.h
- */
-
-#ifndef __ARCH_ARM_VMALLOC_H
-#define __ARCH_ARM_VMALLOC_H
-
-#define VMALLOC_END       0xd0000000UL
-
-#endif
diff --git a/arch/arm/mach-highbank/include/mach/vmalloc.h b/arch/arm/mach-highbank/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 1969e95..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#define VMALLOC_END            0xFEE00000UL
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 2f5a2ba..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-integrator/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-iop13xx/include/mach/vmalloc.h b/arch/arm/mach-iop13xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index c534567..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _VMALLOC_H_
-#define _VMALLOC_H_
-#define VMALLOC_END    0xfa000000UL
-#endif
index a4b808f..4865a9b 100644 (file)
@@ -30,5 +30,5 @@ static inline void arch_reset(char mode, const char *cmd)
        *IOP3XX_PCSR = 0x30;
 
        /* Jump into ROM at address 0 */
-       cpu_reset(0);
+       soft_restart(0);
 }
diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h
deleted file mode 100644 (file)
index c4862d4..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfe000000UL
index f192a34..86d1b20 100644 (file)
@@ -19,5 +19,5 @@ static inline void arch_reset(char mode, const char *cmd)
        *IOP3XX_PCSR = 0x30;
 
        /* Jump into ROM at address 0 */
-       cpu_reset(0);
+       soft_restart(0);
 }
diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 48331dc..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfe000000UL
diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 61c8dae..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-ixp2000/include/mach/vmalloc.h
- *
- * Author: Naeem Afzal <naeem.m.afzal@intel.com>
- *
- * Copyright 2002 Intel Corp.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_END        0xfb000000UL
diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 896c56a..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-ixp23xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2005 MontaVista Software, Inc.
- *
- * NPU mappings end at 0xf0000000 and we allocate 64MB for board
- * specific static I/O.
- */
-
-#define VMALLOC_END    (0xec000000UL)
index caf28fc..7eeb623 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <linux/sched.h>
 #include <linux/tty.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
@@ -447,18 +446,9 @@ void __init ixp4xx_sys_init(void)
 /*
  * sched_clock()
  */
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace ixp4xx_read_sched_clock(void)
 {
-       u32 cyc = *IXP4XX_OSTS;
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace ixp4xx_update_sched_clock(void)
-{
-       u32 cyc = *IXP4XX_OSTS;
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return *IXP4XX_OSTS;
 }
 
 /*
@@ -474,7 +464,7 @@ unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
-       init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
+       setup_sched_clock(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq);
 
        clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
                        ixp4xx_clocksource_read);
index c30e7e9..034bb2a 100644 (file)
@@ -23,8 +23,6 @@
 #define PCIBIOS_MAX_MEM                0x4BFFFFFF
 #endif
 
-#define ARCH_HAS_DMA_SET_COHERENT_MASK
-
 /* Register locations and bits */
 #include "ixp4xx-regs.h"
 
index 54c0af7..24337d9 100644 (file)
@@ -26,7 +26,7 @@ static inline void arch_reset(char mode, const char *cmd)
 {
        if ( 1 && mode == 's') {
                /* Jump into ROM at address 0 */
-               cpu_reset(0);
+               soft_restart(0);
        } else {
                /* Use on-chip reset capability */
 
diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 9bcd64d..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/vmalloc.h
- */
-#define VMALLOC_END       (0xff000000UL)
-
diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h
deleted file mode 100644 (file)
index bf162ca..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfe800000UL
index fb1dda9..ceb19c9 100644 (file)
@@ -32,7 +32,7 @@ static void arch_reset(char mode, const char *cmd)
        unsigned int reg;
 
        if (mode == 's')
-               cpu_reset(0);
+               soft_restart(0);
 
        /* disable timer0 */
        reg = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
diff --git a/arch/arm/mach-ks8695/include/mach/vmalloc.h b/arch/arm/mach-ks8695/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 744ac66..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-ks8695/include/mach/vmalloc.h
- *
- * Copyright (C) 2006 Ben Dooks
- * Copyright (C) 2006 Simtec Electronics <linux@simtec.co.uk>
- *
- * KS8695 vmalloc definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END      (KS8695_IO_VA & PGDIR_MASK)
-
-#endif
diff --git a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h b/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 720fa43..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/vmalloc.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END    0xF0000000UL
-
-#endif
index 1a8a25e..cb06379 100644 (file)
@@ -19,8 +19,8 @@ static inline void arch_idle(void)
 static inline void arch_reset(char mode, const char *cmd)
 {
        if (cpu_is_pxa168())
-               cpu_reset(0xffff0000);
+               soft_restart(0xffff0000);
        else
-               cpu_reset(0);
+               soft_restart(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 1d0bac0..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * linux/arch/arm/mach-mmp/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfe000000UL
index 4e91ee6..71fc4ee 100644 (file)
@@ -25,7 +25,6 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
-#include <linux/sched.h>
 
 #include <asm/sched_clock.h>
 #include <mach/addr-map.h>
@@ -42,8 +41,6 @@
 #define MAX_DELTA              (0xfffffffe)
 #define MIN_DELTA              (16)
 
-static DEFINE_CLOCK_DATA(cd);
-
 /*
  * FIXME: the timer needs some delay to stablize the counter capture
  */
@@ -59,16 +56,9 @@ static inline uint32_t timer_read(void)
        return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1));
 }
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace mmp_read_sched_clock(void)
 {
-       u32 cyc = timer_read();
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace mmp_update_sched_clock(void)
-{
-       u32 cyc = timer_read();
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return timer_read();
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
@@ -201,7 +191,7 @@ void __init timer_init(int irq)
 {
        timer_config();
 
-       init_sched_clock(&cd, mmp_update_sched_clock, 32, CLOCK_TICK_RATE);
+       setup_sched_clock(mmp_read_sched_clock, 32, CLOCK_TICK_RATE);
 
        ckevt.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, ckevt.shift);
        ckevt.max_delta_ns = clockevent_delta2ns(MAX_DELTA, &ckevt);
index 8736aff..0c56a5a 100644 (file)
@@ -215,7 +215,7 @@ static const struct file_operations debug_ops = {
        .llseek = default_llseek,
 };
 
-static void debug_create(const char *name, mode_t mode,
+static void debug_create(const char *name, umode_t mode,
                         struct dentry *dent,
                         int (*fill)(char *buf, int max))
 {
diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
deleted file mode 100644 (file)
index ba26fe9..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END    0xfe000000UL
diff --git a/arch/arm/mach-mxs/include/mach/vmalloc.h b/arch/arm/mach-mxs/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 103b016..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *  Copyright (C) 2000 Russell King.
- *  Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_MXS_VMALLOC_H__
-#define __MACH_MXS_VMALLOC_H__
-
-/* vmalloc ending address */
-#define VMALLOC_END       0xf4000000UL
-
-#endif /* __MACH_MXS_VMALLOC_H__ */
index 20ec3bd..cab8836 100644 (file)
@@ -53,7 +53,7 @@ void arch_reset(char mode, const char *cmd)
        mdelay(50);
 
        /* We'll take a jump through zero as a poor second */
-       cpu_reset(0);
+       soft_restart(0);
 }
 
 static int __init mxs_arch_reset_init(void)
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 871f1ef..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *  arch/arm/mach-netx/include/mach/vmalloc.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h
deleted file mode 100644 (file)
index f83d574..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-#define VMALLOC_END       0xe8000000UL
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 22ec4a4..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-omap1/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END    0xd8000000UL
index a183777..92b5847 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -190,30 +189,9 @@ static __init void omap_init_mpu_timer(unsigned long rate)
  * ---------------------------------------------------------------------------
  */
 
-static DEFINE_CLOCK_DATA(cd);
-
-static inline unsigned long long notrace _omap_mpu_sched_clock(void)
-{
-       u32 cyc = ~omap_mpu_timer_read(1);
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-#ifndef CONFIG_OMAP_32K_TIMER
-unsigned long long notrace sched_clock(void)
-{
-       return _omap_mpu_sched_clock();
-}
-#else
-static unsigned long long notrace omap_mpu_sched_clock(void)
-{
-       return _omap_mpu_sched_clock();
-}
-#endif
-
-static void notrace mpu_update_sched_clock(void)
+static u32 notrace omap_mpu_read_sched_clock(void)
 {
-       u32 cyc = ~omap_mpu_timer_read(1);
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return ~omap_mpu_timer_read(1);
 }
 
 static void __init omap_init_clocksource(unsigned long rate)
@@ -223,7 +201,7 @@ static void __init omap_init_clocksource(unsigned long rate)
                        "%s: can't register clocksource!\n";
 
        omap_mpu_timer_start(1, ~0, 1);
-       init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
+       setup_sched_clock(omap_mpu_read_sched_clock, 32, rate);
 
        if (clocksource_mmio_init(&timer->read_tim, "mpu_timer2", rate,
                        300, 32, clocksource_mmio_readl_down))
@@ -254,30 +232,6 @@ static inline void omap_mpu_timer_init(void)
 }
 #endif /* CONFIG_OMAP_MPU_TIMER */
 
-#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
-static unsigned long long (*preferred_sched_clock)(void);
-
-unsigned long long notrace sched_clock(void)
-{
-       if (!preferred_sched_clock)
-               return 0;
-
-       return preferred_sched_clock();
-}
-
-static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
-{
-       if (use_32k_sched_clock)
-               preferred_sched_clock = omap_32k_sched_clock;
-       else
-               preferred_sched_clock = omap_mpu_sched_clock;
-}
-#else
-static inline void preferred_sched_clock_init(bool use_32k_sched_clcok)
-{
-}
-#endif
-
 static inline int omap_32k_timer_usable(void)
 {
        int res = false;
@@ -299,12 +253,8 @@ static inline int omap_32k_timer_usable(void)
  */
 static void __init omap1_timer_init(void)
 {
-       if (omap_32k_timer_usable()) {
-               preferred_sched_clock_init(1);
-       } else {
+       if (!omap_32k_timer_usable())
                omap_mpu_timer_init();
-               preferred_sched_clock_init(0);
-       }
 }
 
 struct sys_timer omap1_timer = {
index e1293aa..527c58d 100644 (file)
@@ -36,6 +36,7 @@ config ARCH_OMAP3
        select ARCH_HAS_OPP
        select PM_OPP if PM
        select ARM_CPU_SUSPEND if PM
+       select SOC_BUS
 
 config ARCH_OMAP4
        bool "TI OMAP4"
index b009f17..d904d31 100644 (file)
@@ -184,6 +184,10 @@ ifneq ($(CONFIG_TIDSPBRIDGE),)
 obj-y                                  += dsp.o
 endif
 
+ifneq ($(CONFIG_DRM_OMAP),)
+obj-y                                  += drm.o
+endif
+
 # Specific board support
 obj-$(CONFIG_MACH_OMAP_GENERIC)                += board-generic.o
 obj-$(CONFIG_MACH_OMAP_H4)             += board-h4.o
index f7811f4..6d22101 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/nand.h>
 #include <linux/leds.h>
+#include <linux/leds_pwm.h>
 #include <linux/input.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/gpio.h>
@@ -35,6 +36,9 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/regulator/fixed.h>
+#include <linux/i2c/vsense.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
+#include "sdram-micron-mt29c4g96mazapcjg-5.h"
 #include "hsmmc.h"
 #include "common-board-devices.h"
 
 #define PANDORA_WIFI_IRQ_GPIO          21
 #define PANDORA_WIFI_NRESET_GPIO       23
 #define OMAP3_PANDORA_TS_GPIO          94
+#define PANDORA_EN_USB_5V_GPIO         164
 
 static struct mtd_partition omap3pandora_nand_partitions[] = {
        {
@@ -88,6 +94,7 @@ static struct omap_nand_platform_data pandora_nand_data = {
        .xfer_type      = NAND_OMAP_PREFETCH_DMA,
        .parts          = omap3pandora_nand_partitions,
        .nr_parts       = ARRAY_SIZE(omap3pandora_nand_partitions),
+       .dev_ready      = true,
 };
 
 static struct gpio_led pandora_gpio_leds[] = {
@@ -121,6 +128,39 @@ static struct platform_device pandora_leds_gpio = {
        },
 };
 
+static struct led_pwm pandora_pwm_leds[] = {
+       {
+               .name                   = "pandora::keypad_bl",
+               .pwm_id                 = 0, /* LEDA */
+       }, {
+               .name                   = "pandora::power",
+               .default_trigger        = "default-on",
+               .pwm_id                 = 1, /* LEDB */
+       }, {
+               .name                   = "pandora::charger",
+               .default_trigger        = "twl4030_bci-charging",
+               .pwm_id                 = 3, /* PWM1 */
+       }
+};
+
+static struct led_pwm_platform_data pandora_pwm_led_data = {
+       .leds           = pandora_pwm_leds,
+       .num_leds       = ARRAY_SIZE(pandora_pwm_leds),
+};
+
+static struct platform_device pandora_leds_pwm = {
+       .name   = "leds-twl4030-pwm",
+       .id     = -1,
+       .dev    = {
+               .platform_data  = &pandora_pwm_led_data,
+       },
+};
+
+static struct platform_device pandora_bl = {
+       .name   = "pandora-backlight",
+       .id     = -1,
+};
+
 #define GPIO_BUTTON(gpio_num, ev_type, ev_code, act_low, descr)        \
 {                                                              \
        .gpio           = gpio_num,                             \
@@ -167,6 +207,9 @@ static struct platform_device pandora_keys_gpio = {
        },
 };
 
+/* HACK: this requires patched twl4030_keypad driver */
+#define FNKEY(row, col, code) KEY((row + 8), col, code)
+
 static const uint32_t board_keymap[] = {
        /* row, col, code */
        KEY(0, 0, KEY_9),
@@ -212,6 +255,50 @@ static const uint32_t board_keymap[] = {
        KEY(7, 2, KEY_Q),
        KEY(7, 3, KEY_LEFTSHIFT),
        KEY(7, 4, KEY_COMMA),
+       /* Fn keys */
+       FNKEY(0, 0, KEY_F9),
+       FNKEY(0, 1, KEY_F8),
+       FNKEY(0, 2, KEY_BRIGHTNESSUP),
+       FNKEY(0, 3, KEY_F13),           /* apostrophe, differs from Fn-A? */
+       FNKEY(0, 4, KEY_F22),
+       FNKEY(0, 5, KEY_F23),
+       FNKEY(1, 0, KEY_F10),
+       FNKEY(1, 1, KEY_F7),
+       FNKEY(1, 2, KEY_BRIGHTNESSDOWN),
+       FNKEY(1, 3, KEY_GRAVE),
+       FNKEY(1, 4, KEY_F14),           /* pipe/bar */
+       FNKEY(1, 5, KEY_TAB),
+       FNKEY(2, 0, KEY_INSERT),
+       FNKEY(2, 1, KEY_F6),
+       FNKEY(2, 2, KEY_F15),           /* dash */
+       FNKEY(2, 3, KEY_EQUAL),
+       FNKEY(2, 4, KEY_F16),           /* # (pound/hash) */
+       FNKEY(2, 5, KEY_FN),
+       FNKEY(3, 0, KEY_F11),
+       FNKEY(3, 1, KEY_F5),
+       FNKEY(3, 2, KEY_F17),           /* ! */
+       FNKEY(3, 3, KEY_KPPLUS),
+       FNKEY(3, 4, KEY_BACKSLASH),
+       FNKEY(4, 0, KEY_F12),
+       FNKEY(4, 1, KEY_F4),
+       FNKEY(4, 2, KEY_RIGHTBRACE),
+       FNKEY(4, 3, KEY_KPMINUS),
+       FNKEY(4, 4, KEY_QUESTION),
+       FNKEY(5, 0, KEY_F18),           /* £ (pound) */
+       FNKEY(5, 1, KEY_F3),
+       FNKEY(5, 2, KEY_LEFTBRACE),
+       FNKEY(5, 3, KEY_F19),           /* " */
+       FNKEY(5, 4, KEY_SLASH),
+       FNKEY(6, 0, KEY_YEN),
+       FNKEY(6, 1, KEY_F2),
+       FNKEY(6, 2, KEY_F20),           /* @ */
+       FNKEY(6, 3, KEY_APOSTROPHE),
+       FNKEY(6, 4, KEY_F21),           /* : */
+       FNKEY(7, 0, KEY_ENTER),
+       FNKEY(7, 1, KEY_F1),
+       FNKEY(7, 2, KEY_ESC),
+       FNKEY(7, 3, KEY_CAPSLOCK),
+       FNKEY(7, 4, KEY_SEMICOLON),
 };
 
 static struct matrix_keymap_data board_map_data = {
@@ -263,7 +350,7 @@ static void pandora_wl1251_init_card(struct mmc_card *card)
        card->cis.vendor = 0x104c;
        card->cis.device = 0x9066;
        card->cis.blksize = 512;
-       card->cis.max_dtr = 20000000;
+       card->cis.max_dtr = 24000000;
 }
 
 static struct omap2_hsmmc_info omap3pandora_mmc[] = {
@@ -271,14 +358,16 @@ static struct omap2_hsmmc_info omap3pandora_mmc[] = {
                .mmc            = 1,
                .caps           = MMC_CAP_4_BIT_DATA,
                .gpio_cd        = -EINVAL,
-               .gpio_wp        = 126,
+               //.gpio_wp      = 126,
+               .gpio_wp        = -EINVAL,
                .ext_clock      = 0,
        },
        {
                .mmc            = 2,
                .caps           = MMC_CAP_4_BIT_DATA,
                .gpio_cd        = -EINVAL,
-               .gpio_wp        = 127,
+               //.gpio_wp      = 127,
+               .gpio_wp        = -EINVAL,
                .ext_clock      = 1,
                .transceiver    = true,
        },
@@ -343,7 +432,7 @@ static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = {
 };
 
 static struct regulator_consumer_supply pandora_usb_phy_supply[] = {
-       REGULATOR_SUPPLY("hsusb0", "ehci-omap.0"),
+       REGULATOR_SUPPLY("hsusb1", "ehci-omap.0"),
 };
 
 /* ads7846 on SPI and 2 nub controllers on I2C */
@@ -355,6 +444,7 @@ static struct regulator_consumer_supply pandora_vaux4_supplies[] = {
 
 static struct regulator_consumer_supply pandora_adac_supply[] = {
        REGULATOR_SUPPLY("vcc", "soc-audio"),
+       REGULATOR_SUPPLY("lidsw", NULL),
 };
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -420,8 +510,8 @@ static struct regulator_init_data pandora_vaux2 = {
 /* VAUX4 for ads7846 and nubs */
 static struct regulator_init_data pandora_vaux4 = {
        .constraints = {
-               .min_uV                 = 2800000,
-               .max_uV                 = 2800000,
+               .min_uV                 = 3000000,
+               .max_uV                 = 3000000,
                .apply_uV               = true,
                .valid_modes_mask       = REGULATOR_MODE_NORMAL
                                        | REGULATOR_MODE_STANDBY,
@@ -474,7 +564,59 @@ static struct platform_device pandora_vwlan_device = {
        },
 };
 
-static struct twl4030_bci_platform_data pandora_bci_data;
+static char *pandora_power_supplied_to[] = {
+       "bq27500-0",
+};
+
+static struct twl4030_bci_platform_data pandora_bci_data = {
+       .supplied_to            = pandora_power_supplied_to,
+       .num_supplicants        = ARRAY_SIZE(pandora_power_supplied_to),
+};
+
+static struct twl4030_ins sleep_on_seq[] = {
+       { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_SLEEP), 2 },
+};
+
+static struct twl4030_script sleep_on_script = {
+       .script = sleep_on_seq,
+       .size   = ARRAY_SIZE(sleep_on_seq),
+       .flags  = TWL4030_SLEEP_SCRIPT,
+};
+
+static struct twl4030_ins wakeup_p3_seq[] = {
+       { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2 },
+};
+
+static struct twl4030_script wakeup_p3_script = {
+       .script = wakeup_p3_seq,
+       .size   = ARRAY_SIZE(wakeup_p3_seq),
+       .flags  = TWL4030_WAKEUP3_SCRIPT,
+};
+
+static struct twl4030_script *twl4030_scripts[] = {
+       /* wakeup script should be loaded before sleep script, otherwise a
+          board might hit retention before loading of wakeup script is
+          completed. This can cause boot failures depending on timing issues.
+       */
+       &wakeup_p3_script,
+       &sleep_on_script,
+};
+
+static struct twl4030_resconfig twl4030_rconfig[] = {
+       {
+               .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P3,
+               .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+       },
+       { 0, 0},
+};
+
+static struct twl4030_power_data pandora_power_data = {
+       .scripts        = twl4030_scripts,
+       .num            = ARRAY_SIZE(twl4030_scripts),
+       .resource_config = twl4030_rconfig,
+
+       .use_poweroff   = 1,
+};
 
 static struct twl4030_platform_data omap3pandora_twldata = {
        .gpio           = &omap3pandora_gpio_data,
@@ -486,10 +628,29 @@ static struct twl4030_platform_data omap3pandora_twldata = {
        .vsim           = &pandora_vsim,
        .keypad         = &pandora_kp_data,
        .bci            = &pandora_bci_data,
+       .power          = &pandora_power_data,
+};
+
+static struct vsense_platform_data omap3pandora_nub1_data = {
+       .gpio_irq       = 161,
+       .gpio_reset     = 156,
+};
+
+static struct vsense_platform_data omap3pandora_nub2_data = {
+       .gpio_irq       = 162,
+       .gpio_reset     = 156,
 };
 
 static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
        {
+               I2C_BOARD_INFO("vsense", 0x66),
+               .flags = I2C_CLIENT_WAKE,
+               .platform_data = &omap3pandora_nub1_data,
+       }, {
+               I2C_BOARD_INFO("vsense", 0x67),
+               .flags = I2C_CLIENT_WAKE,
+               .platform_data = &omap3pandora_nub2_data,
+       }, {
                I2C_BOARD_INFO("bq27500", 0x55),
                .flags = I2C_CLIENT_WAKE,
        },
@@ -553,44 +714,230 @@ fail:
        printk(KERN_ERR "wl1251 board initialisation failed\n");
 }
 
+static void __init pandora_usb_host_init(void)
+{
+       int ret;
+
+       ret = gpio_request_one(PANDORA_EN_USB_5V_GPIO, GPIOF_OUT_INIT_HIGH,
+               "ehci vbus");
+       if (ret < 0)
+               pr_err("Cannot set vbus GPIO, ret=%d\n", ret);
+}
+
+#include <linux/spi/ads7846.h>
+#include <asm/system.h>
+
+static DECLARE_COMPLETION(ts_completion);
+static struct timespec ts_last_framedone;
+static int pendown_state;
+
+static void vsync_irq_wait_handler(void *data, u32 mask)
+{
+       getnstimeofday(&ts_last_framedone);
+
+       /* reliable to read here */
+       pendown_state = !gpio_get_value(OMAP3_PANDORA_TS_GPIO);
+
+       complete(&ts_completion);
+}
+
+static void ads7846_wait_for_sync(void)
+{
+       struct timespec now, diff;
+       u32 diff_us;
+
+       getnstimeofday(&now);
+       diff = timespec_sub(now, ts_last_framedone);
+       diff_us = diff.tv_nsec / NSEC_PER_USEC + diff.tv_sec * USEC_PER_SEC;
+       if (diff_us < 1023 || diff_us > 40000)
+               /* still blanking or display inactive */
+               return;
+
+       /* wait for blanking period */
+       disable_hlt();
+       wait_for_completion_timeout(&ts_completion, HZ / 30);
+       enable_hlt();
+}
+
+static int pandora_pendown_state(void)
+{
+       static int val_old;
+       struct timespec now, diff;
+       u32 diff_us;
+       int val;
+       int ret;
+       
+       /* This line is a noisy mess. It doesn't trigger spuriously, i.e.
+        * there is no signal when pen is up, but when it's down we have
+        * white noise of sorts. */
+       val = !gpio_get_value(OMAP3_PANDORA_TS_GPIO);
+       pendown_state |= val;
+
+       if (in_irq() || in_atomic())
+               /* no time to fight noise.. */
+               return val | pendown_state;
+
+       if (val == 0) {
+               getnstimeofday(&now);
+               diff = timespec_sub(now, ts_last_framedone);
+               diff_us = diff.tv_nsec / NSEC_PER_USEC + diff.tv_sec * USEC_PER_SEC;
+
+               if (diff_us < 40000)
+                       /* assume pendown_state is up-to-date */
+                       val = pendown_state;
+               else
+                       pendown_state = 0;
+       }
+
+       if (val != val_old) {
+               init_completion(&ts_completion);
+               dispc_runtime_get();
+               if (val)
+                       ret = omap_dispc_register_isr(vsync_irq_wait_handler,
+                               NULL, DISPC_IRQ_VSYNC);
+               else
+                       ret = omap_dispc_unregister_isr(vsync_irq_wait_handler,
+                               NULL, DISPC_IRQ_VSYNC);
+               dispc_runtime_put();
+               if (ret != 0)
+                       pr_err("%s: can't (un)register isr: %d %d\n",
+                               __func__, val, ret);
+               val_old = val;
+       }
+
+       return val;
+}
+
+static struct ads7846_platform_data pandora_ads7846_cfg = {
+       .x_max                  = 0x0fff,
+       .y_max                  = 0x0fff,
+       .x_plate_ohms           = 180,
+       .pressure_max           = 255,
+       .debounce_max           = 10,
+       .debounce_tol           = 3,
+       .debounce_rep           = 1,
+       .gpio_pendown           = OMAP3_PANDORA_TS_GPIO,
+       .keep_vref_on           = 1,
+       .wait_for_sync          = ads7846_wait_for_sync,
+       .get_pendown_state      = pandora_pendown_state,
+};
+
+static struct platform_device pandora_ram_console = {
+       .name   = "ram_console",
+       .id     = -1,
+};
+
+static struct platform_device pandora_c64_tools = {
+       .name   = "c64_tools",
+       .dev    = {
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       },
+};
+
 static struct platform_device *omap3pandora_devices[] __initdata = {
        &pandora_leds_gpio,
+       &pandora_leds_pwm,
+       &pandora_bl,
        &pandora_keys_gpio,
        &pandora_vwlan_device,
+       &pandora_ram_console,
+       &pandora_c64_tools,
 };
 
 static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
 
-       .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
-       .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
+       .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
+       .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
        .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
 
        .phy_reset  = true,
-       .reset_gpio_port[0]  = 16,
-       .reset_gpio_port[1]  = -EINVAL,
+       .reset_gpio_port[0]  = -EINVAL,
+       .reset_gpio_port[1]  = 16,
        .reset_gpio_port[2]  = -EINVAL
 };
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+       /* enable wakeup for pandora button (GPIO99) */
+       OMAP3_MUX(CAM_D0, OMAP_INPUT_EN | OMAP_WAKEUP_EN | OMAP_MUX_MODE4),
+       /* noisy unused signal from LCD cable */
+       OMAP3_MUX(CAM_VS, OMAP_INPUT_EN | OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_MUX_MODE7),
        { .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #endif
 
+static struct regulator *lid_switch_power;
+
+#ifdef CONFIG_PM_SLEEP
+static int pandora_pm_suspend(struct device *dev)
+{
+       if (!IS_ERR_OR_NULL(lid_switch_power))
+               regulator_disable(lid_switch_power);
+
+       return 0;
+}
+
+static int pandora_pm_resume(struct device *dev)
+{
+       if (!IS_ERR_OR_NULL(lid_switch_power))
+               regulator_enable(lid_switch_power);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pandora_pm, pandora_pm_suspend, pandora_pm_resume);
+
+static int __devinit pandora_pm_probe(struct platform_device *pdev)
+{
+       lid_switch_power = regulator_get(NULL, "lidsw");
+       if (!IS_ERR(lid_switch_power))
+               regulator_enable(lid_switch_power);
+
+       return 0;
+}
+
+static struct platform_driver pandora_pm_driver = {
+       .probe          = pandora_pm_probe,
+       .driver         = {
+               .name   = "pandora-pm",
+               .pm     = &pandora_pm,
+       },
+};
+
+static struct platform_device pandora_pm_dev = {
+       .name   = "pandora-pm",
+       .id     = -1,
+};
+
+static int __init pandora_pm_drv_reg(void)
+{
+       platform_device_register(&pandora_pm_dev);
+       return platform_driver_register(&pandora_pm_driver);
+}
+late_initcall(pandora_pm_drv_reg);
+
 static void __init omap3pandora_init(void)
 {
+       struct omap_sdrc_params *sdrc_params;
+
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+       pandora_usb_host_init();
        omap3pandora_i2c_init();
        pandora_wl1251_init();
        platform_add_devices(omap3pandora_devices,
                        ARRAY_SIZE(omap3pandora_devices));
        omap_display_init(&pandora_dss_data);
        omap_serial_init();
-       omap_sdrc_init(mt46h32m32lf6_sdrc_params,
-                                 mt46h32m32lf6_sdrc_params);
+
+       sdrc_params = mt46h32m32lf6_sdrc_params;
+       if (cpu_is_omap3630() || omap_rev() >= OMAP3430_REV_ES3_0)
+               sdrc_params = mt29c4g96mazapcjg5_sdrc_params;
+       omap_sdrc_init(sdrc_params, sdrc_params);
+
        spi_register_board_info(omap3pandora_spi_board_info,
                        ARRAY_SIZE(omap3pandora_spi_board_info));
-       omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);
+       omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 31, &pandora_ads7846_cfg);
        usbhs_init(&usbhs_bdata);
        usb_musb_init(NULL);
        gpmc_nand_init(&pandora_nand_data);
@@ -600,9 +947,49 @@ static void __init omap3pandora_init(void)
        omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
 }
 
+/* HACK: create it here, so that others don't need to bother */
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+
+static int __init proc_pandora_init(void)
+{
+       struct proc_dir_entry *ret;
+
+       ret = proc_mkdir("pandora", NULL);
+       if (!ret)
+               return -ENOMEM;
+       return 0;
+}
+fs_initcall(proc_pandora_init);
+#endif
+
+/* for debug.. */
+#include <../drivers/staging/android/persistent_ram.h>
+
+struct persistent_ram_descriptor ram_console_desc = {
+       .name           = "ram_console",
+       .size           = 0x20000,
+};
+
+struct persistent_ram ram_console_ram = {
+       .start          = 0x80fe0000,
+       .size           = 0x20000,
+       .num_descs      = 1,
+       .descs          = &ram_console_desc,
+};
+
+void __init pandora_reserve(void)
+{
+       omap_reserve();
+       dma_declare_contiguous(&pandora_c64_tools.dev, 4 * SZ_1M, 0x86000000, 0);
+#ifdef CONFIG_ANDROID_PERSISTENT_RAM
+       persistent_ram_early_init(&ram_console_ram);
+#endif
+}
+
 MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
        .atag_offset    = 0x100,
-       .reserve        = omap_reserve,
+       .reserve        = pandora_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap35xx_init_early,
        .init_irq       = omap3_init_irq,
index d6e34dd..caae8b7 100644 (file)
@@ -55,20 +55,46 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
        unsigned long validrate, sdrcrate, _mpurate;
        struct omap_sdrc_params *sdrc_cs0;
        struct omap_sdrc_params *sdrc_cs1;
+       u32 cm_clksel1_pll;
+       struct dpll_data *dd;
        int ret;
 
        if (!clk || !rate)
                return -EINVAL;
 
-       validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
-       if (validrate != rate)
+       if (!clk->parent || !clk->parent->dpll_data)
                return -EINVAL;
 
+       dd = clk->parent->dpll_data;
+       cm_clksel1_pll = __raw_readl(dd->mult_div1_reg);
+
+       validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
+       if (validrate == rate) {
+               cm_clksel1_pll &= ~clk->clksel_mask;
+               cm_clksel1_pll |= new_div << __ffs(clk->clksel_mask);
+       } else {
+               rate = omap2_dpll_round_rate(clk->parent, rate);
+               if (rate == ~0)
+                       return -EINVAL;
+
+               cm_clksel1_pll &= ~(dd->mult_mask | dd->div1_mask | clk->clksel_mask);
+               cm_clksel1_pll |= dd->last_rounded_m << __ffs(dd->mult_mask);
+               cm_clksel1_pll |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
+               cm_clksel1_pll |= 1 << __ffs(clk->clksel_mask);
+
+               validrate = rate;
+       }
+
+#if 0
        sdrcrate = sdrc_ick_p->rate;
        if (rate > clk->rate)
                sdrcrate <<= ((rate / clk->rate) >> 1);
        else
                sdrcrate >>= ((clk->rate / rate) >> 1);
+#else
+       /* HACK! */
+       sdrcrate = rate / 2;
+#endif
 
        ret = omap2_sdrc_get_params(sdrcrate, &sdrc_cs0, &sdrc_cs1);
        if (ret)
@@ -104,19 +130,22 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
 
        if (sdrc_cs1)
                omap3_configure_core_dpll(
-                                 new_div, unlock_dll, c, rate > clk->rate,
+                                 cm_clksel1_pll, unlock_dll, c, rate > clk->rate,
                                  sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
                                  sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
                                  sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
                                  sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
        else
                omap3_configure_core_dpll(
-                                 new_div, unlock_dll, c, rate > clk->rate,
+                                 cm_clksel1_pll, unlock_dll, c, rate > clk->rate,
                                  sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
                                  sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
                                  0, 0, 0, 0);
        clk->rate = rate;
 
+       /* HACK */
+       clk->parent->rate = clk->parent->recalc(clk->parent);
+
        return 0;
 }
 
index e25364d..e378fe7 100644 (file)
@@ -460,6 +460,21 @@ int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
        return 0;
 }
 
+int omap2_clksel_force_divisor(struct clk *clk, int new_div)
+{
+       u32 field_val;
+
+       field_val = _divisor_to_clksel(clk, new_div);
+       if (field_val == ~0)
+               return -EINVAL;
+
+       _write_clksel_reg(clk, field_val);
+
+       clk->rate = clk->parent->rate / new_div;
+
+       return 0;
+}
+
 /*
  * Clksel parent setting function - not passed in struct clk function
  * pointer - instead, the OMAP clock code currently assumes that any
index 2311bc2..4d96a17 100644 (file)
@@ -61,6 +61,12 @@ void omap3_dpll_allow_idle(struct clk *clk);
 void omap3_dpll_deny_idle(struct clk *clk);
 u32 omap3_dpll_autoidle_read(struct clk *clk);
 int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate);
+#if CONFIG_ARCH_OMAP3
+int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel);
+/* If you are using this function and not on OMAP3, you are
+ * Doing It Wrong(tm), so there is no stub.
+ */
+#endif
 int omap3_noncore_dpll_enable(struct clk *clk);
 void omap3_noncore_dpll_disable(struct clk *clk);
 int omap4_dpllmx_gatectrl_read(struct clk *clk);
@@ -86,6 +92,7 @@ unsigned long omap2_clksel_recalc(struct clk *clk);
 long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate);
 int omap2_clksel_set_rate(struct clk *clk, unsigned long rate);
 int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent);
+int omap2_clksel_force_divisor(struct clk *clk, int new_div);
 
 /* clkt_iclk.c public functions */
 extern void omap2_clkt_iclk_allow_idle(struct clk *clk);
index 952c3e0..8acdabb 100644 (file)
 /* needed by omap3_core_dpll_m2_set_rate() */
 struct clk *sdrc_ick_p, *arm_fck_p;
 
+struct dpll_settings {
+       int rate, m, n, f;
+};
+
+
+static int omap3_dpll5_apply_erratum21(struct clk *clk, struct clk *dpll5_m2)
+{
+       struct clk *sys_clk;
+       int i, rv;
+       static const struct dpll_settings precomputed[] = {
+               /* From DM3730 errata (sprz319e), table 36
+               * N+1 is because the values in the table are register values;
+               * dpll_program() will subtract one from the N we give it,
+               * so ...
+               */
+               { 13000000, 443, 5+1, 8 },
+               { 26000000, 443, 11+1, 8 }
+       };
+
+       sys_clk = clk_get(NULL, "sys_ck");
+
+       for (i = 0 ; i < (sizeof(precomputed)/sizeof(struct dpll_settings)) ;
+               ++i) {
+               const struct dpll_settings *d = &precomputed[i];
+               if (sys_clk->rate == d->rate) {
+                       rv =  omap3_noncore_dpll_program(clk, d->m , d->n, 0);
+                       if (rv)
+                               return 1;
+                       rv =  omap2_clksel_force_divisor(dpll5_m2 , d->f);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+int omap3_dpll5_set_rate(struct clk *clk, unsigned long rate)
+{
+       struct clk *dpll5_m2;
+       int rv;
+       dpll5_m2 = clk_get(NULL, "dpll5_m2_ck");
+
+       if (cpu_is_omap3630() && rate == DPLL5_FREQ_FOR_USBHOST &&
+               omap3_dpll5_apply_erratum21(clk, dpll5_m2)) {
+               return 1;
+       }
+       rv = omap3_noncore_dpll_set_rate(clk, rate);
+       if (rv)
+               goto out;
+       rv = clk_set_rate(dpll5_m2, rate);
+
+out:
+       return rv;
+}
+
 int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
 {
        /*
@@ -59,19 +113,14 @@ int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
 void __init omap3_clk_lock_dpll5(void)
 {
        struct clk *dpll5_clk;
-       struct clk *dpll5_m2_clk;
 
        dpll5_clk = clk_get(NULL, "dpll5_ck");
        clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
-       clk_enable(dpll5_clk);
 
-       /* Program dpll5_m2_clk divider for no division */
-       dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
-       clk_enable(dpll5_m2_clk);
-       clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+       /* dpll5_m2_ck is now (grottily!) handled by dpll5_clk's set routine,
+        * to cope with an erratum on DM3730
+        */
 
-       clk_disable(dpll5_m2_clk);
-       clk_disable(dpll5_clk);
        return;
 }
 
index 8bbeeaf..0ede513 100644 (file)
@@ -10,6 +10,7 @@
 
 int omap3xxx_clk_init(void);
 int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate);
+int omap3_dpll5_set_rate(struct clk *clk, unsigned long rate);
 int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate);
 void omap3_clk_lock_dpll5(void);
 
index 5d0064a..087c01b 100644 (file)
@@ -745,7 +745,7 @@ static struct clk dpll4_m3_ck = {
        .parent         = &dpll4_ck,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-       .clksel_mask    = OMAP3430_CLKSEL_TV_MASK,
+       .clksel_mask    = OMAP3630_CLKSEL_TV_MASK,
        .clksel         = dpll4_clksel,
        .clkdm_name     = "dpll4_clkdm",
        .recalc         = &omap2_clksel_recalc,
@@ -830,7 +830,7 @@ static struct clk dpll4_m4_ck = {
        .parent         = &dpll4_ck,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-       .clksel_mask    = OMAP3430_CLKSEL_DSS1_MASK,
+       .clksel_mask    = OMAP3630_CLKSEL_DSS1_MASK,
        .clksel         = dpll4_clksel,
        .clkdm_name     = "dpll4_clkdm",
        .recalc         = &omap2_clksel_recalc,
@@ -857,7 +857,7 @@ static struct clk dpll4_m5_ck = {
        .parent         = &dpll4_ck,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-       .clksel_mask    = OMAP3430_CLKSEL_CAM_MASK,
+       .clksel_mask    = OMAP3630_CLKSEL_CAM_MASK,
        .clksel         = dpll4_clksel,
        .clkdm_name     = "dpll4_clkdm",
        .set_rate       = &omap2_clksel_set_rate,
@@ -884,7 +884,7 @@ static struct clk dpll4_m6_ck = {
        .parent         = &dpll4_ck,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_DIV_DPLL4_MASK,
+       .clksel_mask    = OMAP3630_DIV_DPLL4_MASK,
        .clksel         = dpll4_clksel,
        .clkdm_name     = "dpll4_clkdm",
        .recalc         = &omap2_clksel_recalc,
@@ -942,7 +942,7 @@ static struct clk dpll5_ck = {
        .parent         = &sys_ck,
        .dpll_data      = &dpll5_dd,
        .round_rate     = &omap2_dpll_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
+       .set_rate       = &omap3_dpll5_set_rate,
        .clkdm_name     = "dpll5_clkdm",
        .recalc         = &omap3_dpll_recalc,
 };
@@ -1392,6 +1392,7 @@ static struct clk cpefuse_fck = {
        .name           = "cpefuse_fck",
        .ops            = &clkops_omap2_dflt,
        .parent         = &sys_ck,
+       .clkdm_name     = "core_l4_clkdm",
        .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
        .enable_bit     = OMAP3430ES2_EN_CPEFUSE_SHIFT,
        .recalc         = &followparent_recalc,
@@ -1401,6 +1402,7 @@ static struct clk ts_fck = {
        .name           = "ts_fck",
        .ops            = &clkops_omap2_dflt,
        .parent         = &omap_32k_fck,
+       .clkdm_name     = "core_l4_clkdm",
        .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
        .enable_bit     = OMAP3430ES2_EN_TS_SHIFT,
        .recalc         = &followparent_recalc,
@@ -1410,6 +1412,7 @@ static struct clk usbtll_fck = {
        .name           = "usbtll_fck",
        .ops            = &clkops_omap2_dflt_wait,
        .parent         = &dpll5_m2_ck,
+       .clkdm_name     = "core_l4_clkdm",
        .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
        .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
        .recalc         = &followparent_recalc,
@@ -1615,6 +1618,7 @@ static struct clk fshostusb_fck = {
        .name           = "fshostusb_fck",
        .ops            = &clkops_omap2_dflt_wait,
        .parent         = &core_48m_fck,
+       .clkdm_name     = "core_l4_clkdm",
        .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
        .enable_bit     = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
        .recalc         = &followparent_recalc,
@@ -2041,6 +2045,7 @@ static struct clk omapctrl_ick = {
        .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
        .enable_bit     = OMAP3430_EN_OMAPCTRL_SHIFT,
        .flags          = ENABLE_ON_INIT,
+       .clkdm_name     = "core_l4_clkdm",
        .recalc         = &followparent_recalc,
 };
 
@@ -2092,6 +2097,7 @@ static struct clk usb_l4_ick = {
        .clksel_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
        .clksel_mask    = OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
        .clksel         = usb_l4_clksel,
+       .clkdm_name     = "core_l4_clkdm",
        .recalc         = &omap2_clksel_recalc,
 };
 
index 0798a80..37931a5 100644 (file)
@@ -3341,17 +3341,6 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "auxclk5_ck",                   &auxclk5_ck,    CK_443X),
        CLK(NULL,       "auxclkreq5_ck",                &auxclkreq5_ck, CK_443X),
        CLK(NULL,       "gpmc_ck",                      &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt1_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt2_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt3_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt4_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt5_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt6_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt7_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt8_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt9_ick",                     &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt10_ick",                    &dummy_ck,      CK_443X),
-       CLK(NULL,       "gpt11_ick",                    &dummy_ck,      CK_443X),
        CLK("omap_i2c.1",       "ick",                          &dummy_ck,      CK_443X),
        CLK("omap_i2c.2",       "ick",                          &dummy_ck,      CK_443X),
        CLK("omap_i2c.3",       "ick",                          &dummy_ck,      CK_443X),
index ad07689..7153a7d 100644 (file)
@@ -768,6 +768,7 @@ int clkdm_sleep(struct clockdomain *clkdm)
        spin_lock_irqsave(&clkdm->lock, flags);
        clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
        ret = arch_clkdm->clkdm_sleep(clkdm);
+       ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
        spin_unlock_irqrestore(&clkdm->lock, flags);
        return ret;
 }
@@ -914,15 +915,18 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
        if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
                return -EINVAL;
 
+       spin_lock_irqsave(&clkdm->lock, flags);
+
        /*
         * For arch's with no autodeps, clkcm_clk_enable
         * should be called for every clock instance or hwmod that is
         * enabled, so the clkdm can be force woken up.
         */
-       if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
+       if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
+               spin_unlock_irqrestore(&clkdm->lock, flags);
                return 0;
+       }
 
-       spin_lock_irqsave(&clkdm->lock, flags);
        arch_clkdm->clkdm_clk_enable(clkdm);
        pwrdm_wait_transition(clkdm->pwrdm.ptr);
        pwrdm_clkdm_state_switch(clkdm);
@@ -940,15 +944,19 @@ static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
        if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
                return -EINVAL;
 
+       spin_lock_irqsave(&clkdm->lock, flags);
+
        if (atomic_read(&clkdm->usecount) == 0) {
+               spin_unlock_irqrestore(&clkdm->lock, flags);
                WARN_ON(1); /* underflow */
                return -ERANGE;
        }
 
-       if (atomic_dec_return(&clkdm->usecount) > 0)
+       if (atomic_dec_return(&clkdm->usecount) > 0) {
+               spin_unlock_irqrestore(&clkdm->lock, flags);
                return 0;
+       }
 
-       spin_lock_irqsave(&clkdm->lock, flags);
        arch_clkdm->clkdm_clk_disable(clkdm);
        pwrdm_clkdm_state_switch(clkdm);
        spin_unlock_irqrestore(&clkdm->lock, flags);
index f7b5860..6227e95 100644 (file)
  *
  * CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
  *     clockdomain.  (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ *     active whenever the MPU is active.  True for interconnects and
+ *     the WKUP clockdomains.
  */
 #define CLKDM_CAN_FORCE_SLEEP                  (1 << 0)
 #define CLKDM_CAN_FORCE_WAKEUP                 (1 << 1)
 #define CLKDM_CAN_ENABLE_AUTO                  (1 << 2)
 #define CLKDM_CAN_DISABLE_AUTO                 (1 << 3)
 #define CLKDM_NO_AUTODEPS                      (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU                  (1 << 5)
 
 #define CLKDM_CAN_HWSUP                (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
 #define CLKDM_CAN_SWSUP                (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
index a0d68db..f99e65c 100644 (file)
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
                _clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+       bool hwsup = false;
+
+       if (!clkdm->clktrctrl_mask)
+               return 0;
+
+       hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+                               clkdm->clktrctrl_mask);
+
+       if (hwsup) {
+               /* Disable HW transitions when we are changing deps */
+               _disable_hwsup(clkdm);
+               _clkdm_add_autodeps(clkdm);
+               _enable_hwsup(clkdm);
+       } else {
+               if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+                       omap3_clkdm_wakeup(clkdm);
+       }
+
+       return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+       bool hwsup = false;
+
+       if (!clkdm->clktrctrl_mask)
+               return 0;
+
+       hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+                               clkdm->clktrctrl_mask);
+
+       if (hwsup) {
+               /* Disable HW transitions when we are changing deps */
+               _disable_hwsup(clkdm);
+               _clkdm_del_autodeps(clkdm);
+               _enable_hwsup(clkdm);
+       } else {
+               if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+                       omap3_clkdm_sleep(clkdm);
+       }
+
+       return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
        .clkdm_add_wkdep        = omap2_clkdm_add_wkdep,
        .clkdm_del_wkdep        = omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
        .clkdm_wakeup           = omap3_clkdm_wakeup,
        .clkdm_allow_idle       = omap3_clkdm_allow_idle,
        .clkdm_deny_idle        = omap3_clkdm_deny_idle,
-       .clkdm_clk_enable       = omap2_clkdm_clk_enable,
-       .clkdm_clk_disable      = omap2_clkdm_clk_disable,
+       .clkdm_clk_enable       = omap3xxx_clkdm_clk_enable,
+       .clkdm_clk_disable      = omap3xxx_clkdm_clk_disable,
 };
index 0a6a048..8a62c7f 100644 (file)
@@ -88,6 +88,7 @@ struct clockdomain wkup_common_clkdm = {
        .name           = "wkup_clkdm",
        .pwrdm          = { .name = "wkup_pwrdm" },
        .dep_bit        = OMAP_EN_WKUP_SHIFT,
+       .flags          = CLKDM_ACTIVE_WITH_MPU,
 };
 
 struct clockdomain prm_common_clkdm = {
index 9299ac2..91c8ed7 100644 (file)
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
        .cm_inst          = OMAP4430_PRM_WKUP_CM_INST,
        .clkdm_offs       = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
        .dep_bit          = OMAP4430_L4WKUP_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP,
+       .flags            = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
 };
 
 static struct clockdomain emu_sys_44xx_clkdm = {
index bcb0c58..799a617 100644 (file)
@@ -76,13 +76,15 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
                        gpio_set_debounce(gpio_pendown, gpio_debounce);
        }
 
-       ads7846_config.gpio_pendown = gpio_pendown;
-
        spi_bi->bus_num = bus_num;
        spi_bi->irq     = OMAP_GPIO_IRQ(gpio_pendown);
 
-       if (board_pdata)
+       if (board_pdata) {
+               board_pdata->gpio_pendown = gpio_pendown;
                spi_bi->platform_data = board_pdata;
+       } else {
+               ads7846_config.gpio_pendown = gpio_pendown;
+       }
 
        spi_register_board_info(&ads7846_spi_board_info, 1);
 }
index 942bb4f..bbb1a21 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
 #include <linux/export.h>
+#include <linux/cpu_pm.h>
 
 #include <plat/prcm.h>
 #include <plat/irqs.h>
@@ -72,20 +73,6 @@ struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
 
 struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
 
-static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
-                               struct clockdomain *clkdm)
-{
-       clkdm_allow_idle(clkdm);
-       return 0;
-}
-
-static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
-                               struct clockdomain *clkdm)
-{
-       clkdm_deny_idle(clkdm);
-       return 0;
-}
-
 /**
  * omap3_enter_idle - Programs OMAP3 to enter the specified state
  * @dev: cpuidle device
@@ -102,42 +89,51 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
        struct omap3_idle_statedata *cx =
                        cpuidle_get_statedata(&dev->states_usage[index]);
        struct timespec ts_preidle, ts_postidle, ts_idle;
-       u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
        int idle_time;
 
        /* Used to keep track of the total time in idle */
        getnstimeofday(&ts_preidle);
 
        local_irq_disable();
-       local_fiq_disable();
-
-       pwrdm_set_next_pwrst(mpu_pd, mpu_state);
-       pwrdm_set_next_pwrst(core_pd, core_state);
 
        if (omap_irq_pending() || need_resched())
                goto return_sleep_time;
 
        /* Deny idle for C1 */
        if (index == 0) {
-               pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
-               pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
+               clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
+       } else {
+               pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
+               pwrdm_set_next_pwrst(core_pd, cx->core_state);
        }
 
+       /*
+        * Call idle CPU PM enter notifier chain so that
+        * VFP context is saved.
+        */
+       if (cx->mpu_state == PWRDM_POWER_OFF)
+               cpu_pm_enter();
+
        /* Execute ARM wfi */
        omap_sram_idle();
 
+       /*
+        * Call idle CPU PM enter notifier chain to restore
+        * VFP context.
+        */
+       if (cx->mpu_state == PWRDM_POWER_OFF &&
+           pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
+               cpu_pm_exit();
+
        /* Re-allow idle for C1 */
-       if (index == 0) {
-               pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
-               pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
-       }
+       if (index == 0)
+               clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
 
 return_sleep_time:
        getnstimeofday(&ts_postidle);
        ts_idle = timespec_sub(ts_postidle, ts_preidle);
 
        local_irq_enable();
-       local_fiq_enable();
 
        idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
                                                                USEC_PER_SEC;
@@ -183,6 +179,9 @@ static int next_valid_state(struct cpuidle_device *dev,
                        core_deepest_state = PWRDM_POWER_OFF;
        }
 
+       if (!omap_uart_can_sleep())
+               core_deepest_state = PWRDM_POWER_RET;
+
        /* Check if current state is valid */
        if ((cx->valid) &&
            (cx->mpu_state >= mpu_deepest_state) &&
@@ -236,28 +235,22 @@ static int next_valid_state(struct cpuidle_device *dev,
  * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
-                               struct cpuidle_driver *drv,
+                              struct cpuidle_driver *drv,
                               int index)
 {
        int new_state_idx;
-       u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
+       u32 core_next_state, per_next_state = 0, per_saved_state = 0;
        struct omap3_idle_statedata *cx;
        int ret;
 
-       if (!omap3_can_sleep()) {
-               new_state_idx = drv->safe_state_index;
-               goto select_state;
-       }
-
        /*
-        * Prevent idle completely if CAM is active.
+        * Use only C1 if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
         */
-       cam_state = pwrdm_read_pwrst(cam_pd);
-       if (cam_state == PWRDM_POWER_ON) {
+       if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
                new_state_idx = drv->safe_state_index;
-               goto select_state;
-       }
+       else
+               new_state_idx = next_valid_state(dev, drv, index);
 
        /*
         * FIXME: we currently manage device-specific idle states
@@ -267,24 +260,28 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
         *        its own code.
         */
 
-       /*
-        * Prevent PER off if CORE is not in retention or off as this
-        * would disable PER wakeups completely.
-        */
-       cx = cpuidle_get_statedata(&dev->states_usage[index]);
+       /* Program PER state */
+       cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]);
        core_next_state = cx->core_state;
        per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
-       if ((per_next_state == PWRDM_POWER_OFF) &&
-           (core_next_state > PWRDM_POWER_RET))
-               per_next_state = PWRDM_POWER_RET;
+       if (new_state_idx == 0) {
+               /* In C1 do not allow PER state lower than CORE state */
+               if (per_next_state < core_next_state)
+                       per_next_state = core_next_state;
+       } else {
+               /*
+                * Prevent PER OFF if CORE is not in RETention or OFF as this
+                * would disable PER wakeups completely.
+                */
+               if ((per_next_state == PWRDM_POWER_OFF) &&
+                   (core_next_state > PWRDM_POWER_RET))
+                       per_next_state = PWRDM_POWER_RET;
+       }
 
        /* Are we changing PER target state? */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_next_state);
 
-       new_state_idx = next_valid_state(dev, drv, index);
-
-select_state:
        ret = omap3_enter_idle(dev, drv, new_state_idx);
 
        /* Restore original PER state if it was modified */
index c15cfad..982eba8 100644 (file)
@@ -673,6 +673,29 @@ static inline void omap_init_vout(void) {}
 
 /*-------------------------------------------------------------------------*/
 
+#if defined(CONFIG_ARCH_OMAP3) && \
+       (defined(CONFIG_OMAP3_THERMAL) || defined(CONFIG_OMAP3_THERMAL_MODULE))
+static void omap_init_temp_sensor(void)
+{
+       struct omap_hwmod *oh;
+       struct platform_device *pdev;
+
+       oh = omap_hwmod_lookup("bandgap_ts");
+       if (!oh) {
+               pr_err("%s: unable to find hwmod\n", __func__);
+               return;
+       }
+
+       pdev = omap_device_build("omap3-thermal", -1, oh, NULL, 0, NULL, 0, 0);
+       WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n",
+                          __func__, PTR_ERR(pdev));
+}
+#else
+static inline void omap_init_temp_sensor(void) {}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
 static int __init omap2_init_devices(void)
 {
        /*
@@ -690,6 +713,7 @@ static int __init omap2_init_devices(void)
        omap_init_sham();
        omap_init_aes();
        omap_init_vout();
+       omap_init_temp_sensor();
 
        return 0;
 }
index fc56745..50ccee4 100644 (file)
@@ -291,7 +291,7 @@ static void _lookup_sddiv(struct clk *clk, u8 *sd_div, u16 m, u8 n)
  * Program the DPLL with the supplied M, N values, and wait for the DPLL to
  * lock..  Returns -EINVAL upon error, or 0 upon success.
  */
-static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
+int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
 {
        struct dpll_data *dd = clk->dpll_data;
        u8 dco, sd_div;
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
new file mode 100644 (file)
index 0000000..72e0f01
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * DRM/KMS device registration for TI OMAP platforms
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+
+#if defined(CONFIG_DRM_OMAP) || (CONFIG_DRM_OMAP_MODULE)
+
+static struct platform_device omap_drm_device = {
+       .dev = {
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       },
+       .name = "omapdrm",
+       .id = 0,
+};
+
+static int __init omap_init_drm(void)
+{
+       struct omap_hwmod *oh = NULL;
+       struct platform_device *pdev;
+
+       /* lookup and populate the DMM information, if present - OMAP4+ */
+       oh = omap_hwmod_lookup("dmm");
+
+       if (oh) {
+               pdev = omap_device_build(oh->name, -1, oh, NULL, 0, NULL, 0,
+                                       false);
+               WARN(IS_ERR(pdev), "Could not build omap_device for %s\n",
+                       oh->name);
+       }
+
+       return platform_device_register(&omap_drm_device);
+
+}
+
+arch_initcall(omap_init_drm);
+
+#endif
index 8ad210b..0ebc8a1 100644 (file)
@@ -102,11 +102,6 @@ int __init gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
                return err;
        }
 
-       /* Enable RD PIN Monitoring Reg */
-       if (gpmc_nand_data->dev_ready) {
-               gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_RDY_BSY, 1);
-       }
-
        err = platform_device_register(&gpmc_nand_device);
        if (err < 0) {
                dev_err(dev, "Unable to register NAND device\n");
index dfffbbf..82979d2 100644 (file)
@@ -480,6 +480,11 @@ int gpmc_read_status(int cmd)
                status = regval & GPMC_STATUS_BUFF_EMPTY;
                break;
 
+       case GPMC_STATUS_WAIT:
+               regval = gpmc_read_reg(GPMC_STATUS);
+               status = regval & 0x100;
+               break;
+
        default:
                printk(KERN_ERR "gpmc_read_status: Not supported\n");
        }
index f4a1020..a7035a5 100644 (file)
@@ -19,6 +19,7 @@
 #include <plat/omap-pm.h>
 #include <plat/mux.h>
 #include <plat/omap_device.h>
+#include <asm/mach-types.h>
 
 #include "mux.h"
 #include "hsmmc.h"
@@ -171,7 +172,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
        }
 }
 
-static void hsmmc23_before_set_reg(struct device *dev, int slot,
+static void hsmmc2_before_set_reg(struct device *dev, int slot,
                                   int power_on, int vdd)
 {
        struct omap_mmc_platform_data *mmc = dev->platform_data;
@@ -180,14 +181,14 @@ static void hsmmc23_before_set_reg(struct device *dev, int slot,
                mmc->slots[0].remux(dev, slot, power_on);
 
        if (power_on) {
-               /* Only MMC2 supports a CLKIN */
-               if (mmc->slots[0].internal_clock) {
-                       u32 reg;
+               u32 reg;
 
-                       reg = omap_ctrl_readl(control_devconf1_offset);
+               reg = omap_ctrl_readl(control_devconf1_offset);
+               if (mmc->slots[0].internal_clock)
                        reg |= OMAP2_MMCSDIO2ADPCLKISEL;
-                       omap_ctrl_writel(reg, control_devconf1_offset);
-               }
+               else
+                       reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
+               omap_ctrl_writel(reg, control_devconf1_offset);
        }
 }
 
@@ -200,10 +201,12 @@ static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
 static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
                        int controller_nr)
 {
-       if (gpio_is_valid(mmc_controller->slots[0].switch_pin))
+       if (gpio_is_valid(mmc_controller->slots[0].switch_pin) &&
+               (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
                omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
                                        OMAP_PIN_INPUT_PULLUP);
-       if (gpio_is_valid(mmc_controller->slots[0].gpio_wp))
+       if (gpio_is_valid(mmc_controller->slots[0].gpio_wp) &&
+               (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
                omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
                                        OMAP_PIN_INPUT_PULLUP);
        if (cpu_is_omap34xx()) {
@@ -378,14 +381,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
                        c->caps &= ~MMC_CAP_8_BIT_DATA;
                        c->caps |= MMC_CAP_4_BIT_DATA;
                }
-               /* FALLTHROUGH */
-       case 3:
                if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
                        /* off-chip level shifting, or none */
-                       mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+                       mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
                        mmc->slots[0].after_set_reg = NULL;
                }
                break;
+       case 3:
        case 4:
        case 5:
                mmc->slots[0].before_set_reg = NULL;
@@ -439,6 +441,13 @@ void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
        if (oh->dev_attr != NULL) {
                mmc_dev_attr = oh->dev_attr;
                mmc_data->controller_flags = mmc_dev_attr->flags;
+               /*
+                * erratum 2.1.1.128 doesn't apply if board has
+                * a transceiver is attached
+                */
+               if (hsmmcinfo->transceiver)
+                       mmc_data->controller_flags &=
+                               ~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ;
        }
 
        pdev = omap_device_build(name, ctrl_nr - 1, oh, mmc_data,
@@ -470,6 +479,13 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
                        control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
                        control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
                }
+
+               if (machine_is_omap3_pandora()) {
+                       /* needed for gpio_126 - gpio_129 to work correctly */
+                       reg = omap_ctrl_readl(control_pbias_offset);
+                       reg &= ~OMAP343X_PBIASLITEVMODE1;
+                       omap_ctrl_writel(reg, control_pbias_offset);
+               }
        } else {
                control_pbias_offset =
                        OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
index 7f47092..fbede45 100644 (file)
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_SOC_BUS
+#include <linux/sys_soc.h>
+#include <linux/err.h>
+#endif
 
 #include <asm/cputype.h>
 
 
 #include "control.h"
 
-static unsigned int omap_revision;
+#define OMAP_SOC_MAX_NAME_LENGTH               16
 
+static unsigned int omap_revision;
+static char soc_name[OMAP_SOC_MAX_NAME_LENGTH];
+static char soc_rev[OMAP_SOC_MAX_NAME_LENGTH];
 u32 omap_features;
 
 unsigned int omap_rev(void)
@@ -112,7 +121,7 @@ void omap_get_die_id(struct omap_die_id *odi)
        odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3);
 }
 
-static void __init omap24xx_check_revision(void)
+void __init omap2xxx_check_revision(void)
 {
        int i, j;
        u32 idcode, prod_id;
@@ -160,19 +169,70 @@ static void __init omap24xx_check_revision(void)
                j = i;
        }
 
-       pr_info("OMAP%04x", omap_rev() >> 16);
+       sprintf(soc_name, "OMAP%04x", omap_rev() >> 16);
+       sprintf(soc_rev, "ES%x", (omap_rev() >> 12) & 0xf);
+
+       pr_info("%s", soc_name);
        if ((omap_rev() >> 8) & 0x0f)
-               pr_info("ES%x", (omap_rev() >> 12) & 0xf);
+               pr_info("%s", soc_rev);
        pr_info("\n");
 }
 
+#define OMAP3_SHOW_FEATURE(feat)               \
+       if (omap3_has_ ##feat())                \
+               printk(#feat" ");
+
+static void __init omap3_cpuinfo(void)
+{
+       const char *cpu_name;
+
+       /*
+        * OMAP3430 and OMAP3530 are assumed to be same.
+        *
+        * OMAP3525, OMAP3515 and OMAP3503 can be detected only based
+        * on available features. Upon detection, update the CPU id
+        * and CPU class bits.
+        */
+       if (cpu_is_omap3630()) {
+               cpu_name = "OMAP3630";
+       } else if (cpu_is_omap3517()) {
+               /* AM35xx devices */
+               cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
+       } else if (cpu_is_ti816x()) {
+               cpu_name = "TI816X";
+       } else if (omap3_has_iva() && omap3_has_sgx()) {
+               /* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
+               cpu_name = "OMAP3430/3530";
+       } else if (omap3_has_iva()) {
+               cpu_name = "OMAP3525";
+       } else if (omap3_has_sgx()) {
+               cpu_name = "OMAP3515";
+       } else {
+               cpu_name = "OMAP3503";
+       }
+
+       sprintf(soc_name, "%s", cpu_name);
+
+       /* Print verbose information */
+       pr_info("%s %s (", soc_name, soc_rev);
+
+       OMAP3_SHOW_FEATURE(l2cache);
+       OMAP3_SHOW_FEATURE(iva);
+       OMAP3_SHOW_FEATURE(sgx);
+       OMAP3_SHOW_FEATURE(neon);
+       OMAP3_SHOW_FEATURE(isp);
+       OMAP3_SHOW_FEATURE(192mhz_clk);
+
+       printk(")\n");
+}
+
 #define OMAP3_CHECK_FEATURE(status,feat)                               \
        if (((status & OMAP3_ ##feat## _MASK)                           \
                >> OMAP3_ ##feat## _SHIFT) != FEAT_ ##feat## _NONE) {   \
                omap_features |= OMAP3_HAS_ ##feat;                     \
        }
 
-static void __init omap3_check_features(void)
+void __init omap3xxx_check_features(void)
 {
        u32 status;
 
@@ -199,9 +259,11 @@ static void __init omap3_check_features(void)
         * TODO: Get additional info (where applicable)
         *       e.g. Size of L2 cache.
         */
+
+       omap3_cpuinfo();
 }
 
-static void __init omap4_check_features(void)
+void __init omap4xxx_check_features(void)
 {
        u32 si_type;
 
@@ -226,13 +288,15 @@ static void __init omap4_check_features(void)
        }
 }
 
-static void __init ti816x_check_features(void)
+void __init ti81xx_check_features(void)
 {
        omap_features = OMAP3_HAS_NEON;
+       omap3_cpuinfo();
 }
 
-static void __init omap3_check_revision(const char **cpu_rev)
+void __init omap3xxx_check_revision(void)
 {
+       const char *cpu_rev;
        u32 cpuid, idcode;
        u16 hawkeye;
        u8 rev;
@@ -245,7 +309,7 @@ static void __init omap3_check_revision(const char **cpu_rev)
        cpuid = read_cpuid(CPUID_ID);
        if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
                omap_revision = OMAP3430_REV_ES1_0;
-               *cpu_rev = "1.0";
+               cpu_rev = "1.0";
                return;
        }
 
@@ -266,26 +330,26 @@ static void __init omap3_check_revision(const char **cpu_rev)
                case 0: /* Take care of early samples */
                case 1:
                        omap_revision = OMAP3430_REV_ES2_0;
-                       *cpu_rev = "2.0";
+                       cpu_rev = "2.0";
                        break;
                case 2:
                        omap_revision = OMAP3430_REV_ES2_1;
-                       *cpu_rev = "2.1";
+                       cpu_rev = "2.1";
                        break;
                case 3:
                        omap_revision = OMAP3430_REV_ES3_0;
-                       *cpu_rev = "3.0";
+                       cpu_rev = "3.0";
                        break;
                case 4:
                        omap_revision = OMAP3430_REV_ES3_1;
-                       *cpu_rev = "3.1";
+                       cpu_rev = "3.1";
                        break;
                case 7:
                /* FALLTHROUGH */
                default:
                        /* Use the latest known revision as default */
                        omap_revision = OMAP3430_REV_ES3_1_2;
-                       *cpu_rev = "3.1.2";
+                       cpu_rev = "3.1.2";
                }
                break;
        case 0xb868:
@@ -298,13 +362,13 @@ static void __init omap3_check_revision(const char **cpu_rev)
                switch (rev) {
                case 0:
                        omap_revision = OMAP3517_REV_ES1_0;
-                       *cpu_rev = "1.0";
+                       cpu_rev = "1.0";
                        break;
                case 1:
                /* FALLTHROUGH */
                default:
                        omap_revision = OMAP3517_REV_ES1_1;
-                       *cpu_rev = "1.1";
+                       cpu_rev = "1.1";
                }
                break;
        case 0xb891:
@@ -313,42 +377,43 @@ static void __init omap3_check_revision(const char **cpu_rev)
                switch(rev) {
                case 0: /* Take care of early samples */
                        omap_revision = OMAP3630_REV_ES1_0;
-                       *cpu_rev = "1.0";
+                       cpu_rev = "1.0";
                        break;
                case 1:
                        omap_revision = OMAP3630_REV_ES1_1;
-                       *cpu_rev = "1.1";
+                       cpu_rev = "1.1";
                        break;
                case 2:
                /* FALLTHROUGH */
                default:
                        omap_revision = OMAP3630_REV_ES1_2;
-                       *cpu_rev = "1.2";
+                       cpu_rev = "1.2";
                }
                break;
        case 0xb81e:
                switch (rev) {
                case 0:
                        omap_revision = TI8168_REV_ES1_0;
-                       *cpu_rev = "1.0";
+                       cpu_rev = "1.0";
                        break;
                case 1:
                /* FALLTHROUGH */
                default:
                        omap_revision = TI8168_REV_ES1_1;
-                       *cpu_rev = "1.1";
+                       cpu_rev = "1.1";
                        break;
                }
                break;
        default:
                /* Unknown default to latest silicon rev as default */
                omap_revision = OMAP3630_REV_ES1_2;
-               *cpu_rev = "1.2";
+               cpu_rev = "1.2";
                pr_warn("Warning: unknown chip type; assuming OMAP3630ES1.2\n");
        }
+       sprintf(soc_rev, "ES%s", cpu_rev);
 }
 
-static void __init omap4_check_revision(void)
+void __init omap4xxx_check_revision(void)
 {
        u32 idcode;
        u16 hawkeye;
@@ -406,87 +471,10 @@ static void __init omap4_check_revision(void)
                omap_revision = OMAP4430_REV_ES2_2;
        }
 
-       pr_info("OMAP%04x ES%d.%d\n", omap_rev() >> 16,
-               ((omap_rev() >> 12) & 0xf), ((omap_rev() >> 8) & 0xf));
-}
-
-#define OMAP3_SHOW_FEATURE(feat)               \
-       if (omap3_has_ ##feat())                \
-               printk(#feat" ");
-
-static void __init omap3_cpuinfo(const char *cpu_rev)
-{
-       const char *cpu_name;
-
-       /*
-        * OMAP3430 and OMAP3530 are assumed to be same.
-        *
-        * OMAP3525, OMAP3515 and OMAP3503 can be detected only based
-        * on available features. Upon detection, update the CPU id
-        * and CPU class bits.
-        */
-       if (cpu_is_omap3630()) {
-               cpu_name = "OMAP3630";
-       } else if (cpu_is_omap3517()) {
-               /* AM35xx devices */
-               cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
-       } else if (cpu_is_ti816x()) {
-               cpu_name = "TI816X";
-       } else if (omap3_has_iva() && omap3_has_sgx()) {
-               /* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
-               cpu_name = "OMAP3430/3530";
-       } else if (omap3_has_iva()) {
-               cpu_name = "OMAP3525";
-       } else if (omap3_has_sgx()) {
-               cpu_name = "OMAP3515";
-       } else {
-               cpu_name = "OMAP3503";
-       }
-
-       /* Print verbose information */
-       pr_info("%s ES%s (", cpu_name, cpu_rev);
-
-       OMAP3_SHOW_FEATURE(l2cache);
-       OMAP3_SHOW_FEATURE(iva);
-       OMAP3_SHOW_FEATURE(sgx);
-       OMAP3_SHOW_FEATURE(neon);
-       OMAP3_SHOW_FEATURE(isp);
-       OMAP3_SHOW_FEATURE(192mhz_clk);
-
-       printk(")\n");
-}
-
-/*
- * Try to detect the exact revision of the omap we're running on
- */
-void __init omap2_check_revision(void)
-{
-       const char *cpu_rev;
-
-       /*
-        * At this point we have an idea about the processor revision set
-        * earlier with omap2_set_globals_tap().
-        */
-       if (cpu_is_omap24xx()) {
-               omap24xx_check_revision();
-       } else if (cpu_is_omap34xx()) {
-               omap3_check_revision(&cpu_rev);
-
-               /* TI816X doesn't have feature register */
-               if (!cpu_is_ti816x())
-                       omap3_check_features();
-               else
-                       ti816x_check_features();
-
-               omap3_cpuinfo(cpu_rev);
-               return;
-       } else if (cpu_is_omap44xx()) {
-               omap4_check_revision();
-               omap4_check_features();
-               return;
-       } else {
-               pr_err("OMAP revision unknown, please fix!\n");
-       }
+       sprintf(soc_name, "OMAP%04x", omap_rev() >> 16);
+       sprintf(soc_rev, "ES%d.%d", (omap_rev() >> 12) & 0xf,
+                                               (omap_rev() >> 8) & 0xf);
+       pr_info("%s %s\n", soc_name, soc_rev);
 }
 
 /*
@@ -506,3 +494,66 @@ void __init omap2_set_globals_tap(struct omap_globals *omap2_globals)
        else
                tap_prod_id = 0x0208;
 }
+
+#ifdef CONFIG_SOC_BUS
+
+static const char const *omap_types[] = {
+       [OMAP2_DEVICE_TYPE_TEST]        = "TST",
+       [OMAP2_DEVICE_TYPE_EMU]         = "EMU",
+       [OMAP2_DEVICE_TYPE_SEC]         = "HS",
+       [OMAP2_DEVICE_TYPE_GP]          = "GP",
+       [OMAP2_DEVICE_TYPE_BAD]         = "BAD",
+};
+
+static const char * __init omap_get_family(void)
+{
+       if (cpu_is_omap24xx())
+               return kasprintf(GFP_KERNEL, "OMAP2");
+       else if (cpu_is_omap34xx())
+               return kasprintf(GFP_KERNEL, "OMAP3");
+       else if (cpu_is_omap44xx())
+               return kasprintf(GFP_KERNEL, "OMAP4");
+       else
+               return kasprintf(GFP_KERNEL, "Unknown");
+}
+
+static ssize_t omap_get_type(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       return sprintf(buf, "%s\n", omap_types[omap_type()]);
+}
+
+static struct device_attribute omap_soc_attr =
+       __ATTR(type,  S_IRUGO, omap_get_type,  NULL);
+
+int __init omap_soc_device_init(void)
+{
+       struct device *parent;
+       struct soc_device *soc_dev;
+       struct soc_device_attribute *soc_dev_attr;
+       int ret = 0;
+
+       soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+       if (!soc_dev_attr)
+               return -ENOMEM;
+
+       soc_dev_attr->machine  = soc_name;
+       soc_dev_attr->family   = omap_get_family();
+       soc_dev_attr->revision = soc_rev;
+
+       soc_dev = soc_device_register(soc_dev_attr);
+       if (IS_ERR_OR_NULL(soc_dev)) {
+               kfree(soc_dev_attr);
+               return -ENODEV;
+       }
+
+       parent = soc_device_to_device(soc_dev);
+       if (!IS_ERR_OR_NULL(parent))
+               ret = device_create_file(parent, &omap_soc_attr);
+
+       return ret;
+}
+late_initcall(omap_soc_device_init);
+
+#endif /* CONFIG_SOC_BUS */
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 8663199..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/plat-omap/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END      0xf8000000UL
index 25d20ce..d2f3077 100644 (file)
@@ -321,7 +321,6 @@ void __iomem *omap_irq_base;
 
 static void __init omap_common_init_early(void)
 {
-       omap2_check_revision();
        omap_ioremap_init();
        omap_init_consistent_dma_size();
 }
@@ -363,6 +362,7 @@ static void __init omap_hwmod_init_postsetup(void)
 void __init omap2420_init_early(void)
 {
        omap2_set_globals_242x();
+       omap2xxx_check_revision();
        omap_common_init_early();
        omap2xxx_voltagedomains_init();
        omap242x_powerdomains_init();
@@ -375,6 +375,7 @@ void __init omap2420_init_early(void)
 void __init omap2430_init_early(void)
 {
        omap2_set_globals_243x();
+       omap2xxx_check_revision();
        omap_common_init_early();
        omap2xxx_voltagedomains_init();
        omap243x_powerdomains_init();
@@ -393,6 +394,8 @@ void __init omap2430_init_early(void)
 void __init omap3_init_early(void)
 {
        omap2_set_globals_3xxx();
+       omap3xxx_check_revision();
+       omap3xxx_check_features();
        omap_common_init_early();
        omap3xxx_voltagedomains_init();
        omap3xxx_powerdomains_init();
@@ -425,6 +428,8 @@ void __init am35xx_init_early(void)
 void __init ti816x_init_early(void)
 {
        omap2_set_globals_ti816x();
+       omap3xxx_check_revision();
+       ti81xx_check_features();
        omap_common_init_early();
        omap3xxx_voltagedomains_init();
        omap3xxx_powerdomains_init();
@@ -439,6 +444,8 @@ void __init ti816x_init_early(void)
 void __init omap4430_init_early(void)
 {
        omap2_set_globals_443x();
+       omap4xxx_check_revision();
+       omap4xxx_check_features();
        omap_common_init_early();
        omap44xx_voltagedomains_init();
        omap44xx_powerdomains_init();
index 65f1be6..9fbeb2c 100644 (file)
@@ -133,6 +133,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        ct->chip.irq_ack = omap_mask_ack_irq;
        ct->chip.irq_mask = irq_gc_mask_disable_reg;
        ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+       ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
 
        ct->regs.ack = INTC_CONTROL;
        ct->regs.enable = INTC_MIR_CLEAR0;
index 207a2ff..0d81b14 100644 (file)
@@ -316,7 +316,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
 }
 
 /**
- * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
+ * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
  * @oh: struct omap_hwmod *
  * @v: pointer to register contents to modify
  *
@@ -343,6 +343,68 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
        return 0;
 }
 
+/**
+ * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
+ * @oh: struct omap_hwmod *
+ * @v: pointer to register contents to modify
+ *
+ * Clear the SOFTRESET bit in @v for hwmod @oh.  Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
+{
+       u32 softrst_mask;
+
+       if (!oh->class->sysc ||
+           !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
+               return -EINVAL;
+
+       if (!oh->class->sysc->sysc_fields) {
+               WARN(1,
+                    "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
+                    oh->name);
+               return -EINVAL;
+       }
+
+       softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
+
+       *v &= ~softrst_mask;
+
+       return 0;
+}
+
+/**
+ * _wait_softreset_complete - wait for an OCP softreset to complete
+ * @oh: struct omap_hwmod * to wait on
+ *
+ * Wait until the IP block represented by @oh reports that its OCP
+ * softreset is complete.  This can be triggered by software (see
+ * _ocp_softreset()) or by hardware upon returning from off-mode (one
+ * example is HSMMC).  Waits for up to MAX_MODULE_SOFTRESET_WAIT
+ * microseconds.  Returns the number of microseconds waited.
+ */
+static int _wait_softreset_complete(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_class_sysconfig *sysc;
+       u32 softrst_mask;
+       int c = 0;
+
+       sysc = oh->class->sysc;
+
+       if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
+               omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
+                                  & SYSS_RESETDONE_MASK),
+                                 MAX_MODULE_SOFTRESET_WAIT, c);
+       else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
+               softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
+               omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
+                                   & softrst_mask),
+                                 MAX_MODULE_SOFTRESET_WAIT, c);
+       }
+
+       return c;
+}
+
 /**
  * _set_module_autoidle: set the OCP_SYSCONFIG AUTOIDLE field in @v
  * @oh: struct omap_hwmod *
@@ -880,30 +942,70 @@ static void __iomem * __init _find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
  * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
  * @oh: struct omap_hwmod *
  *
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle.  If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby.  No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes.  No return value.
  */
 static void _enable_sysc(struct omap_hwmod *oh)
 {
        u8 idlemode, sf;
        u32 v;
+       bool clkdm_act;
 
        if (!oh->class->sysc)
                return;
 
+#if 0 /* causes data abort on venc on 3.2 */
+       /*
+        * Wait until reset has completed, this is needed as the IP
+        * block is reset automatically by hardware in some cases
+        * (off-mode for example), and the drivers require the
+        * IP to be ready when they access it
+        */
+       if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+               _enable_optional_clocks(oh);
+       _wait_softreset_complete(oh);
+       if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+               _disable_optional_clocks(oh);
+#endif
+
        v = oh->_sysc_cache;
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+               if (oh->flags & HWMOD_SWSUP_SIDLE) {
+                       idlemode = HWMOD_IDLEMODE_NO;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
+
+               /*
+                * This is special handling for some IPs like
+                * 32k sync timer. Force them to idle!
+                */
+               clkdm_act = ((oh->clkdm &&
+                             oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+                            (oh->_clk && oh->_clk->clkdm &&
+                             oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+               if (clkdm_act && !(oh->class->sysc->idlemodes &
+                                  (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+               if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
                        idlemode = HWMOD_IDLEMODE_NO;
                } else {
                        if (sf & SYSC_HAS_ENAWAKEUP)
@@ -925,11 +1027,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
            (sf & SYSC_HAS_CLOCKACTIVITY))
                _set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-       /* If slave is in SMARTIDLE, also enable wakeup */
-       if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-               _enable_wakeup(oh, &v);
-
-       _write_sysconfig(v, oh);
+       /* If the cached value is the same as the new value, skip the write */
+       if (oh->_sysc_cache != v)
+               _write_sysconfig(v, oh);
 
        /*
         * Set the autoidle bit only after setting the smartidle bit
@@ -964,13 +1064,22 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+               if (oh->flags & HWMOD_SWSUP_SIDLE) {
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+               if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
+                   (oh->flags & HWMOD_FORCE_MSTANDBY)) {
                        idlemode = HWMOD_IDLEMODE_FORCE;
                } else {
                        if (sf & SYSC_HAS_ENAWAKEUP)
@@ -983,10 +1092,6 @@ static void _idle_sysc(struct omap_hwmod *oh)
                _set_master_standbymode(oh, idlemode, &v);
        }
 
-       /* If slave is in SMARTIDLE, also enable wakeup */
-       if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-               _enable_wakeup(oh, &v);
-
        _write_sysconfig(v, oh);
 }
 
@@ -1366,32 +1471,30 @@ static int _ocp_softreset(struct omap_hwmod *oh)
        ret = _set_softreset(oh, &v);
        if (ret)
                goto dis_opt_clks;
-       _write_sysconfig(v, oh);
 
-       if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
-               omap_test_timeout((omap_hwmod_read(oh,
-                                                   oh->class->sysc->syss_offs)
-                                  & SYSS_RESETDONE_MASK),
-                                 MAX_MODULE_SOFTRESET_WAIT, c);
-       else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS)
-               omap_test_timeout(!(omap_hwmod_read(oh,
-                                                    oh->class->sysc->sysc_offs)
-                                  & SYSC_TYPE2_SOFTRESET_MASK),
-                                 MAX_MODULE_SOFTRESET_WAIT, c);
+       _write_sysconfig(v, oh);
 
-       if (c == MAX_MODULE_SOFTRESET_WAIT)
+       c = _wait_softreset_complete(oh);
+       if (c == MAX_MODULE_SOFTRESET_WAIT) {
                pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
                           oh->name, MAX_MODULE_SOFTRESET_WAIT);
-       else
+               ret = -ETIMEDOUT;
+               goto dis_opt_clks;
+       } else {
                pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+       }
+
+       ret = _clear_softreset(oh, &v);
+       if (ret)
+               goto dis_opt_clks;
+
+       _write_sysconfig(v, oh);
 
        /*
         * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
         * _wait_target_ready() or _reset()
         */
 
-       ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
 dis_opt_clks:
        if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
                _disable_optional_clocks(oh);
@@ -1852,6 +1955,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
                goto error;
        _write_sysconfig(v, oh);
 
+       ret = _clear_softreset(oh, &v);
+       if (ret)
+               goto error;
+       _write_sysconfig(v, oh);
+
 error:
        return ret;
 }
index a5409ce..3e89380 100644 (file)
@@ -1000,7 +1000,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
                        .flags  = OMAP_FIREWALL_L4,
                }
        },
-       .flags          = OCPIF_SWSUP_IDLE,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1376,7 +1375,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
 };
 
 static struct omap_hwmod omap2420_mcspi1_hwmod = {
-       .name           = "mcspi1_hwmod",
+       .name           = "mcspi1",
        .mpu_irqs       = omap2_mcspi1_mpu_irqs,
        .sdma_reqs      = omap2_mcspi1_sdma_reqs,
        .main_clk       = "mcspi1_fck",
@@ -1405,7 +1404,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
 };
 
 static struct omap_hwmod omap2420_mcspi2_hwmod = {
-       .name           = "mcspi2_hwmod",
+       .name           = "mcspi2",
        .mpu_irqs       = omap2_mcspi2_mpu_irqs,
        .sdma_reqs      = omap2_mcspi2_sdma_reqs,
        .main_clk       = "mcspi2_fck",
index c4f56cb..6a156a9 100644 (file)
@@ -1049,7 +1049,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
        .slave          = &omap2430_dss_venc_hwmod,
        .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
-       .flags          = OCPIF_SWSUP_IDLE,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1481,7 +1480,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
 };
 
 static struct omap_hwmod omap2430_mcspi1_hwmod = {
-       .name           = "mcspi1_hwmod",
+       .name           = "mcspi1",
        .mpu_irqs       = omap2_mcspi1_mpu_irqs,
        .sdma_reqs      = omap2_mcspi1_sdma_reqs,
        .main_clk       = "mcspi1_fck",
@@ -1510,7 +1509,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
 };
 
 static struct omap_hwmod omap2430_mcspi2_hwmod = {
-       .name           = "mcspi2_hwmod",
+       .name           = "mcspi2",
        .mpu_irqs       = omap2_mcspi2_mpu_irqs,
        .sdma_reqs      = omap2_mcspi2_sdma_reqs,
        .main_clk       = "mcspi2_fck",
@@ -1552,7 +1551,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
 };
 
 static struct omap_hwmod omap2430_mcspi3_hwmod = {
-       .name           = "mcspi3_hwmod",
+       .name           = "mcspi3",
        .mpu_irqs       = omap2430_mcspi3_mpu_irqs,
        .sdma_reqs      = omap2430_mcspi3_sdma_reqs,
        .main_clk       = "mcspi3_fck",
index c11273d..f08e442 100644 (file)
@@ -55,27 +55,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = {
        .reset  = omap_dss_reset,
 };
 
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-struct omap_hwmod_class omap2_dispc_hwmod_class = {
-       .name   = "dispc",
-       .sysc   = &omap2_dispc_sysc,
-};
-
 /*
  * 'rfbi' class
  * remote frame buffer interface
index 177dee2..2a67297 100644 (file)
@@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
        { .name = "dispc", .dma_req = 5 },
        { .dma_req = -1 }
 };
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dispc_hwmod_class = {
+       .name   = "dispc",
+       .sysc   = &omap2_dispc_sysc,
+};
+
 /* OMAP2xxx Timer Common */
 static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
        .rev_offs       = 0x0000,
index eef43e2..8859d47 100644 (file)
@@ -29,6 +29,7 @@
 
 #include "omap_hwmod_common_data.h"
 
+#include "smartreflex.h"
 #include "prm-regbits-34xx.h"
 #include "cm-regbits-34xx.h"
 #include "wd_timer.h"
@@ -356,6 +357,16 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+static struct omap_hwmod_irq_info omap3_smartreflex_mpu_irqs[] = {
+       { .irq = 18},
+       { .irq = -1 }
+};
+
+static struct omap_hwmod_irq_info omap3_smartreflex_core_irqs[] = {
+       { .irq = 19},
+       { .irq = -1 }
+};
+
 /* L4 CORE -> SR1 interface */
 static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
        {
@@ -1067,7 +1078,7 @@ static struct omap_hwmod omap3xxx_timer11_hwmod = {
        .class          = &omap3xxx_timer_hwmod_class,
 };
 
-/* timer12*/
+/* timer12 */
 static struct omap_hwmod omap3xxx_timer12_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = {
        { .irq = 95, },
@@ -1426,6 +1437,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
        .masters_cnt    = ARRAY_SIZE(omap3xxx_dss_masters),
 };
 
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+                          SYSC_HAS_ENAWAKEUP),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3_dispc_hwmod_class = {
+       .name   = "dispc",
+       .sysc   = &omap3_dispc_sysc,
+};
+
 /* l4_core -> dss_dispc */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
        .master         = &omap3xxx_l4_core_hwmod,
@@ -1449,7 +1482,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
 
 static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
        .name           = "dss_dispc",
-       .class          = &omap2_dispc_hwmod_class,
+       .class          = &omap3_dispc_hwmod_class,
        .mpu_irqs       = omap2_dispc_irqs,
        .main_clk       = "dss1_alwon_fck",
        .prcm           = {
@@ -1589,7 +1622,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
                        .flags  = OMAP_FIREWALL_L4,
                }
        },
-       .flags          = OCPIF_SWSUP_IDLE,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2588,15 +2620,18 @@ static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = {
 };
 
 /* SR1 */
+static struct omap_smartreflex_dev_attr sr1_dev_attr = {
+       .sensor_voltdm_name   = "mpu_iva",
+};
+
 static struct omap_hwmod_ocp_if *omap3_sr1_slaves[] = {
        &omap3_l4_core__sr1,
 };
 
 static struct omap_hwmod omap34xx_sr1_hwmod = {
-       .name           = "sr1_hwmod",
+       .name           = "sr1",
        .class          = &omap34xx_smartreflex_hwmod_class,
        .main_clk       = "sr1_fck",
-       .vdd_name       = "mpu_iva",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -2608,14 +2643,15 @@ static struct omap_hwmod omap34xx_sr1_hwmod = {
        },
        .slaves         = omap3_sr1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3_sr1_slaves),
+       .dev_attr       = &sr1_dev_attr,
+       .mpu_irqs       = omap3_smartreflex_mpu_irqs,
        .flags          = HWMOD_SET_DEFAULT_CLOCKACT,
 };
 
 static struct omap_hwmod omap36xx_sr1_hwmod = {
-       .name           = "sr1_hwmod",
+       .name           = "sr1",
        .class          = &omap36xx_smartreflex_hwmod_class,
        .main_clk       = "sr1_fck",
-       .vdd_name       = "mpu_iva",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -2627,18 +2663,23 @@ static struct omap_hwmod omap36xx_sr1_hwmod = {
        },
        .slaves         = omap3_sr1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3_sr1_slaves),
+       .dev_attr       = &sr1_dev_attr,
+       .mpu_irqs       = omap3_smartreflex_mpu_irqs,
 };
 
 /* SR2 */
+static struct omap_smartreflex_dev_attr sr2_dev_attr = {
+       .sensor_voltdm_name     = "core",
+};
+
 static struct omap_hwmod_ocp_if *omap3_sr2_slaves[] = {
        &omap3_l4_core__sr2,
 };
 
 static struct omap_hwmod omap34xx_sr2_hwmod = {
-       .name           = "sr2_hwmod",
+       .name           = "sr2",
        .class          = &omap34xx_smartreflex_hwmod_class,
        .main_clk       = "sr2_fck",
-       .vdd_name       = "core",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -2650,14 +2691,15 @@ static struct omap_hwmod omap34xx_sr2_hwmod = {
        },
        .slaves         = omap3_sr2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3_sr2_slaves),
+       .dev_attr       = &sr2_dev_attr,
+       .mpu_irqs       = omap3_smartreflex_core_irqs,
        .flags          = HWMOD_SET_DEFAULT_CLOCKACT,
 };
 
 static struct omap_hwmod omap36xx_sr2_hwmod = {
-       .name           = "sr2_hwmod",
+       .name           = "sr2",
        .class          = &omap36xx_smartreflex_hwmod_class,
        .main_clk       = "sr2_fck",
-       .vdd_name       = "core",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -2669,6 +2711,8 @@ static struct omap_hwmod omap36xx_sr2_hwmod = {
        },
        .slaves         = omap3_sr2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3_sr2_slaves),
+       .dev_attr       = &sr2_dev_attr,
+       .mpu_irqs       = omap3_smartreflex_core_irqs,
 };
 
 /*
@@ -2946,6 +2990,44 @@ static struct omap_hwmod omap34xx_mcspi4 = {
        .dev_attr       = &omap_mcspi4_dev_attr,
 };
 
+/* temp. sensor */
+struct omap_hwmod_class omap34xx_bandgap_ts_class = {
+       .name   = "bandgap_ts",
+};
+
+static struct omap_hwmod_addr_space omap3xxx_bandgap_ts_addrs[] = {
+       {
+               .name           = "mpu",
+               .pa_start       = 0x48002524,
+               .pa_end         = 0x48002524 + 4,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+static struct omap_hwmod omap34xx_bandgap_ts;
+
+/* l4_core -> bandgap */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__bandgap_ts = {
+       .master         = &omap3xxx_l4_core_hwmod,
+       .slave          = &omap34xx_bandgap_ts,
+       .addr           = omap3xxx_bandgap_ts_addrs,
+       .user           = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_bandgap_ts_slaves[] = {
+       &omap3xxx_l4_core__bandgap_ts,
+};
+
+static struct omap_hwmod omap34xx_bandgap_ts = {
+       .name           = "bandgap_ts",
+       .main_clk       = "ts_fck",
+       .slaves         = omap3xxx_bandgap_ts_slaves,
+       .slaves_cnt     = ARRAY_SIZE(omap3xxx_bandgap_ts_slaves),
+       .class          = &omap34xx_bandgap_ts_class,
+       .flags          = HWMOD_NO_IDLEST,
+};
+
 /*
  * usbhsotg
  */
@@ -2997,9 +3079,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
         * Erratum ID: i479  idle_req / idle_ack mechanism potentially
         * broken when autoidle is enabled
         * workaround is to disable the autoidle bit at module level.
+        *
+        * Enabling the device in any other MIDLEMODE setting but force-idle
+        * causes core_pwrdm to not enter idle states, at least on OMAP3630.
+        * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY
+        * signal when MIDLEMODE is set to force-idle.
         */
        .flags          = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
-                               | HWMOD_SWSUP_MSTANDBY,
+                               | HWMOD_FORCE_MSTANDBY,
 };
 
 /* usb_otg_hs */
@@ -3072,7 +3159,35 @@ static struct omap_mmc_dev_attr mmc1_dev_attr = {
        .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
 };
 
-static struct omap_hwmod omap3xxx_mmc1_hwmod = {
+/* See 35xx errata 2.1.1.128 in SPRZ278F */
+static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = {
+       .flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT |
+                 OMAP_HSMMC_BROKEN_MULTIBLOCK_READ),
+};
+
+static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = {
+       .name           = "mmc1",
+       .mpu_irqs       = omap34xx_mmc1_mpu_irqs,
+       .sdma_reqs      = omap34xx_mmc1_sdma_reqs,
+       .opt_clks       = omap34xx_mmc1_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
+       .main_clk       = "mmchs1_fck",
+       .prcm           = {
+               .omap2 = {
+                       .module_offs = CORE_MOD,
+                       .prcm_reg_id = 1,
+                       .module_bit = OMAP3430_EN_MMC1_SHIFT,
+                       .idlest_reg_id = 1,
+                       .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
+               },
+       },
+       .dev_attr       = &mmc1_pre_es3_dev_attr,
+       .slaves         = omap3xxx_mmc1_slaves,
+       .slaves_cnt     = ARRAY_SIZE(omap3xxx_mmc1_slaves),
+       .class          = &omap34xx_mmc_class,
+};
+
+static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = {
        .name           = "mmc1",
        .mpu_irqs       = omap34xx_mmc1_mpu_irqs,
        .sdma_reqs      = omap34xx_mmc1_sdma_reqs,
@@ -3115,7 +3230,34 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
        &omap3xxx_l4_core__mmc2,
 };
 
-static struct omap_hwmod omap3xxx_mmc2_hwmod = {
+/* See 35xx errata 2.1.1.128 in SPRZ278F */
+static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = {
+       .flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
+};
+
+static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = {
+       .name           = "mmc2",
+       .mpu_irqs       = omap34xx_mmc2_mpu_irqs,
+       .sdma_reqs      = omap34xx_mmc2_sdma_reqs,
+       .opt_clks       = omap34xx_mmc2_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
+       .main_clk       = "mmchs2_fck",
+       .prcm           = {
+               .omap2 = {
+                       .module_offs = CORE_MOD,
+                       .prcm_reg_id = 1,
+                       .module_bit = OMAP3430_EN_MMC2_SHIFT,
+                       .idlest_reg_id = 1,
+                       .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
+               },
+       },
+       .dev_attr       = &mmc2_pre_es3_dev_attr,
+       .slaves         = omap3xxx_mmc2_slaves,
+       .slaves_cnt     = ARRAY_SIZE(omap3xxx_mmc2_slaves),
+       .class          = &omap34xx_mmc_class,
+};
+
+static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = {
        .name           = "mmc2",
        .mpu_irqs       = omap34xx_mmc2_mpu_irqs,
        .sdma_reqs      = omap34xx_mmc2_sdma_reqs,
@@ -3182,8 +3324,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
        &omap3xxx_l4_core_hwmod,
        &omap3xxx_l4_per_hwmod,
        &omap3xxx_l4_wkup_hwmod,
-       &omap3xxx_mmc1_hwmod,
-       &omap3xxx_mmc2_hwmod,
        &omap3xxx_mmc3_hwmod,
        &omap3xxx_mpu_hwmod,
 
@@ -3198,17 +3338,11 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
        &omap3xxx_timer9_hwmod,
        &omap3xxx_timer10_hwmod,
        &omap3xxx_timer11_hwmod,
-       &omap3xxx_timer12_hwmod,
 
        &omap3xxx_wd_timer2_hwmod,
        &omap3xxx_uart1_hwmod,
        &omap3xxx_uart2_hwmod,
        &omap3xxx_uart3_hwmod,
-       /* dss class */
-       &omap3xxx_dss_dispc_hwmod,
-       &omap3xxx_dss_dsi1_hwmod,
-       &omap3xxx_dss_rfbi_hwmod,
-       &omap3xxx_dss_venc_hwmod,
 
        /* i2c class */
        &omap3xxx_i2c1_hwmod,
@@ -3242,9 +3376,17 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
        &omap34xx_mcspi3,
        &omap34xx_mcspi4,
 
+       &omap34xx_bandgap_ts,
+
        NULL,
 };
 
+/* GP-only hwmods */
+static __initdata struct omap_hwmod *omap3xxx_gp_hwmods[] = {
+       &omap3xxx_timer12_hwmod,
+       NULL
+};
+
 /* 3430ES1-only hwmods */
 static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
        &omap3430es1_dss_core_hwmod,
@@ -3258,6 +3400,20 @@ static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
        NULL
 };
 
+/* <= 3430ES3-only hwmods */
+static struct omap_hwmod *omap3430_pre_es3_hwmods[] __initdata = {
+       &omap3xxx_pre_es3_mmc1_hwmod,
+       &omap3xxx_pre_es3_mmc2_hwmod,
+       NULL
+};
+
+/* 3430ES3+-only hwmods */
+static struct omap_hwmod *omap3430_es3plus_hwmods[] __initdata = {
+       &omap3xxx_es3plus_mmc1_hwmod,
+       &omap3xxx_es3plus_mmc2_hwmod,
+       NULL
+};
+
 /* 34xx-only hwmods (all ES revisions) */
 static __initdata struct omap_hwmod *omap34xx_hwmods[] = {
        &omap3xxx_iva_hwmod,
@@ -3276,12 +3432,25 @@ static __initdata struct omap_hwmod *omap36xx_hwmods[] = {
        &omap36xx_sr2_hwmod,
        &omap3xxx_usbhsotg_hwmod,
        &omap3xxx_mailbox_hwmod,
+       &omap3xxx_es3plus_mmc1_hwmod,
+       &omap3xxx_es3plus_mmc2_hwmod,
        NULL
 };
 
 static __initdata struct omap_hwmod *am35xx_hwmods[] = {
        &omap3xxx_dss_core_hwmod, /* XXX ??? */
        &am35xx_usbhsotg_hwmod,
+       &omap3xxx_es3plus_mmc1_hwmod,
+       &omap3xxx_es3plus_mmc2_hwmod,
+       NULL
+};
+
+static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
+       /* dss class */
+       &omap3xxx_dss_dispc_hwmod,
+       &omap3xxx_dss_dsi1_hwmod,
+       &omap3xxx_dss_rfbi_hwmod,
+       &omap3xxx_dss_venc_hwmod,
        NULL
 };
 
@@ -3296,6 +3465,13 @@ int __init omap3xxx_hwmod_init(void)
        if (r < 0)
                return r;
 
+       /* Register GP-only hwmods. */
+       if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+               r = omap_hwmod_register(omap3xxx_gp_hwmods);
+               if (r < 0)
+                       return r;
+       }
+
        rev = omap_rev();
 
        /*
@@ -3334,8 +3510,36 @@ int __init omap3xxx_hwmod_init(void)
                h = omap3430es2plus_hwmods;
        };
 
+       if (h) {
+               r = omap_hwmod_register(h);
+               if (r < 0)
+                       return r;
+       }
+
+       h = NULL;
+       if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 ||
+           rev == OMAP3430_REV_ES2_1) {
+               h = omap3430_pre_es3_hwmods;
+       } else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 ||
+                  rev == OMAP3430_REV_ES3_1_2) {
+               h = omap3430_es3plus_hwmods;
+       };
+
        if (h)
                r = omap_hwmod_register(h);
+       if (r < 0)
+               return r;
+
+       /*
+        * DSS code presumes that dss_core hwmod is handled first,
+        * _before_ any other DSS related hwmods so register common
+        * DSS hwmods last to ensure that dss_core is already registered.
+        * Otherwise some strange things may happen, for ex. if dispc
+        * is handled before dss_core and DSS is enabled in bootloader
+        * DISPC will be reset with outputs enabled which sometimes leads
+        * to unrecoverable L3 error.
+        */
+       r = omap_hwmod_register(omap3xxx_dss_hwmods);
 
        return r;
 }
index daaf165..d11f5f1 100644 (file)
@@ -34,6 +34,7 @@
 
 #include "omap_hwmod_common_data.h"
 
+#include "smartreflex.h"
 #include "cm1_44xx.h"
 #include "cm2_44xx.h"
 #include "prm44xx.h"
@@ -3958,6 +3959,10 @@ static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
 };
 
 /* smartreflex_core */
+static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = {
+       .sensor_voltdm_name   = "core",
+};
+
 static struct omap_hwmod omap44xx_smartreflex_core_hwmod;
 static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = {
        { .irq = 19 + OMAP44XX_IRQ_GIC_START },
@@ -3994,7 +3999,6 @@ static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
        .mpu_irqs       = omap44xx_smartreflex_core_irqs,
 
        .main_clk       = "smartreflex_core_fck",
-       .vdd_name       = "core",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET,
@@ -4004,9 +4008,14 @@ static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
        },
        .slaves         = omap44xx_smartreflex_core_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_smartreflex_core_slaves),
+       .dev_attr       = &smartreflex_core_dev_attr,
 };
 
 /* smartreflex_iva */
+static struct omap_smartreflex_dev_attr smartreflex_iva_dev_attr = {
+       .sensor_voltdm_name     = "iva",
+};
+
 static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
 static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
        { .irq = 102 + OMAP44XX_IRQ_GIC_START },
@@ -4042,7 +4051,6 @@ static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
        .clkdm_name     = "l4_ao_clkdm",
        .mpu_irqs       = omap44xx_smartreflex_iva_irqs,
        .main_clk       = "smartreflex_iva_fck",
-       .vdd_name       = "iva",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET,
@@ -4052,9 +4060,14 @@ static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
        },
        .slaves         = omap44xx_smartreflex_iva_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_smartreflex_iva_slaves),
+       .dev_attr       = &smartreflex_iva_dev_attr,
 };
 
 /* smartreflex_mpu */
+static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = {
+       .sensor_voltdm_name     = "mpu",
+};
+
 static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
 static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
        { .irq = 18 + OMAP44XX_IRQ_GIC_START },
@@ -4090,7 +4103,6 @@ static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
        .clkdm_name     = "l4_ao_clkdm",
        .mpu_irqs       = omap44xx_smartreflex_mpu_irqs,
        .main_clk       = "smartreflex_mpu_fck",
-       .vdd_name       = "mpu",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET,
@@ -4100,6 +4112,7 @@ static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
        },
        .slaves         = omap44xx_smartreflex_mpu_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_smartreflex_mpu_slaves),
+       .dev_attr       = &smartreflex_mpu_dev_attr,
 };
 
 /*
index a05a62f..df18e02 100644 (file)
@@ -183,7 +183,8 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
                 * of such errors and handle the others. timeout error
                 * is severe and not expected to occur.
                 */
-               BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK);
+               WARN(status & L3_STATUS_0_TIMEOUT_MASK,
+                       "L3_APPLICATION_ERROR, status %llx", status);
        } else {
                status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
                /* No timeout error for debug sources */
index 4411163..814bcd9 100644 (file)
@@ -220,8 +220,8 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir)
                return 0;
 
        d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir);
-
-       (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d,
+       if (!(IS_ERR_OR_NULL(d)))
+               (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d,
                        (void *)pwrdm, &pwrdm_suspend_fops);
 
        return 0;
@@ -264,7 +264,7 @@ static int __init pm_dbg_init(void)
                return 0;
 
        d = debugfs_create_dir("pm_debug", NULL);
-       if (IS_ERR(d))
+       if (IS_ERR_OR_NULL(d))
                return PTR_ERR(d);
 
        (void) debugfs_create_file("count", S_IRUGO,
index 00bff46..8407dab 100644 (file)
@@ -72,28 +72,27 @@ static void omap2_init_processor_devices(void)
  * This sets pwrdm state (other than mpu & core. Currently only ON &
  * RET are supported.
  */
-int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
+int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 pwrst)
 {
-       u32 cur_state;
-       int sleep_switch = -1;
-       int ret = 0;
-       int hwsup = 0;
+       u8 curr_pwrst, next_pwrst;
+       int sleep_switch = -1, ret = 0, hwsup = 0;
 
-       if (pwrdm == NULL || IS_ERR(pwrdm))
+       if (!pwrdm || IS_ERR(pwrdm))
                return -EINVAL;
 
-       while (!(pwrdm->pwrsts & (1 << state))) {
-               if (state == PWRDM_POWER_OFF)
+       while (!(pwrdm->pwrsts & (1 << pwrst))) {
+               if (pwrst == PWRDM_POWER_OFF)
                        return ret;
-               state--;
+               pwrst--;
        }
 
-       cur_state = pwrdm_read_next_pwrst(pwrdm);
-       if (cur_state == state)
+       next_pwrst = pwrdm_read_next_pwrst(pwrdm);
+       if (next_pwrst == pwrst)
                return ret;
 
-       if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
-               if ((pwrdm_read_pwrst(pwrdm) > state) &&
+       curr_pwrst = pwrdm_read_pwrst(pwrdm);
+       if (curr_pwrst < PWRDM_POWER_ON) {
+               if ((curr_pwrst > pwrst) &&
                        (pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
                        sleep_switch = LOWPOWERSTATE_SWITCH;
                } else {
@@ -103,12 +102,10 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
                }
        }
 
-       ret = pwrdm_set_next_pwrst(pwrdm, state);
-       if (ret) {
-               pr_err("%s: unable to set state of powerdomain: %s\n",
+       ret = pwrdm_set_next_pwrst(pwrdm, pwrst);
+       if (ret)
+               pr_err("%s: unable to set power state of powerdomain: %s\n",
                       __func__, pwrdm->name);
-               goto err;
-       }
 
        switch (sleep_switch) {
        case FORCEWAKEUP_SWITCH:
@@ -119,13 +116,11 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
                break;
        case LOWPOWERSTATE_SWITCH:
                pwrdm_set_lowpwrstchange(pwrdm);
+               pwrdm_wait_transition(pwrdm);
+               pwrdm_state_switch(pwrdm);
                break;
-       default:
-               return ret;
        }
 
-       pwrdm_state_switch(pwrdm);
-err:
        return ret;
 }
 
@@ -159,7 +154,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
        }
 
        voltdm = voltdm_lookup(vdd_name);
-       if (IS_ERR(voltdm)) {
+       if (!voltdm) {
                pr_err("%s: unable to get vdd pointer for vdd_%s\n",
                        __func__, vdd_name);
                goto exit;
@@ -174,14 +169,17 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
        freq = clk->rate;
        clk_put(clk);
 
+       rcu_read_lock();
        opp = opp_find_freq_ceil(dev, &freq);
        if (IS_ERR(opp)) {
+               rcu_read_unlock();
                pr_err("%s: unable to find boot up OPP for vdd_%s\n",
                        __func__, vdd_name);
                goto exit;
        }
 
        bootup_volt = opp_get_voltage(opp);
+       rcu_read_unlock();
        if (!bootup_volt) {
                pr_err("%s: unable to find voltage corresponding "
                        "to the bootup OPP for vdd_%s\n", __func__, vdd_name);
index 4e166ad..eac6fce 100644 (file)
@@ -18,7 +18,6 @@
 extern void *omap3_secure_ram_storage;
 extern void omap3_pm_off_mode_enable(int);
 extern void omap_sram_idle(void);
-extern int omap3_can_sleep(void);
 extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
 extern int omap3_idle_init(void);
 
index cf0c216..4797c11 100644 (file)
@@ -251,7 +251,6 @@ static int omap2_can_sleep(void)
 static void omap2_pm_idle(void)
 {
        local_irq_disable();
-       local_fiq_disable();
 
        if (!omap2_can_sleep()) {
                if (omap_irq_pending())
@@ -266,7 +265,6 @@ static void omap2_pm_idle(void)
        omap2_enter_full_retention();
 
 out:
-       local_fiq_enable();
        local_irq_enable();
 }
 
index efa6649..4889a08 100644 (file)
@@ -83,7 +83,6 @@ void (*omap3_do_wfi_sram)(void);
 
 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
 static struct powerdomain *core_pwrdm, *per_pwrdm;
-static struct powerdomain *cam_pwrdm;
 
 static inline void omap3_per_save_context(void)
 {
@@ -339,11 +338,6 @@ void omap_sram_idle(void)
        int core_prev_state, per_prev_state;
        u32 sdrc_pwr = 0;
 
-       pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
-       pwrdm_clear_all_prev_pwrst(neon_pwrdm);
-       pwrdm_clear_all_prev_pwrst(core_pwrdm);
-       pwrdm_clear_all_prev_pwrst(per_pwrdm);
-
        mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
        switch (mpu_next_state) {
        case PWRDM_POWER_ON:
@@ -382,10 +376,12 @@ void omap_sram_idle(void)
                        if (!console_trylock())
                                goto console_still_active;
 
-       pwrdm_pre_transition();
+       if (mpu_next_state < PWRDM_POWER_ON)
+               pwrdm_pre_transition(mpu_pwrdm);
 
        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
+               pwrdm_pre_transition(per_pwrdm);
                per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
                omap_uart_prepare_idle(2);
                omap_uart_prepare_idle(3);
@@ -398,6 +394,7 @@ void omap_sram_idle(void)
        if (core_next_state < PWRDM_POWER_ON) {
                omap_uart_prepare_idle(0);
                omap_uart_prepare_idle(1);
+               pwrdm_pre_transition(core_pwrdm);
                if (core_next_state == PWRDM_POWER_OFF) {
                        omap3_core_save_context();
                        omap3_cm_save_context();
@@ -448,15 +445,10 @@ void omap_sram_idle(void)
                }
                omap_uart_resume_idle(0);
                omap_uart_resume_idle(1);
-               if (core_next_state == PWRDM_POWER_OFF)
-                       omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
-                                              OMAP3430_GR_MOD,
-                                              OMAP3_PRM_VOLTCTRL_OFFSET);
+               pwrdm_post_transition(core_pwrdm);
        }
        omap3_intc_resume_idle();
 
-       pwrdm_post_transition();
-
        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
                per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
@@ -465,6 +457,7 @@ void omap_sram_idle(void)
                        omap3_per_restore_context();
                omap_uart_resume_idle(2);
                omap_uart_resume_idle(3);
+               pwrdm_post_transition(per_pwrdm);
        }
 
        if (!is_suspending())
@@ -481,23 +474,13 @@ console_still_active:
                        omap3_disable_io_chain();
        }
 
-       clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
-}
-
-int omap3_can_sleep(void)
-{
-       if (!omap_uart_can_sleep())
-               return 0;
-       return 1;
+       if (mpu_next_state < PWRDM_POWER_ON)
+               pwrdm_post_transition(mpu_pwrdm);
 }
 
 static void omap3_pm_idle(void)
 {
        local_irq_disable();
-       local_fiq_disable();
-
-       if (!omap3_can_sleep())
-               goto out;
 
        if (omap_irq_pending() || need_resched())
                goto out;
@@ -511,7 +494,6 @@ static void omap3_pm_idle(void)
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 
 out:
-       local_fiq_enable();
        local_irq_enable();
 }
 
@@ -750,6 +732,11 @@ static void __init prcm_setup_regs(void)
 
        omap3_iva_idle();
        omap3_d2d_idle();
+
+       /* enable sys_clkreq signalling */
+       omap2_prm_rmw_mod_reg_bits((OMAP3430_AUTO_OFF_MASK | OMAP3430_AUTO_RET_MASK
+               | OMAP3430_AUTO_SLEEP_MASK), OMAP3430_AUTO_RET_MASK,
+               OMAP3430_GR_MOD, OMAP3_PRM_VOLTCTRL_OFFSET);
 }
 
 void omap3_pm_off_mode_enable(int enable)
@@ -906,7 +893,6 @@ static int __init omap3_pm_init(void)
        neon_pwrdm = pwrdm_lookup("neon_pwrdm");
        per_pwrdm = pwrdm_lookup("per_pwrdm");
        core_pwrdm = pwrdm_lookup("core_pwrdm");
-       cam_pwrdm = pwrdm_lookup("cam_pwrdm");
 
        neon_clkdm = clkdm_lookup("neon_clkdm");
        mpu_clkdm = clkdm_lookup("mpu_clkdm");
@@ -937,14 +923,12 @@ static int __init omap3_pm_init(void)
                                        "allocating for secure sram context\n");
 
                local_irq_disable();
-               local_fiq_disable();
 
                omap_dma_global_context_save();
                omap3_save_secure_ram_context();
                omap_dma_global_context_restore();
 
                local_irq_enable();
-               local_fiq_enable();
        }
 
        omap3_save_scratchpad_contents();
index 8a18d1b..bb6780d 100644 (file)
@@ -972,7 +972,13 @@ int pwrdm_wait_transition(struct powerdomain *pwrdm)
 
 int pwrdm_state_switch(struct powerdomain *pwrdm)
 {
-       return _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW);
+       int ret;
+
+       ret = pwrdm_wait_transition(pwrdm);
+       if (!ret)
+               ret = _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW);
+
+       return ret;
 }
 
 int pwrdm_clkdm_state_switch(struct clockdomain *clkdm)
@@ -985,15 +991,23 @@ int pwrdm_clkdm_state_switch(struct clockdomain *clkdm)
        return -EINVAL;
 }
 
-int pwrdm_pre_transition(void)
+int pwrdm_pre_transition(struct powerdomain *pwrdm)
 {
-       pwrdm_for_each(_pwrdm_pre_transition_cb, NULL);
+       if (pwrdm)
+               _pwrdm_pre_transition_cb(pwrdm, NULL);
+       else
+               pwrdm_for_each(_pwrdm_pre_transition_cb, NULL);
+
        return 0;
 }
 
-int pwrdm_post_transition(void)
+int pwrdm_post_transition(struct powerdomain *pwrdm)
 {
-       pwrdm_for_each(_pwrdm_post_transition_cb, NULL);
+       if (pwrdm)
+               _pwrdm_post_transition_cb(pwrdm, NULL);
+       else
+               pwrdm_for_each(_pwrdm_post_transition_cb, NULL);
+
        return 0;
 }
 
index 0d72a8a..a468de4 100644 (file)
@@ -214,8 +214,8 @@ int pwrdm_wait_transition(struct powerdomain *pwrdm);
 
 int pwrdm_state_switch(struct powerdomain *pwrdm);
 int pwrdm_clkdm_state_switch(struct clockdomain *clkdm);
-int pwrdm_pre_transition(void);
-int pwrdm_post_transition(void);
+int pwrdm_pre_transition(struct powerdomain *pwrdm);
+int pwrdm_post_transition(struct powerdomain *pwrdm);
 int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
 int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
 bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
diff --git a/arch/arm/mach-omap2/sdram-micron-mt29c4g96mazapcjg-5.h b/arch/arm/mach-omap2/sdram-micron-mt29c4g96mazapcjg-5.h
new file mode 100644 (file)
index 0000000..ea83cc7
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * SDRC register values for the Micron mt29c4g96mazapcjg-5
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARCH_ARM_MACH_OMAP2_SDRAM_MICRON_mt29c4g96mazapcjg5
+#define ARCH_ARM_MACH_OMAP2_SDRAM_MICRON_mt29c4g96mazapcjg5
+
+#include <plat/sdrc.h>
+
+static struct omap_sdrc_params mt29c4g96mazapcjg5_sdrc_params[] = {
+       [0] = {
+               .rate        = 200000000,
+               .actim_ctrla = 0x7ae1b4c6,
+               .actim_ctrlb = 0x00021217,
+               .rfr_ctrl    = 0x0005e601,
+               .mr          = 0x00000032,
+       },
+       [1] = {
+               .rate        = 166000000,
+               .actim_ctrla = 0x629db4c6,
+               .actim_ctrlb = 0x00011113,
+               .rfr_ctrl    = 0x0004e201,
+               .mr          = 0x00000032,
+       },
+       /* ??? */
+       [2] = {
+               .rate        = 83000000,
+               .actim_ctrla = 0x51512283,
+               .actim_ctrlb = 0x0001120c,
+               .rfr_ctrl    = 0x00025501,
+               .mr          = 0x00000032,
+       },
+       [4] = {
+               .rate        = 0
+       },
+};
+
+#endif
index a391b49..d503989 100644 (file)
 /* Micron MT46H32M32LF-6 */
 /* XXX Using ARE = 0x1 (no autorefresh burst) -- can this be changed? */
 static struct omap_sdrc_params mt46h32m32lf6_sdrc_params[] = {
-       [0] = {
+       [0] = { /* fake entry for overclocking */
+               .rate        = 200000000,
+               .actim_ctrla = 0xb325b4c6,
+               .actim_ctrlb = 0x0002121b,
+               .rfr_ctrl    = 0x0005e601,
+               .mr          = 0x00000032,
+       },
+       [1] = {
                .rate        = 166000000,
                .actim_ctrla = 0x9a9db4c6,
                .actim_ctrlb = 0x00011217,
                .rfr_ctrl    = 0x0004dc01,
                .mr          = 0x00000032,
        },
-       [1] = {
+       [2] = {
                .rate        = 165941176,
                .actim_ctrla = 0x9a9db4c6,
                .actim_ctrlb = 0x00011217,
                .rfr_ctrl    = 0x0004dc01,
                .mr          = 0x00000032,
        },
-       [2] = {
+       [3] = {
                .rate        = 83000000,
                .actim_ctrla = 0x51512283,
                .actim_ctrlb = 0x0001120c,
                .rfr_ctrl    = 0x00025501,
                .mr          = 0x00000032,
        },
-       [3] = {
+       [4] = {
                .rate        = 82970588,
                .actim_ctrla = 0x51512283,
                .actim_ctrlb = 0x0001120c,
                .rfr_ctrl    = 0x00025501,
                .mr          = 0x00000032,
        },
-       [4] = {
+       [5] = {
                .rate        = 0
        },
 };
index 8f27828..e806adc 100644 (file)
@@ -100,17 +100,23 @@ int omap2_sdrc_get_params(unsigned long r,
        sp0 = sdrc_init_params_cs0;
        sp1 = sdrc_init_params_cs1;
 
-       while (sp0->rate && sp0->rate != r) {
+       /* assume max clock */
+       *sdrc_cs0 = sp0;
+       *sdrc_cs1 = sp1;
+
+       while (sp0->rate) {
+               if (sp0->rate >= r) {
+                       *sdrc_cs0 = sp0;
+                       *sdrc_cs1 = sp1;
+               }
                sp0++;
                if (sdrc_init_params_cs1)
                        sp1++;
        }
 
-       if (!sp0->rate)
+       if (*sdrc_cs0 == NULL)
                return -1;
 
-       *sdrc_cs0 = sp0;
-       *sdrc_cs1 = sp1;
        return 0;
 }
 
index 9992dbf..3b6756e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/serial_8250.h>
 #include <linux/pm_runtime.h>
 #include <linux/console.h>
+#include <linux/module.h>
 
 #ifdef CONFIG_SERIAL_OMAP
 #include <plat/omap-serial.h>
@@ -59,7 +60,7 @@
  * disabled via sysfs. This also causes that any deeper omap sleep states are
  * blocked. 
  */
-#define DEFAULT_TIMEOUT 0
+#define DEFAULT_TIMEOUT (10 * HZ)
 
 #define MAX_UART_HWMOD_NAME_LEN                16
 
@@ -73,6 +74,7 @@ struct omap_uart_state {
        void __iomem *wk_en;
        u32 wk_mask;
        u32 padconf;
+       u32 padconf2;
        u32 dma_enabled;
 
        struct clk *ick;
@@ -288,10 +290,17 @@ static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
        }
 
        /* Ensure IOPAD wake-enables are set */
-       if (cpu_is_omap34xx() && uart->padconf) {
-               u16 v = omap_ctrl_readw(uart->padconf);
-               v |= OMAP3_PADCONF_WAKEUPENABLE0;
-               omap_ctrl_writew(v, uart->padconf);
+       if (cpu_is_omap34xx()) {
+               if (uart->padconf) {
+                       u16 v = omap_ctrl_readw(uart->padconf);
+                       v |= OMAP3_PADCONF_WAKEUPENABLE0;
+                       omap_ctrl_writew(v, uart->padconf);
+               }
+               if (uart->padconf2) {
+                       u16 v = omap_ctrl_readw(uart->padconf2);
+                       v |= OMAP3_PADCONF_WAKEUPENABLE0;
+                       omap_ctrl_writew(v, uart->padconf2);
+               }
        }
 }
 
@@ -305,10 +314,17 @@ static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
        }
 
        /* Ensure IOPAD wake-enables are cleared */
-       if (cpu_is_omap34xx() && uart->padconf) {
-               u16 v = omap_ctrl_readw(uart->padconf);
-               v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
-               omap_ctrl_writew(v, uart->padconf);
+       if (cpu_is_omap34xx()) {
+               if (uart->padconf) {
+                       u16 v = omap_ctrl_readw(uart->padconf);
+                       v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
+                       omap_ctrl_writew(v, uart->padconf);
+               }
+               if (uart->padconf2) {
+                       u16 v = omap_ctrl_readw(uart->padconf2);
+                       v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
+                       omap_ctrl_writew(v, uart->padconf2);
+               }
        }
 }
 
@@ -360,6 +376,19 @@ static void omap_uart_allow_sleep(struct omap_uart_state *uart)
        del_timer(&uart->timer);
 }
 
+void omap_uart_block_sleep_id(int num)
+{
+       struct omap_uart_state *uart;
+
+       list_for_each_entry(uart, &uart_list, node) {
+               if (num == uart->num && uart->can_sleep) {
+                       omap_uart_block_sleep(uart);
+                       return;
+               }
+       }
+}
+EXPORT_SYMBOL(omap_uart_block_sleep_id);
+
 static void omap_uart_idle_timer(unsigned long data)
 {
        struct omap_uart_state *uart = (struct omap_uart_state *)data;
@@ -388,8 +417,12 @@ void omap_uart_resume_idle(int num)
                        omap_uart_enable_clocks(uart);
 
                        /* Check for IO pad wakeup */
-                       if (cpu_is_omap34xx() && uart->padconf) {
-                               u16 p = omap_ctrl_readw(uart->padconf);
+                       if (cpu_is_omap34xx()) {
+                               u16 p = 0;
+                               if (uart->padconf)
+                                       p |= omap_ctrl_readw(uart->padconf);
+                               if (uart->padconf2)
+                                       p |= omap_ctrl_readw(uart->padconf2);
 
                                if (p & OMAP3_PADCONF_WAKEUPEVENT0)
                                        omap_uart_block_sleep(uart);
@@ -468,6 +501,7 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
                u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD;
                u32 wk_mask = 0;
                u32 padconf = 0;
+               u32 padconf2 = 0;
 
                /* XXX These PRM accesses do not belong here */
                uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1);
@@ -476,6 +510,7 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
                case 0:
                        wk_mask = OMAP3430_ST_UART1_MASK;
                        padconf = 0x182;
+                       padconf2 = 0x180;
                        break;
                case 1:
                        wk_mask = OMAP3430_ST_UART2_MASK;
@@ -492,6 +527,7 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
                }
                uart->wk_mask = wk_mask;
                uart->padconf = padconf;
+               uart->padconf2 = padconf2;
        } else if (cpu_is_omap24xx()) {
                u32 wk_mask = 0;
                u32 wk_en = PM_WKEN1, wk_st = PM_WKST1;
@@ -522,6 +558,7 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
                uart->wk_st = NULL;
                uart->wk_mask = 0;
                uart->padconf = 0;
+               uart->padconf2 = 0;
        }
 
        uart->irqflags |= IRQF_SHARED;
@@ -821,7 +858,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
 
        console_unlock();
 
-       if ((cpu_is_omap34xx() && uart->padconf) ||
+       if ((cpu_is_omap34xx() && (uart->padconf || uart->padconf2)) ||
            (uart->wk_en && uart->wk_mask)) {
                device_init_wakeup(&pdev->dev, true);
                DEV_CREATE_FILE(&pdev->dev, &dev_attr_sleep_timeout);
index 53d9d0a..955566e 100644 (file)
@@ -29,6 +29,7 @@ static int sr_class3_enable(struct voltagedomain *voltdm)
 
 static int sr_class3_disable(struct voltagedomain *voltdm, int is_volt_reset)
 {
+       sr_disable_errgen(voltdm);
        omap_vp_disable(voltdm);
        sr_disable(voltdm);
        if (is_volt_reset)
index cf246b3..f535d3c 100644 (file)
 #define SR_DISABLE_TIMEOUT     200
 
 struct omap_sr {
+       struct list_head                node;
+       struct platform_device          *pdev;
+       struct omap_sr_nvalue_table     *nvalue_table;
+       struct voltagedomain            *voltdm;
+       struct dentry                   *dbg_dir;
+       unsigned int                    irq;
        int                             srid;
        int                             ip_type;
        int                             nvalue_count;
@@ -49,13 +55,7 @@ struct omap_sr {
        u32                             senp_avgweight;
        u32                             senp_mod;
        u32                             senn_mod;
-       unsigned int                    irq;
        void __iomem                    *base;
-       struct platform_device          *pdev;
-       struct list_head                node;
-       struct omap_sr_nvalue_table     *nvalue_table;
-       struct voltagedomain            *voltdm;
-       struct dentry                   *dbg_dir;
 };
 
 /* sr_list contains all the instances of smartreflex module */
@@ -74,10 +74,6 @@ static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
                                        u32 value)
 {
        u32 reg_val;
-       u32 errconfig_offs = 0, errconfig_mask = 0;
-
-       reg_val = __raw_readl(sr->base + offset);
-       reg_val &= ~mask;
 
        /*
         * Smartreflex error config register is special as it contains
@@ -88,16 +84,15 @@ static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
         * if they are currently set, but does allow the caller to write
         * those bits.
         */
-       if (sr->ip_type == SR_TYPE_V1) {
-               errconfig_offs = ERRCONFIG_V1;
-               errconfig_mask = ERRCONFIG_STATUS_V1_MASK;
-       } else if (sr->ip_type == SR_TYPE_V2) {
-               errconfig_offs = ERRCONFIG_V2;
-               errconfig_mask = ERRCONFIG_VPBOUNDINTST_V2;
-       }
+       if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+               mask |= ERRCONFIG_STATUS_V1_MASK;
+       else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+               mask |= ERRCONFIG_VPBOUNDINTST_V2;
+
+       reg_val = __raw_readl(sr->base + offset);
+       reg_val &= ~mask;
 
-       if (offset == errconfig_offs)
-               reg_val &= ~errconfig_mask;
+       value &= mask;
 
        reg_val |= value;
 
@@ -128,21 +123,28 @@ static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
 
 static irqreturn_t sr_interrupt(int irq, void *data)
 {
-       struct omap_sr *sr_info = (struct omap_sr *)data;
+       struct omap_sr *sr_info = data;
        u32 status = 0;
 
-       if (sr_info->ip_type == SR_TYPE_V1) {
+       switch (sr_info->ip_type) {
+       case SR_TYPE_V1:
                /* Read the status bits */
                status = sr_read_reg(sr_info, ERRCONFIG_V1);
 
                /* Clear them by writing back */
                sr_write_reg(sr_info, ERRCONFIG_V1, status);
-       } else if (sr_info->ip_type == SR_TYPE_V2) {
+               break;
+       case SR_TYPE_V2:
                /* Read the status bits */
                status = sr_read_reg(sr_info, IRQSTATUS);
 
                /* Clear them by writing back */
                sr_write_reg(sr_info, IRQSTATUS, status);
+               break;
+       default:
+               dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
+                       sr_info->ip_type);
+               return IRQ_NONE;
        }
 
        if (sr_class->notify)
@@ -166,6 +168,7 @@ static void sr_set_clk_length(struct omap_sr *sr)
                        __func__);
                return;
        }
+
        sys_clk_speed = clk_get_rate(sys_ck);
        clk_put(sys_ck);
 
@@ -267,7 +270,7 @@ static int sr_late_init(struct omap_sr *sr_info)
                        goto error;
                }
                ret = request_irq(sr_info->irq, sr_interrupt,
-                               0, name, (void *)sr_info);
+                               0, name, sr_info);
                if (ret)
                        goto error;
                disable_irq(sr_info->irq);
@@ -288,12 +291,15 @@ error:
                "not function as desired\n", __func__);
        kfree(name);
        kfree(sr_info);
+
        return ret;
 }
 
 static void sr_v1_disable(struct omap_sr *sr)
 {
        int timeout = 0;
+       int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+                       ERRCONFIG_MCUBOUNDINTST;
 
        /* Enable MCUDisableAcknowledge interrupt */
        sr_modify_reg(sr, ERRCONFIG_V1,
@@ -302,13 +308,13 @@ static void sr_v1_disable(struct omap_sr *sr)
        /* SRCONFIG - disable SR */
        sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
 
-       /* Disable all other SR interrupts and clear the status */
+       /* Disable all other SR interrupts and clear the status as needed */
+       if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+               errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
        sr_modify_reg(sr, ERRCONFIG_V1,
                        (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
                        ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
-                       (ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
-                       ERRCONFIG_MCUBOUNDINTST |
-                       ERRCONFIG_VPBOUNDINTST_V1));
+                       errconf_val);
 
        /*
         * Wait for SR to be disabled.
@@ -337,9 +343,17 @@ static void sr_v2_disable(struct omap_sr *sr)
        /* SRCONFIG - disable SR */
        sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
 
-       /* Disable all other SR interrupts and clear the status */
-       sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+       /*
+        * Disable all other SR interrupts and clear the status
+        * write to status register ONLY on need basis - only if status
+        * is set.
+        */
+       if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+               sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
                        ERRCONFIG_VPBOUNDINTST_V2);
+       else
+               sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+                               0x0);
        sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
                        IRQENABLE_MCUVALIDINT |
                        IRQENABLE_MCUBOUNDSINT));
@@ -398,15 +412,16 @@ static u32 sr_retrieve_nvalue(struct omap_sr *sr, u32 efuse_offs)
  */
 int sr_configure_errgen(struct voltagedomain *voltdm)
 {
-       u32 sr_config, sr_errconfig, errconfig_offs, vpboundint_en;
-       u32 vpboundint_st, senp_en = 0, senn_en = 0;
+       u32 sr_config, sr_errconfig, errconfig_offs;
+       u32 vpboundint_en, vpboundint_st;
+       u32 senp_en = 0, senn_en = 0;
        u8 senp_shift, senn_shift;
        struct omap_sr *sr = _sr_lookup(voltdm);
 
        if (IS_ERR(sr)) {
                pr_warning("%s: omap_sr struct for sr_%s not found\n",
                        __func__, voltdm->name);
-               return -EINVAL;
+               return PTR_ERR(sr);
        }
 
        if (!sr->clk_length)
@@ -418,20 +433,23 @@ int sr_configure_errgen(struct voltagedomain *voltdm)
        sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
                SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
 
-       if (sr->ip_type == SR_TYPE_V1) {
+       switch (sr->ip_type) {
+       case SR_TYPE_V1:
                sr_config |= SRCONFIG_DELAYCTRL;
                senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
                senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
                errconfig_offs = ERRCONFIG_V1;
                vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
                vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
-       } else if (sr->ip_type == SR_TYPE_V2) {
+               break;
+       case SR_TYPE_V2:
                senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
                senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
                errconfig_offs = ERRCONFIG_V2;
                vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
                vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
-       } else {
+               break;
+       default:
                dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
                        "module without specifying the ip\n", __func__);
                return -EINVAL;
@@ -447,8 +465,55 @@ int sr_configure_errgen(struct voltagedomain *voltdm)
                sr_errconfig);
 
        /* Enabling the interrupts if the ERROR module is used */
-       sr_modify_reg(sr, errconfig_offs,
-               vpboundint_en, (vpboundint_en | vpboundint_st));
+       sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+                     vpboundint_en);
+
+       return 0;
+}
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @voltdm:    VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct voltagedomain *voltdm)
+{
+       u32 errconfig_offs;
+       u32 vpboundint_en, vpboundint_st;
+       struct omap_sr *sr = _sr_lookup(voltdm);
+
+       if (IS_ERR(sr)) {
+               pr_warning("%s: omap_sr struct for sr_%s not found\n",
+                       __func__, voltdm->name);
+               return PTR_ERR(sr);
+       }
+
+       switch (sr->ip_type) {
+       case SR_TYPE_V1:
+               errconfig_offs = ERRCONFIG_V1;
+               vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+               vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+               break;
+       case SR_TYPE_V2:
+               errconfig_offs = ERRCONFIG_V2;
+               vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+               vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+               break;
+       default:
+               dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
+                       "module without specifying the ip\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Disable the interrupts of ERROR module */
+       sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+       /* Disable the Sensor and errorgen */
+       sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
 
        return 0;
 }
@@ -475,7 +540,7 @@ int sr_configure_minmax(struct voltagedomain *voltdm)
        if (IS_ERR(sr)) {
                pr_warning("%s: omap_sr struct for sr_%s not found\n",
                        __func__, voltdm->name);
-               return -EINVAL;
+               return PTR_ERR(sr);
        }
 
        if (!sr->clk_length)
@@ -488,14 +553,17 @@ int sr_configure_minmax(struct voltagedomain *voltdm)
                SRCONFIG_SENENABLE |
                (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
 
-       if (sr->ip_type == SR_TYPE_V1) {
+       switch (sr->ip_type) {
+       case SR_TYPE_V1:
                sr_config |= SRCONFIG_DELAYCTRL;
                senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
                senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
-       } else if (sr->ip_type == SR_TYPE_V2) {
+               break;
+       case SR_TYPE_V2:
                senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
                senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
-       } else {
+               break;
+       default:
                dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
                        "module without specifying the ip\n", __func__);
                return -EINVAL;
@@ -511,20 +579,27 @@ int sr_configure_minmax(struct voltagedomain *voltdm)
         * Enabling the interrupts if MINMAXAVG module is used.
         * TODO: check if all the interrupts are mandatory
         */
-       if (sr->ip_type == SR_TYPE_V1) {
+       switch (sr->ip_type) {
+       case SR_TYPE_V1:
                sr_modify_reg(sr, ERRCONFIG_V1,
                        (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
                        ERRCONFIG_MCUBOUNDINTEN),
                        (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
                         ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
                         ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
-       } else if (sr->ip_type == SR_TYPE_V2) {
+               break;
+       case SR_TYPE_V2:
                sr_write_reg(sr, IRQSTATUS,
                        IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
                        IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
                sr_write_reg(sr, IRQENABLE_SET,
                        IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
                        IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+               break;
+       default:
+               dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
+                       "module without specifying the ip\n", __func__);
+               return -EINVAL;
        }
 
        return 0;
@@ -543,15 +618,15 @@ int sr_configure_minmax(struct voltagedomain *voltdm)
  */
 int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
 {
-       u32 nvalue_reciprocal;
        struct omap_volt_data *volt_data;
        struct omap_sr *sr = _sr_lookup(voltdm);
+       u32 nvalue_reciprocal;
        int ret;
 
        if (IS_ERR(sr)) {
                pr_warning("%s: omap_sr struct for sr_%s not found\n",
                        __func__, voltdm->name);
-               return -EINVAL;
+               return PTR_ERR(sr);
        }
 
        volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
@@ -559,7 +634,7 @@ int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
        if (IS_ERR(volt_data)) {
                dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table"
                        "for nominal voltage %ld\n", __func__, volt);
-               return -ENODATA;
+               return PTR_ERR(volt_data);
        }
 
        nvalue_reciprocal = sr_retrieve_nvalue(sr, volt_data->sr_efuse_offs);
@@ -617,10 +692,17 @@ void sr_disable(struct voltagedomain *voltdm)
         * disable the clocks.
         */
        if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
-               if (sr->ip_type == SR_TYPE_V1)
+               switch (sr->ip_type) {
+               case SR_TYPE_V1:
                        sr_v1_disable(sr);
-               else if (sr->ip_type == SR_TYPE_V2)
+                       break;
+               case SR_TYPE_V2:
                        sr_v2_disable(sr);
+                       break;
+               default:
+                       dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n",
+                               sr->ip_type);
+               }
        }
 
        pm_runtime_put_sync_suspend(&sr->pdev->dev);
@@ -779,10 +861,10 @@ void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data)
        sr_pmic_data = pmic_data;
 }
 
-/* PM Debug Fs enteries to enable disable smartreflex. */
+/* PM Debug FS entries to enable and disable smartreflex. */
 static int omap_sr_autocomp_show(void *data, u64 *val)
 {
-       struct omap_sr *sr_info = (struct omap_sr *) data;
+       struct omap_sr *sr_info = data;
 
        if (!sr_info) {
                pr_warning("%s: omap_sr struct not found\n", __func__);
@@ -796,7 +878,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val)
 
 static int omap_sr_autocomp_store(void *data, u64 val)
 {
-       struct omap_sr *sr_info = (struct omap_sr *) data;
+       struct omap_sr *sr_info = data;
 
        if (!sr_info) {
                pr_warning("%s: omap_sr struct not found\n", __func__);
@@ -804,7 +886,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
        }
 
        /* Sanity check */
-       if (val && (val != 1)) {
+       if (val > 1) {
                pr_warning("%s: Invalid argument %lld\n", __func__, val);
                return -EINVAL;
        }
@@ -821,11 +903,11 @@ static int omap_sr_autocomp_store(void *data, u64 val)
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
-               omap_sr_autocomp_store, "%llu\n");
+                       omap_sr_autocomp_store, "%llu\n");
 
 static int __init omap_sr_probe(struct platform_device *pdev)
 {
-       struct omap_sr *sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
+       struct omap_sr *sr_info;
        struct omap_sr_data *pdata = pdev->dev.platform_data;
        struct resource *mem, *irq;
        struct dentry *nvalue_dir;
@@ -833,12 +915,15 @@ static int __init omap_sr_probe(struct platform_device *pdev)
        int i, ret = 0;
        char *name;
 
+       sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
        if (!sr_info) {
                dev_err(&pdev->dev, "%s: unable to allocate sr_info\n",
                        __func__);
                return -ENOMEM;
        }
 
+       platform_set_drvdata(pdev, sr_info);
+
        if (!pdata) {
                dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
                ret = -EINVAL;
@@ -897,14 +982,14 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                ret = sr_late_init(sr_info);
                if (ret) {
                        pr_warning("%s: Error in SR late init\n", __func__);
-                       return ret;
+                       goto err_iounmap;
                }
        }
 
        dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
        if (!sr_dbg_dir) {
                sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
-               if (!sr_dbg_dir) {
+               if (IS_ERR_OR_NULL(sr_dbg_dir)) {
                        ret = PTR_ERR(sr_dbg_dir);
                        pr_err("%s:sr debugfs dir creation failed(%d)\n",
                                __func__, ret);
@@ -921,7 +1006,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
        }
        sr_info->dbg_dir = debugfs_create_dir(name, sr_dbg_dir);
        kfree(name);
-       if (IS_ERR(sr_info->dbg_dir)) {
+       if (IS_ERR_OR_NULL(sr_info->dbg_dir)) {
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
                        __func__);
                ret = PTR_ERR(sr_info->dbg_dir);
@@ -938,7 +1023,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                        &sr_info->err_minlimit);
 
        nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
-       if (IS_ERR(nvalue_dir)) {
+       if (IS_ERR_OR_NULL(nvalue_dir)) {
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
                        "for n-values\n", __func__);
                ret = PTR_ERR(nvalue_dir);
@@ -994,7 +1079,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
        if (IS_ERR(sr_info)) {
                dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
                        __func__);
-               return -EINVAL;
+               return PTR_ERR(sr_info);
        }
 
        if (sr_info->autocomp_active)
@@ -1011,8 +1096,32 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void __devexit omap_sr_shutdown(struct platform_device *pdev)
+{
+       struct omap_sr_data *pdata = pdev->dev.platform_data;
+       struct omap_sr *sr_info;
+
+       if (!pdata) {
+               dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+               return;
+       }
+
+       sr_info = _sr_lookup(pdata->voltdm);
+       if (IS_ERR(sr_info)) {
+               dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+                       __func__);
+               return;
+       }
+
+       if (sr_info->autocomp_active)
+               sr_stop_vddautocomp(sr_info);
+
+       return;
+}
+
 static struct platform_driver smartreflex_driver = {
-       .remove         = omap_sr_remove,
+       .remove         = __devexit_p(omap_sr_remove),
+       .shutdown       = __devexit_p(omap_sr_shutdown),
        .driver         = {
                .name   = "smartreflex",
        },
@@ -1042,12 +1151,12 @@ static int __init sr_init(void)
 
        return 0;
 }
+late_initcall(sr_init);
 
 static void __exit sr_exit(void)
 {
        platform_driver_unregister(&smartreflex_driver);
 }
-late_initcall(sr_init);
 module_exit(sr_exit);
 
 MODULE_DESCRIPTION("OMAP Smartreflex Driver");
index 5f35b9e..5809141 100644 (file)
@@ -152,6 +152,15 @@ struct omap_sr_pmic_data {
        void (*sr_pmic_init) (void);
 };
 
+/**
+ * struct omap_smartreflex_dev_attr - Smartreflex Device attribute.
+ *
+ * @sensor_voltdm_name:       Name of voltdomain of SR instance
+ */
+struct omap_smartreflex_dev_attr {
+       const char      *sensor_voltdm_name;
+};
+
 #ifdef CONFIG_OMAP_SMARTREFLEX
 /*
  * The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR.
@@ -231,6 +240,7 @@ void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
 int sr_enable(struct voltagedomain *voltdm, unsigned long volt);
 void sr_disable(struct voltagedomain *voltdm);
 int sr_configure_errgen(struct voltagedomain *voltdm);
+int sr_disable_errgen(struct voltagedomain *voltdm);
 int sr_configure_minmax(struct voltagedomain *voltdm);
 
 /* API to register the smartreflex class driver with the smartreflex driver */
index 9f43fcc..1705204 100644 (file)
@@ -74,6 +74,7 @@ static int sr_dev_init(struct omap_hwmod *oh, void *user)
        struct omap_sr_data *sr_data;
        struct platform_device *pdev;
        struct omap_volt_data *volt_data;
+       struct omap_smartreflex_dev_attr *sr_dev_attr;
        char *name = "smartreflex";
        static int i;
 
@@ -84,9 +85,11 @@ static int sr_dev_init(struct omap_hwmod *oh, void *user)
                return -ENOMEM;
        }
 
-       if (!oh->vdd_name) {
+       sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+       if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
                pr_err("%s: No voltage domain specified for %s."
-                       "Cannot initialize\n", __func__, oh->name);
+                               "Cannot initialize\n", __func__,
+                                       oh->name);
                goto exit;
        }
 
@@ -94,10 +97,10 @@ static int sr_dev_init(struct omap_hwmod *oh, void *user)
        sr_data->senn_mod = 0x1;
        sr_data->senp_mod = 0x1;
 
-       sr_data->voltdm = voltdm_lookup(oh->vdd_name);
-       if (IS_ERR(sr_data->voltdm)) {
+       sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+       if (!sr_data->voltdm) {
                pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
-                       __func__, oh->vdd_name);
+                       __func__, sr_dev_attr->sensor_voltdm_name);
                goto exit;
        }
 
index 6f5849a..dcd93be 100644 (file)
@@ -215,10 +215,14 @@ wait_sdrc_idle:
        bx      lr
 configure_core_dpll:
        ldr     r11, omap3_cm_clksel1_pll
+#if 0
        ldr     r12, [r11]
        ldr     r10, core_m2_mask_val   @ modify m2 for core dpll
        and     r12, r12, r10
        orr     r12, r12, r0, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
+#else
+       mov     r12, r0  @ HACK!
+#endif
        str     r12, [r11]
        ldr     r12, [r11]              @ posted-write barrier for CM
        bx      lr
index 037b0d7..ad273b7 100644 (file)
@@ -90,7 +90,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
 }
 
 static struct irqaction omap2_gp_timer_irq = {
-       .name           = "gp timer",
+       .name           = "gp_timer",
        .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = omap2_gp_timer_interrupt,
 };
@@ -99,7 +99,7 @@ static int omap2_gp_timer_set_next_event(unsigned long cycles,
                                         struct clock_event_device *evt)
 {
        __omap_dm_timer_load_start(&clkev, OMAP_TIMER_CTRL_ST,
-                                               0xffffffff - cycles, 1);
+                                  0xffffffff - cycles, OMAP_TIMER_POSTED);
 
        return 0;
 }
@@ -109,7 +109,7 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
 {
        u32 period;
 
-       __omap_dm_timer_stop(&clkev, 1, clkev.rate);
+       __omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate);
 
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
@@ -117,10 +117,10 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
                period -= 1;
                /* Looks like we need to first set the load value separately */
                __omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG,
-                                       0xffffffff - period, 1);
+                                     0xffffffff - period, OMAP_TIMER_POSTED);
                __omap_dm_timer_load_start(&clkev,
                                        OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
-                                               0xffffffff - period, 1);
+                                       0xffffffff - period, OMAP_TIMER_POSTED);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                break;
@@ -132,16 +132,30 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
 }
 
 static struct clock_event_device clockevent_gpt = {
-       .name           = "gp timer",
+       .name           = "gp_timer",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .shift          = 32,
        .set_next_event = omap2_gp_timer_set_next_event,
        .set_mode       = omap2_gp_timer_set_mode,
 };
 
+/**
+ * omap_dm_timer_get_errata - get errata flags for a timer
+ *
+ * Get the timer errata flags that are specific to the OMAP device being used.
+ */
+u32 __init omap_dm_timer_get_errata(void)
+{
+       if (cpu_is_omap24xx())
+               return 0;
+
+       return OMAP_TIMER_ERRATA_I103_I767;
+}
+
 static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
                                                int gptimer_id,
-                                               const char *fck_source)
+                                               const char *fck_source,
+                                               int posted)
 {
        char name[10]; /* 10 = sizeof("gptXX_Xck0") */
        struct omap_hwmod *oh;
@@ -169,13 +183,6 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
        if (IS_ERR(timer->fclk))
                return -ENODEV;
 
-       sprintf(name, "gpt%d_ick", gptimer_id);
-       timer->iclk = clk_get(NULL, name);
-       if (IS_ERR(timer->iclk)) {
-               clk_put(timer->fclk);
-               return -ENODEV;
-       }
-
        omap_hwmod_enable(oh);
 
        sys_timer_reserved |= (1 << (gptimer_id - 1));
@@ -196,10 +203,15 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
        }
        __omap_dm_timer_init_regs(timer);
        __omap_dm_timer_reset(timer, 1, 1);
-       timer->posted = 1;
 
-       timer->rate = clk_get_rate(timer->fclk);
+       if (posted)
+               __omap_dm_timer_enable_posted(timer);
+
+       /* Check that the intended posted configuration matches the actual */
+       if (posted != timer->posted)
+               return -EINVAL;
 
+       timer->rate = clk_get_rate(timer->fclk);
        timer->reserved = 1;
 
        return res;
@@ -210,7 +222,17 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
 {
        int res;
 
-       res = omap_dm_timer_init_one(&clkev, gptimer_id, fck_source);
+       clkev.errata = omap_dm_timer_get_errata();
+
+       /*
+        * For clock-event timers we never read the timer counter and
+        * so we are not impacted by errata i103 and i767. Therefore,
+        * we can safely ignore this errata for clock-event timers.
+        */
+       __omap_dm_timer_override_errata(&clkev, OMAP_TIMER_ERRATA_I103_I767);
+
+       res = omap_dm_timer_init_one(&clkev, gptimer_id, fck_source,
+                                    OMAP_TIMER_POSTED);
        BUG_ON(res);
 
        omap2_gp_timer_irq.dev_id = (void *)&clkev;
@@ -254,37 +276,27 @@ static struct omap_dm_timer clksrc;
 /*
  * clocksource
  */
-static DEFINE_CLOCK_DATA(cd);
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
-       return (cycle_t)__omap_dm_timer_read_counter(&clksrc, 1);
+       return (cycle_t)__omap_dm_timer_read_counter(&clksrc,
+                                                    OMAP_TIMER_NONPOSTED);
 }
 
 static struct clocksource clocksource_gpt = {
-       .name           = "gp timer",
+       .name           = "gp_timer",
        .rating         = 300,
        .read           = clocksource_read_cycles,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void notrace dmtimer_update_sched_clock(void)
+static u32 notrace dmtimer_read_sched_clock(void)
 {
-       u32 cyc;
-
-       cyc = __omap_dm_timer_read_counter(&clksrc, 1);
-
-       update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-unsigned long long notrace sched_clock(void)
-{
-       u32 cyc = 0;
-
        if (clksrc.reserved)
-               cyc = __omap_dm_timer_read_counter(&clksrc, 1);
+               return __omap_dm_timer_read_counter(&clksrc,
+                                                   OMAP_TIMER_NONPOSTED);
 
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+       return 0;
 }
 
 /* Setup free-running counter for clocksource */
@@ -293,15 +305,19 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
 {
        int res;
 
-       res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
+       clksrc.errata = omap_dm_timer_get_errata();
+
+       res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source,
+                                    OMAP_TIMER_NONPOSTED);
        BUG_ON(res);
 
        pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
                gptimer_id, clksrc.rate);
 
        __omap_dm_timer_load_start(&clksrc,
-                       OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
-       init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
+                                  OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0,
+                                  OMAP_TIMER_NONPOSTED);
+       setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
 
        if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
                pr_err("Could not register clocksource %s\n",
@@ -463,6 +479,7 @@ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
        if ((sys_timer_reserved >> (id - 1)) & 0x1)
                pdata->reserved = 1;
 
+       pdata->timer_errata = omap_dm_timer_get_errata();
        pwrdm = omap_hwmod_get_pwrdm(oh);
        pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
 #ifdef CONFIG_PM
index 10b20c6..b42e971 100644 (file)
 
 #include "twl-common.h"
 #include "pm.h"
+#include "voltage.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
        .flags          = I2C_CLIENT_WAKE,
 };
 
+static int twl_set_voltage(void *data, int target_uV)
+{
+       struct voltagedomain *voltdm = (struct voltagedomain *)data;
+       return voltdm_scale(voltdm, target_uV);
+}
+
+static int twl_get_voltage(void *data)
+{
+       struct voltagedomain *voltdm = (struct voltagedomain *)data;
+       return voltdm_get_voltage(voltdm);
+}
+
 void __init omap_pmic_init(int bus, u32 clkrate,
                           const char *pmic_type, int pmic_irq,
                           struct twl4030_platform_data *pmic_data)
@@ -126,6 +139,49 @@ static struct regulator_init_data omap3_vpll2_idata = {
        .consumer_supplies              = omap3_vpll2_supplies,
 };
 
+static struct regulator_consumer_supply omap3_vdd1_supply[] = {
+       REGULATOR_SUPPLY("vcc", "mpu.0"),
+       REGULATOR_SUPPLY("vcc", "iva.0"),
+};
+
+static struct regulator_consumer_supply omap3_vdd2_supply[] = {
+       REGULATOR_SUPPLY("vcc", "l3_main.0"),
+};
+
+static struct regulator_init_data omap3_vdd1 = {
+       .constraints = {
+               .name                   = "vdd_mpu_iva",
+               .min_uV                 = 600000,
+               .max_uV                 = 1450000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL,
+               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE,
+       },
+       .num_consumer_supplies          = ARRAY_SIZE(omap3_vdd1_supply),
+       .consumer_supplies              = omap3_vdd1_supply,
+};
+
+static struct regulator_init_data omap3_vdd2 = {
+       .constraints = {
+               .name                   = "vdd_core",
+               .min_uV                 = 600000,
+               .max_uV                 = 1450000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL,
+               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE,
+       },
+       .num_consumer_supplies          = ARRAY_SIZE(omap3_vdd2_supply),
+       .consumer_supplies              = omap3_vdd2_supply,
+};
+
+static struct twl_regulator_driver_data omap3_vdd1_drvdata = {
+       .get_voltage = twl_get_voltage,
+       .set_voltage = twl_set_voltage,
+};
+
+static struct twl_regulator_driver_data omap3_vdd2_drvdata = {
+       .get_voltage = twl_get_voltage,
+       .set_voltage = twl_set_voltage,
+};
+
 void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
                                  u32 pdata_flags, u32 regulators_flags)
 {
@@ -133,6 +189,16 @@ void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
                pmic_data->irq_base = TWL4030_IRQ_BASE;
        if (!pmic_data->irq_end)
                pmic_data->irq_end = TWL4030_IRQ_END;
+       if (!pmic_data->vdd1) {
+               omap3_vdd1.driver_data = &omap3_vdd1_drvdata;
+               omap3_vdd1_drvdata.data = voltdm_lookup("mpu_iva");
+               pmic_data->vdd1 = &omap3_vdd1;
+       }
+       if (!pmic_data->vdd2) {
+               omap3_vdd2.driver_data = &omap3_vdd2_drvdata;
+               omap3_vdd2_drvdata.data = voltdm_lookup("core");
+               pmic_data->vdd2 = &omap3_vdd2;
+       }
 
        /* Common platform data configurations */
        if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
index cfe348e..0136ad5 100644 (file)
@@ -46,6 +46,7 @@ static struct omap_vc_common omap3_vc_common = {
 };
 
 struct omap_vc_channel omap3_vc_mpu = {
+       .flags = OMAP_VC_CHANNEL_DEFAULT,
        .common = &omap3_vc_common,
        .smps_sa_reg     = OMAP3_PRM_VC_SMPS_SA_OFFSET,
        .smps_volra_reg  = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET,
index 1f8fdf7..244c4a1 100644 (file)
@@ -73,7 +73,8 @@ unsigned long voltdm_get_voltage(struct voltagedomain *voltdm)
 int voltdm_scale(struct voltagedomain *voltdm,
                 unsigned long target_volt)
 {
-       int ret;
+       int ret, i;
+       unsigned long volt = 0;
 
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
@@ -86,9 +87,23 @@ int voltdm_scale(struct voltagedomain *voltdm,
                return -ENODATA;
        }
 
-       ret = voltdm->scale(voltdm, target_volt);
+       /* Adjust voltage to the exact voltage from the OPP table */
+       for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
+               if (voltdm->volt_data[i].volt_nominal >= target_volt) {
+                       volt = voltdm->volt_data[i].volt_nominal;
+                       break;
+               }
+       }
+
+       if (!volt) {
+               pr_warning("%s: not scaling. OPP voltage for %lu, not found.\n",
+                          __func__, target_volt);
+               return -EINVAL;
+       }
+
+       ret = voltdm->scale(voltdm, volt);
        if (!ret)
-               voltdm->nominal_volt = target_volt;
+               voltdm->nominal_volt = volt;
 
        return ret;
 }
index 3b52027..70daf80 100644 (file)
@@ -61,8 +61,8 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
        vddmin = voltdm->pmic->vp_vddmin;
        vddmax = voltdm->pmic->vp_vddmax;
 
-       waittime = ((voltdm->pmic->step_size / voltdm->pmic->slew_rate) *
-                   sys_clk_rate) / 1000;
+       waittime = DIV_ROUND_UP(voltdm->pmic->step_size * sys_clk_rate,
+                               1000 * voltdm->pmic->slew_rate);
        vstepmin = voltdm->pmic->vp_vstepmin;
        vstepmax = voltdm->pmic->vp_vstepmax;
 
diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 06b50ae..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END       0xfd800000UL
diff --git a/arch/arm/mach-picoxcell/include/mach/vmalloc.h b/arch/arm/mach-picoxcell/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 0216cc4..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#define VMALLOC_END    0xfe000000UL
index 90a554f..6c89cf8 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/sched.h>
 
 #include <asm/mach/time.h>
 #include <asm/sched_clock.h>
@@ -66,21 +65,11 @@ static void picoxcell_add_clocksource(struct device_node *source_timer)
        dw_apb_clocksource_register(cs);
 }
 
-static DEFINE_CLOCK_DATA(cd);
 static void __iomem *sched_io_base;
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace picoxcell_read_sched_clock(void)
 {
-       cycle_t cyc = sched_io_base ? __raw_readl(sched_io_base) : 0;
-
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace picoxcell_update_sched_clock(void)
-{
-       cycle_t cyc = sched_io_base ? __raw_readl(sched_io_base) : 0;
-
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return __raw_readl(sched_io_base);
 }
 
 static const struct of_device_id picoxcell_rtc_ids[] __initconst = {
@@ -100,7 +89,7 @@ static void picoxcell_init_sched_clock(void)
        timer_get_base_and_rate(sched_timer, &sched_io_base, &rate);
        of_node_put(sched_timer);
 
-       init_sched_clock(&cd, picoxcell_update_sched_clock, 32, rate);
+       setup_sched_clock(picoxcell_read_sched_clock, 32, rate);
 }
 
 static const struct of_device_id picoxcell_timer_ids[] __initconst = {
index 5dda2bb..5d6384a 100644 (file)
@@ -32,7 +32,7 @@ static void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       soft_restart(0);
 }
 
 #endif
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 184913c..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-pnx4008/include/mach/vmalloc.h
- *
- * Author: Vitaly Wool <source@mvista.com>
- *
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-prima2/include/mach/vmalloc.h b/arch/arm/mach-prima2/include/mach/vmalloc.h
deleted file mode 100644 (file)
index c9f90fe..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/ach-prima2/include/mach/vmalloc.h
- *
- * Copyright (c) 2010 – 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <linux/const.h>
-
-#define VMALLOC_END    _AC(0xFEC00000, UL)
-
-#endif
index 61d3c72..109ccd2 100644 (file)
@@ -108,6 +108,7 @@ config CSB726_CSB701
 
 config MACH_ARMCORE
        bool "CompuLab CM-X255/CM-X270 modules"
+       select ARCH_HAS_DMA_SET_COHERENT_MASK if PCI
        select PXA27x
        select IWMMXT
        select PXA25x
index 8184669..54b64ea 100644 (file)
@@ -337,8 +337,4 @@ extern unsigned int get_memclk_frequency_10khz(void);
 extern unsigned long get_clock_tick_rate(void);
 #endif
 
-#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
-#define ARCH_HAS_DMA_SET_COHERENT_MASK
-#endif
-
 #endif  /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h
deleted file mode 100644 (file)
index bfecfbf..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * arch/arm/mach-pxa/include/mach/vmalloc.h
- *
- * Author:     Nicolas Pitre
- * Copyright:  (C) 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END       (0xe8000000UL)
index 0e25348..2f539d5 100644 (file)
@@ -95,7 +95,7 @@ void arch_reset(char mode, const char *cmd)
        switch (mode) {
        case 's':
                /* Jump into ROM at address 0 */
-               cpu_reset(0);
+               soft_restart(0);
                break;
        case 'g':
                do_gpio_reset();
index de68470..b503049 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
-#include <linux/sched.h>
 
 #include <asm/div64.h>
 #include <asm/mach/irq.h>
  * long as there is always less than 582 seconds between successive
  * calls to sched_clock() which should always be the case in practice.
  */
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace pxa_read_sched_clock(void)
 {
-       u32 cyc = OSCR;
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace pxa_update_sched_clock(void)
-{
-       u32 cyc = OSCR;
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return OSCR;
 }
 
 
@@ -119,7 +110,7 @@ static void __init pxa_timer_init(void)
        OIER = 0;
        OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
 
-       init_sched_clock(&cd, pxa_update_sched_clock, 32, clock_tick_rate);
+       setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate);
 
        clockevents_calc_mult_shift(&ckevt_pxa_osmr0, clock_tick_rate, 4);
        ckevt_pxa_osmr0.max_delta_ns =
diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h
deleted file mode 100644 (file)
index a2a4c68..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  arch/arm/mach-realview/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 ARM Limited
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END            0xf8000000UL
index 45c7b93..a354f4d 100644 (file)
@@ -23,5 +23,5 @@ static inline void arch_reset(char mode, const char *cmd)
        /*
         * Jump into the ROM
         */
-       cpu_reset(0);
+       soft_restart(0);
 }
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
deleted file mode 100644 (file)
index fb70022..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  arch/arm/mach-rpc/include/mach/vmalloc.h
- *
- *  Copyright (C) 1997 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END       0xdc000000UL
index 6faadce..913893d 100644 (file)
@@ -19,7 +19,7 @@ static void
 arch_reset(char mode, const char *cmd)
 {
        if (mode == 's') {
-               cpu_reset(0);
+               soft_restart(0);
        }
 
        if (s3c24xx_reset_hook)
@@ -28,5 +28,5 @@ arch_reset(char mode, const char *cmd)
        arch_wdt_reset();
 
        /* we'll take a jump through zero as a poor second */
-       cpu_reset(0);
+       soft_restart(0);
 }
diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 7a311e8..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/vmalloc.h
- *
- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- *                   http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2410 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END    0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
index 2e58cb7..d8ca578 100644 (file)
@@ -24,7 +24,7 @@ static void arch_reset(char mode, const char *cmd)
                arch_wdt_reset();
 
        /* if all else fails, or mode was for soft, jump to 0 */
-       cpu_reset(0);
+       soft_restart(0);
 }
 
 #endif /* __ASM_ARCH_IRQ_H */
diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 23f75e5..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-s3c64xx/include/mach/vmalloc.h
- *
- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- *                   http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END    0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h b/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 38dcc71..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END    0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/vmalloc.h b/arch/arm/mach-s5pc100/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 44c8e57..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5pc100/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END    0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
deleted file mode 100644 (file)
index a6c659d..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * Based on arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * S5PV210 vmalloc definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H __FILE__
-
-#define VMALLOC_END    0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
index ba9da9f..345d35b 100644 (file)
@@ -14,7 +14,7 @@ static inline void arch_reset(char mode, const char *cmd)
 {
        if (mode == 's') {
                /* Jump into ROM at address 0 */
-               cpu_reset(0);
+               soft_restart(0);
        } else {
                /* Use on-chip reset capability */
                RSRR = RSRR_SWR;
diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h
deleted file mode 100644 (file)
index b3d0023..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/vmalloc.h
- */
-#define VMALLOC_END       (0xe8000000UL)
index fa66024..1ee6d4c 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/sched.h>       /* just for sched_clock() - funny that */
 #include <linux/timex.h>
 #include <linux/clockchips.h>
 
 #include <asm/sched_clock.h>
 #include <mach/hardware.h>
 
-/*
- * This is the SA11x0 sched_clock implementation.
- */
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 3.6864MHz,
- * NSEC_PER_SEC, 60).
- * This gives a resolution of about 271ns and a wrap period of about 19min.
- */
-#define SC_MULT                2275555556u
-#define SC_SHIFT       23
-
-unsigned long long notrace sched_clock(void)
-{
-       u32 cyc = OSCR;
-       return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-static void notrace sa1100_update_sched_clock(void)
+static u32 notrace sa1100_read_sched_clock(void)
 {
-       u32 cyc = OSCR;
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return OSCR;
 }
 
 #define MIN_OSCR_DELTA 2
@@ -109,8 +88,7 @@ static void __init sa1100_timer_init(void)
        OIER = 0;
        OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
 
-       init_fixed_sched_clock(&cd, sa1100_update_sched_clock, 32,
-                              3686400, SC_MULT, SC_SHIFT);
+       setup_sched_clock(sa1100_read_sched_clock, 32, 3686400);
 
        clockevents_calc_mult_shift(&ckevt_sa1100_osmr0, 3686400, 4);
        ckevt_sa1100_osmr0.max_delta_ns =
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
deleted file mode 100644 (file)
index b10df98..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/vmalloc.h
- */
-#define VMALLOC_END       0xd0000000UL
index 76a687e..956ac18 100644 (file)
@@ -8,7 +8,7 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       soft_restart(0);
 }
 
 #endif
diff --git a/arch/arm/mach-shmobile/include/mach/vmalloc.h b/arch/arm/mach-shmobile/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 2b8fd8b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_MACH_VMALLOC_H
-#define __ASM_MACH_VMALLOC_H
-
-/* Vmalloc at ... - 0xe5ffffff */
-#define VMALLOC_END 0xe6000000UL
-
-#endif /* __ASM_MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/vmalloc.h b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index df977b3..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/include/mach/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr3xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <plat/vmalloc.h>
-
-#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/vmalloc.h b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 4a0b56c..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/include/mach/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr6xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <plat/vmalloc.h>
-
-#endif /* __MACH_VMALLOC_H */
index e2272d2..732c724 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/init.h>
 #include <linux/err.h>
-#include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -106,25 +105,9 @@ static struct clock_event_device tegra_clockevent = {
        .set_mode       = tegra_timer_set_mode,
 };
 
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 1MHz, NSEC_PER_SEC, 60).
- * This gives a resolution of about 1us and a wrap period of about 1h11min.
- */
-#define SC_MULT                4194304000u
-#define SC_SHIFT       22
-
-unsigned long long notrace sched_clock(void)
-{
-       u32 cyc = timer_readl(TIMERUS_CNTR_1US);
-       return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-static void notrace tegra_update_sched_clock(void)
+static u32 notrace tegra_read_sched_clock(void)
 {
-       u32 cyc = timer_readl(TIMERUS_CNTR_1US);
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return timer_readl(TIMERUS_CNTR_1US);
 }
 
 /*
@@ -218,8 +201,7 @@ static void __init tegra_init_timer(void)
                WARN(1, "Unknown clock rate");
        }
 
-       init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32,
-                              1000000, SC_MULT, SC_SHIFT);
+       setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
 
        if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
                "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h
deleted file mode 100644 (file)
index ec423b9..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/include/mach/vmalloc.h
- *
- *
- * Copyright (C) 2006-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Virtual memory allocations
- * End must be above the I/O registers and on an even 2MiB boundary.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#define VMALLOC_END    0xfe800000UL
index 5f51bde..bc1c789 100644 (file)
@@ -9,7 +9,6 @@
  * Author: Linus Walleij <linus.walleij@stericsson.com>
  */
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/clockchips.h>
@@ -337,18 +336,10 @@ static struct irqaction u300_timer_irq = {
  * this wraps around for now, since it is just a relative time
  * stamp. (Inspired by OMAP implementation.)
  */
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace u300_read_sched_clock(void)
 {
-       u32 cyc = readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace u300_update_sched_clock(void)
-{
-       u32 cyc = readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
 }
 
 
@@ -366,7 +357,7 @@ static void __init u300_timer_init(void)
        clk_enable(clk);
        rate = clk_get_rate(clk);
 
-       init_sched_clock(&cd, u300_update_sched_clock, 32, rate);
+       setup_sched_clock(u300_read_sched_clock, 32, rate);
 
        /*
         * Disable the "OS" and "DD" timers - these are designed for Symbian!
diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h
deleted file mode 100644 (file)
index a4945cb..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- *  Copyright (C) 2009 ST-Ericsson
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END    0xf0000000UL
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 7d8e069..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  arch/arm/mach-versatile/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 ARM Limited
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END            0xd8000000UL
diff --git a/arch/arm/mach-vexpress/include/mach/vmalloc.h b/arch/arm/mach-vexpress/include/mach/vmalloc.h
deleted file mode 100644 (file)
index f43a36e..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  arch/arm/mach-vexpress/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 ARM Limited
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END            0xf8000000UL
diff --git a/arch/arm/mach-vt8500/include/mach/vmalloc.h b/arch/arm/mach-vt8500/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 4642290..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-vt8500/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END    0xd0000000UL
index ce228bd..68875a1 100644 (file)
@@ -33,7 +33,7 @@ static void arch_reset(char mode, const char *cmd)
 {
        if (mode == 's') {
                /* Jump into ROM at address 0 */
-               cpu_reset(0);
+               soft_restart(0);
        } else {
                __raw_writel(WTE | WTRE | WTCLK, WTCR);
        }
diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h
deleted file mode 100644 (file)
index b067e44..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/arm/mach-w90x900/include/mach/vmalloc.h
- *
- * Copyright (c) 2008 Nuvoton technology corporation
- * All rights reserved.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * Based on arch/arm/mach-s3c2410/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END      (0xe0000000UL)
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-zynq/include/mach/vmalloc.h b/arch/arm/mach-zynq/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 2398eff..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/vmalloc.h
- *
- *  Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_VMALLOC_H__
-#define __MACH_VMALLOC_H__
-
-#define VMALLOC_END       0xE0000000UL
-
-#endif
index 4e1ef6e..ff426dd 100644 (file)
@@ -628,6 +628,24 @@ config IO_36
 
 comment "Processor Features"
 
+config ARM_LPAE
+       bool "Support for the Large Physical Address Extension"
+       depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
+               !CPU_32v4 && !CPU_32v3
+       help
+         Say Y if you have an ARMv7 processor supporting the LPAE page
+         table format and you would like to access memory beyond the
+         4GB limit. The resulting kernel image will not run on
+         processors without the LPA extension.
+
+         If unsure, say N.
+
+config ARCH_PHYS_ADDR_T_64BIT
+       def_bool ARM_LPAE
+
+config ARCH_DMA_ADDR_T_64BIT
+       bool
+
 config ARM_THUMB
        bool "Support Thumb user binaries"
        depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
@@ -889,3 +907,18 @@ config ARCH_HAS_BARRIERS
        help
          This option allows the use of custom mandatory barriers
          included via the mach/barriers.h file.
+
+config USER_L2_PLE
+       bool "Enable userspace access to the L2 PLE"
+       depends on CPU_V7
+       default n
+       help
+         Enable userspace access to the L2 preload engine (PLE) available
+         in Cortex-A series ARM processors.
+
+config USER_PMON
+       bool "Enable userspace access to performance counters"
+       depends on CPU_V7
+       default n
+       help
+         Enable userpsace access to the performance monitor registers.
index bca7e61..d223854 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES)         += proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)   += alignment.o
 obj-$(CONFIG_HIGHMEM)          += highmem.o
+obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)   += abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)     += abort-ev4.o
index 6e39bf1..ba9e4d2 100644 (file)
@@ -74,6 +74,7 @@
 
 static unsigned long ai_user;
 static unsigned long ai_sys;
+static void *ai_sys_last_pc;
 static unsigned long ai_skipped;
 static unsigned long ai_half;
 static unsigned long ai_word;
@@ -127,7 +128,7 @@ static const char *usermode_action[] = {
 static int alignment_proc_show(struct seq_file *m, void *v)
 {
        seq_printf(m, "User:\t\t%lu\n", ai_user);
-       seq_printf(m, "System:\t\t%lu\n", ai_sys);
+       seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);
        seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
        seq_printf(m, "Half:\t\t%lu\n", ai_half);
        seq_printf(m, "Word:\t\t%lu\n", ai_word);
@@ -699,7 +700,6 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
        unsigned long instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;
-       poffset->un = 0;
 
        switch (tinst1 & 0xffe0) {
        /* A6.3.5 Load/Store multiple */
@@ -746,7 +746,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-       union offset_union offset;
+       union offset_union uninitialized_var(offset);
        unsigned long instr = 0, instrptr;
        int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
        unsigned int type;
@@ -788,6 +788,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                goto user;
 
        ai_sys += 1;
+       ai_sys_last_pc = (void *)instruction_pointer(regs);
 
  fixup:
 
@@ -853,10 +854,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                break;
 
        case 0x08000000:        /* ldm or stm, or thumb-2 32bit instruction */
-               if (thumb2_32b)
+               if (thumb2_32b) {
+                       offset.un = 0;
                        handler = do_alignment_t32_to_handler(&instr, regs, &offset);
-               else
+               } else {
+                       offset.un = 0;
                        handler = do_alignment_ldmstm;
+               }
                break;
 
        default:
@@ -968,7 +972,7 @@ static int __init alignment_init(void)
                ai_usermode = safe_usermode(ai_usermode, false);
        }
 
-       hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+       hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
                        "alignment exception");
 
        /*
index c2301f2..52e35f3 100644 (file)
@@ -78,6 +78,7 @@ ENTRY(v3_coherent_kern_range)
  *     - end    - virtual end address
  */
 ENTRY(v3_coherent_user_range)
+       mov     r0, #0
        mov     pc, lr
 
 /*
index fd9bb7a..022135d 100644 (file)
@@ -88,6 +88,7 @@ ENTRY(v4_coherent_kern_range)
  *     - end    - virtual end address
  */
 ENTRY(v4_coherent_user_range)
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 4f2c141..8f1eeae 100644 (file)
@@ -167,9 +167,9 @@ ENTRY(v4wb_coherent_user_range)
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
-       mov     ip, #0
-       mcr     p15, 0, ip, c7, c5, 0           @ invalidate I cache
-       mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       mov     r0, #0
+       mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
+       mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        mov     pc, lr
 
 
index 4d7b467..b34a5f9 100644 (file)
@@ -125,6 +125,7 @@ ENTRY(v4wt_coherent_user_range)
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 74c2e5a..4b10760 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/errno.h>
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
@@ -135,7 +136,6 @@ ENTRY(v6_coherent_user_range)
 1:
  USER( mcr     p15, 0, r0, c7, c10, 1  )       @ clean D line
        add     r0, r0, #CACHE_LINE_SIZE
-2:
        cmp     r0, r1
        blo     1b
 #endif
@@ -154,13 +154,11 @@ ENTRY(v6_coherent_user_range)
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
  */
 9001:
-       mov     r0, r0, lsr #12
-       mov     r0, r0, lsl #12
-       add     r0, r0, #4096
-       b       2b
+       mov     r0, #-EFAULT
+       mov     pc, lr
  UNWIND(.fnend         )
 ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
index a655d3d..3953920 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/errno.h>
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
@@ -198,7 +199,6 @@ ENTRY(v7_coherent_user_range)
        add     r12, r12, r2
        cmp     r12, r1
        blo     2b
-3:
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 6)  @ invalidate BTB Inner Shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 6)  @ invalidate BTB
@@ -208,13 +208,11 @@ ENTRY(v7_coherent_user_range)
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
  */
 9001:
-       mov     r12, r12, lsr #12
-       mov     r12, r12, lsl #12
-       add     r12, r12, #4096
-       b       3b
+       mov     r0, #-EFAULT
+       mov     pc, lr
  UNWIND(.fnend         )
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
@@ -331,9 +329,10 @@ ENDPROC(v7_dma_flush_range)
  */
 ENTRY(v7_dma_map_area)
        add     r1, r1, r0
-       teq     r2, #DMA_FROM_DEVICE
-       beq     v7_dma_inv_range
-       b       v7_dma_clean_range
+       cmp     r2, #DMA_TO_DEVICE
+       beq     v7_dma_clean_range
+       bcs     v7_dma_inv_range
+       b       v7_dma_flush_range
 ENDPROC(v7_dma_map_area)
 
 /*
@@ -343,9 +342,6 @@ ENDPROC(v7_dma_map_area)
  *     - dir   - DMA direction
  */
 ENTRY(v7_dma_unmap_area)
-       add     r1, r1, r0
-       teq     r2, #DMA_TO_DEVICE
-       bne     v7_dma_inv_range
        mov     pc, lr
 ENDPROC(v7_dma_unmap_area)
 
index 93aac06..ee9bb36 100644 (file)
@@ -22,6 +22,21 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
+#ifdef CONFIG_ARM_LPAE
+#define cpu_set_asid(asid) {                                           \
+       unsigned long ttbl, ttbh;                                       \
+       asm volatile(                                                   \
+       "       mrrc    p15, 0, %0, %1, c2              @ read TTBR0\n" \
+       "       mov     %1, %2, lsl #(48 - 32)          @ set ASID\n"   \
+       "       mcrr    p15, 0, %0, %1, c2              @ set TTBR0\n"  \
+       : "=&r" (ttbl), "=&r" (ttbh)                                    \
+       : "r" (asid & ~ASID_MASK));                                     \
+}
+#else
+#define cpu_set_asid(asid) \
+       asm("   mcr     p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+#endif
+
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
@@ -37,7 +52,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 static void flush_context(void)
 {
        /* set the reserved ASID before flushing the TLB */
-       asm("mcr        p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+       cpu_set_asid(0);
        isb();
        local_flush_tlb_all();
        if (icache_is_vivt_asid_tagged()) {
@@ -99,7 +114,7 @@ static void reset_context(void *info)
        set_mm_context(mm, asid);
 
        /* set the new ASID */
-       asm("mcr        p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+       cpu_set_asid(mm->context.id);
        isb();
 }
 
index e1dd92c..775903e 100644 (file)
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system.h>
+#include <asm/dma-contiguous.h>
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+               size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+               size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+                                     handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned int offset = handle & (PAGE_SIZE - 1);
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned int offset = handle & (PAGE_SIZE - 1);
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+       .alloc                  = arm_dma_alloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .map_page               = arm_dma_map_page,
+       .unmap_page             = arm_dma_unmap_page,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
+       .sync_single_for_device = arm_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .set_dma_mask           = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
        u64 mask = (u64)arm_dma_limit;
@@ -56,6 +164,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
        return mask;
 }
 
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+       void *ptr;
+       /*
+        * Ensure that the allocated pages are zeroed, and that any data
+        * lurking in the kernel direct-mapped region is invalidated.
+        */
+       ptr = page_address(page);
+       if (ptr) {
+               memset(ptr, 0, size);
+               dmac_flush_range(ptr, ptr + size);
+               outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       }
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
@@ -64,23 +187,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 {
        unsigned long order = get_order(size);
        struct page *page, *p, *e;
-       void *ptr;
-       u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
-       u64 limit = (mask + 1) & ~mask;
-       if (limit && size >= limit) {
-               dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
-                       size, mask);
-               return NULL;
-       }
-#endif
-
-       if (!mask)
-               return NULL;
-
-       if (mask < 0xffffffffULL)
-               gfp |= GFP_DMA;
 
        page = alloc_pages(gfp, order);
        if (!page)
@@ -93,14 +199,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);
 
-       /*
-        * Ensure that the allocated pages are zeroed, and that any data
-        * lurking in the kernel direct-mapped region is invalidated.
-        */
-       ptr = page_address(page);
-       memset(ptr, 0, size);
-       dmac_flush_range(ptr, ptr + size);
-       outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       __dma_clear_buffer(page, size);
 
        return page;
 }
@@ -120,218 +219,346 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 #ifdef CONFIG_MMU
 
-#define CONSISTENT_OFFSET(x)   (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
-
-/*
- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
- */
-static pte_t **consistent_pte;
-
-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+                                    pgprot_t prot, struct page **ret_page);
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                pgprot_t prot, struct page **ret_page,
+                                const void *caller);
 
-void __init init_consistent_dma_size(unsigned long size)
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+       const void *caller)
 {
-       unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
+       struct vm_struct *area;
+       unsigned long addr;
 
-       BUG_ON(consistent_pte); /* Check we're called before DMA region init */
-       BUG_ON(base < VMALLOC_END);
+       /*
+        * DMA allocation can be mapped to user space, so lets
+        * set VM_USERMAP flags too.
+        */
+       area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+                                 caller);
+       if (!area)
+               return NULL;
+       addr = (unsigned long)area->addr;
+       area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+       if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+               vunmap((void *)addr);
+               return NULL;
+       }
+       return (void *)addr;
+}
 
-       /* Grow region to accommodate specified size  */
-       if (base < consistent_base)
-               consistent_base = base;
+static void __dma_free_remap(void *cpu_addr, size_t size)
+{
+       unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
+       struct vm_struct *area = find_vm_area(cpu_addr);
+       if (!area || (area->flags & flags) != flags) {
+               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+               return;
+       }
+       unmap_kernel_range((unsigned long)cpu_addr, size);
+       vunmap(cpu_addr);
 }
 
-#include "vmregion.h"
+struct dma_pool {
+       size_t size;
+       spinlock_t lock;
+       unsigned long *bitmap;
+       unsigned long nr_pages;
+       void *vaddr;
+       struct page *page;
+};
 
-static struct arm_vmregion_head consistent_head = {
-       .vm_lock        = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
-       .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
-       .vm_end         = CONSISTENT_END,
+static struct dma_pool atomic_pool = {
+       .size = SZ_256K,
 };
 
-#ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
+static int __init early_coherent_pool(char *p)
+{
+       atomic_pool.size = memparse(p, &p);
+       return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
 
 /*
- * Initialise the consistent memory allocation.
+ * Initialise the coherent pool for atomic allocations.
  */
-static int __init consistent_init(void)
+static int __init atomic_pool_init(void)
 {
-       int ret = 0;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       int i = 0;
-       unsigned long base = consistent_base;
-       unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+       struct dma_pool *pool = &atomic_pool;
+       pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+       unsigned long nr_pages = pool->size >> PAGE_SHIFT;
+       unsigned long *bitmap;
+       struct page *page;
+       void *ptr;
+       int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
-       consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
-       if (!consistent_pte) {
-               pr_err("%s: no memory\n", __func__);
-               return -ENOMEM;
+       bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!bitmap)
+               goto no_bitmap;
+
+       if (IS_ENABLED(CONFIG_CMA))
+               ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+       else
+               ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
+                                          &page, NULL);
+       if (ptr) {
+               spin_lock_init(&pool->lock);
+               pool->vaddr = ptr;
+               pool->page = page;
+               pool->bitmap = bitmap;
+               pool->nr_pages = nr_pages;
+               pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+                      (unsigned)pool->size / 1024);
+               return 0;
        }
+       kfree(bitmap);
+no_bitmap:
+       pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+              (unsigned)pool->size / 1024);
+       return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(atomic_pool_init);
 
-       pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
-       consistent_head.vm_start = base;
+struct dma_contig_early_reserve {
+       phys_addr_t base;
+       unsigned long size;
+};
 
-       do {
-               pgd = pgd_offset(&init_mm, base);
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
 
-               pud = pud_alloc(&init_mm, pgd, base);
-               if (!pud) {
-                       printk(KERN_ERR "%s: no pud tables\n", __func__);
-                       ret = -ENOMEM;
-                       break;
-               }
+static int dma_mmu_remap_num __initdata;
 
-               pmd = pmd_alloc(&init_mm, pud, base);
-               if (!pmd) {
-                       printk(KERN_ERR "%s: no pmd tables\n", __func__);
-                       ret = -ENOMEM;
-                       break;
-               }
-               WARN_ON(!pmd_none(*pmd));
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+       dma_mmu_remap[dma_mmu_remap_num].base = base;
+       dma_mmu_remap[dma_mmu_remap_num].size = size;
+       dma_mmu_remap_num++;
+}
 
-               pte = pte_alloc_kernel(pmd, base);
-               if (!pte) {
-                       printk(KERN_ERR "%s: no pte tables\n", __func__);
-                       ret = -ENOMEM;
-                       break;
-               }
+void __init dma_contiguous_remap(void)
+{
+       int i;
+       for (i = 0; i < dma_mmu_remap_num; i++) {
+               phys_addr_t start = dma_mmu_remap[i].base;
+               phys_addr_t end = start + dma_mmu_remap[i].size;
+               struct map_desc map;
+               unsigned long addr;
+
+               if (end > arm_lowmem_limit)
+                       end = arm_lowmem_limit;
+               if (start >= end)
+                       return;
+
+               map.pfn = __phys_to_pfn(start);
+               map.virtual = __phys_to_virt(start);
+               map.length = end - start;
+               map.type = MT_MEMORY_DMA_READY;
 
-               consistent_pte[i++] = pte;
-               base += PMD_SIZE;
-       } while (base < CONSISTENT_END);
+               /*
+                * Clear previous low-memory mapping
+                */
+               for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+                    addr += PMD_SIZE)
+                       pmd_clear(pmd_off_k(addr));
 
-       return ret;
+               iotable_init(&map, 1);
+       }
 }
 
-core_initcall(consistent_init);
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+                           void *data)
+{
+       struct page *page = virt_to_page(addr);
+       pgprot_t prot = *(pgprot_t *)data;
 
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+       set_pte_ext(pte, mk_pte(page, prot), 0);
+       return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+       unsigned long start = (unsigned long) page_address(page);
+       unsigned end = start + size;
+
+       apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+       dsb();
+       flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                pgprot_t prot, struct page **ret_page,
+                                const void *caller)
 {
-       struct arm_vmregion *c;
-       size_t align;
-       int bit;
+       struct page *page;
+       void *ptr;
+       page = __dma_alloc_buffer(dev, size, gfp);
+       if (!page)
+               return NULL;
+
+       ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+       if (!ptr) {
+               __dma_free_buffer(page, size);
+               return NULL;
+       }
+
+       *ret_page = page;
+       return ptr;
+}
 
-       if (!consistent_pte) {
-               printk(KERN_ERR "%s: not initialised\n", __func__);
-               dump_stack();
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+       struct dma_pool *pool = &atomic_pool;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned int pageno;
+       unsigned long flags;
+       void *ptr = NULL;
+       unsigned long align_mask;
+
+       if (!pool->vaddr) {
+               WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }
 
        /*
-        * Align the virtual region allocation - maximum alignment is
-        * a section size, minimum is a page size.  This helps reduce
-        * fragmentation of the DMA space, and also prevents allocations
-        * smaller than a section from crossing a section boundary.
+        * Align the region allocation - allocations from pool are rather
+        * small, so align them to their order in pages, minimum is a page
+        * size. This helps reduce fragmentation of the DMA space.
         */
-       bit = fls(size - 1);
-       if (bit > SECTION_SHIFT)
-               bit = SECTION_SHIFT;
-       align = 1 << bit;
+       align_mask = (1 << get_order(size)) - 1;
+
+       spin_lock_irqsave(&pool->lock, flags);
+       pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
+                                           0, count, align_mask);
+       if (pageno < pool->nr_pages) {
+               bitmap_set(pool->bitmap, pageno, count);
+               ptr = pool->vaddr + PAGE_SIZE * pageno;
+               *ret_page = pool->page + pageno;
+       } else {
+               pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+                           "Please increase it with coherent_pool= kernel parameter!\n",
+                           (unsigned)pool->size / 1024);
+       }
+       spin_unlock_irqrestore(&pool->lock, flags);
 
-       /*
-        * Allocate a virtual address in the consistent mapping region.
-        */
-       c = arm_vmregion_alloc(&consistent_head, align, size,
-                           gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
-       if (c) {
-               pte_t *pte;
-               int idx = CONSISTENT_PTE_INDEX(c->vm_start);
-               u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
-               pte = consistent_pte[idx] + off;
-               c->vm_pages = page;
-
-               do {
-                       BUG_ON(!pte_none(*pte));
-
-                       set_pte_ext(pte, mk_pte(page, prot), 0);
-                       page++;
-                       pte++;
-                       off++;
-                       if (off >= PTRS_PER_PTE) {
-                               off = 0;
-                               pte = consistent_pte[++idx];
-                       }
-               } while (size -= PAGE_SIZE);
+       return ptr;
+}
 
-               dsb();
+static int __free_from_pool(void *start, size_t size)
+{
+       struct dma_pool *pool = &atomic_pool;
+       unsigned long pageno, count;
+       unsigned long flags;
+
+       if (start < pool->vaddr || start > pool->vaddr + pool->size)
+               return 0;
 
-               return (void *)c->vm_start;
+       if (start + size > pool->vaddr + pool->size) {
+               WARN(1, "freeing wrong coherent size from pool\n");
+               return 0;
        }
-       return NULL;
+
+       pageno = (start - pool->vaddr) >> PAGE_SHIFT;
+       count = size >> PAGE_SHIFT;
+
+       spin_lock_irqsave(&pool->lock, flags);
+       bitmap_clear(pool->bitmap, pageno, count);
+       spin_unlock_irqrestore(&pool->lock, flags);
+
+       return 1;
 }
 
-static void __dma_free_remap(void *cpu_addr, size_t size)
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+                                    pgprot_t prot, struct page **ret_page)
 {
-       struct arm_vmregion *c;
-       unsigned long addr;
-       pte_t *ptep;
-       int idx;
-       u32 off;
-
-       c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
-       if (!c) {
-               printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-                      __func__, cpu_addr);
-               dump_stack();
-               return;
-       }
+       unsigned long order = get_order(size);
+       size_t count = size >> PAGE_SHIFT;
+       struct page *page;
 
-       if ((c->vm_end - c->vm_start) != size) {
-               printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-                      __func__, c->vm_end - c->vm_start, size);
-               dump_stack();
-               size = c->vm_end - c->vm_start;
-       }
+       page = dma_alloc_from_contiguous(dev, count, order);
+       if (!page)
+               return NULL;
 
-       idx = CONSISTENT_PTE_INDEX(c->vm_start);
-       off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-       ptep = consistent_pte[idx] + off;
-       addr = c->vm_start;
-       do {
-               pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-
-               ptep++;
-               addr += PAGE_SIZE;
-               off++;
-               if (off >= PTRS_PER_PTE) {
-                       off = 0;
-                       ptep = consistent_pte[++idx];
-               }
+       __dma_clear_buffer(page, size);
+       __dma_remap(page, size, prot);
 
-               if (pte_none(pte) || !pte_present(pte))
-                       printk(KERN_CRIT "%s: bad page in kernel page table\n",
-                              __func__);
-       } while (size -= PAGE_SIZE);
+       *ret_page = page;
+       return page_address(page);
+}
 
-       flush_tlb_kernel_range(c->vm_start, c->vm_end);
+static void __free_from_contiguous(struct device *dev, struct page *page,
+                                  size_t size)
+{
+       __dma_remap(page, size, pgprot_kernel);
+       dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
 
-       arm_vmregion_free(&consistent_head, c);
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+       prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+                           pgprot_writecombine(prot) :
+                           pgprot_dmacoherent(prot);
+       return prot;
 }
 
+#define nommu() 0
+
 #else  /* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot)       page_address(page)
-#define __dma_free_remap(addr, size)                   do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot)  __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)     NULL
+#define __alloc_from_pool(size, ret_page)                      NULL
+#define __alloc_from_contiguous(dev, size, prot, ret)          NULL
+#define __free_from_pool(cpu_addr, size)                       0
+#define __free_from_contiguous(dev, page, size)                        do { } while (0)
+#define __dma_free_remap(cpu_addr, size)                       do { } while (0)
 
 #endif /* CONFIG_MMU */
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-           pgprot_t prot)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                  struct page **ret_page)
+{
+       struct page *page;
+       page = __dma_alloc_buffer(dev, size, gfp);
+       if (!page)
+               return NULL;
+
+       *ret_page = page;
+       return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                        gfp_t gfp, pgprot_t prot, const void *caller)
 {
+       u64 mask = get_coherent_dma_mask(dev);
        struct page *page;
        void *addr;
 
+#ifdef CONFIG_DMA_API_DEBUG
+       u64 limit = (mask + 1) & ~mask;
+       if (limit && size >= limit) {
+               dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+                       size, mask);
+               return NULL;
+       }
+#endif
+
+       if (!mask)
+               return NULL;
+
+       if (mask < 0xffffffffULL)
+               gfp |= GFP_DMA;
+
        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
@@ -341,22 +568,20 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
         */
        gfp &= ~(__GFP_COMP);
 
-       *handle = ~0;
+       *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
-       page = __dma_alloc_buffer(dev, size, gfp);
-       if (!page)
-               return NULL;
-
-       if (!arch_is_coherent())
-               addr = __dma_alloc_remap(page, size, gfp, prot);
+       if (arch_is_coherent() || nommu())
+               addr = __alloc_simple_buffer(dev, size, gfp, &page);
+       else if (gfp & GFP_ATOMIC)
+               addr = __alloc_from_pool(size, &page);
+       else if (!IS_ENABLED(CONFIG_CMA))
+               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
        else
-               addr = page_address(page);
+               addr = __alloc_from_contiguous(dev, size, prot, &page);
 
        if (addr)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
-       else
-               __dma_free_buffer(page, size);
 
        return addr;
 }
@@ -365,136 +590,77 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                   gfp_t gfp, struct dma_attrs *attrs)
 {
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
        void *memory;
 
        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;
 
-       return __dma_alloc(dev, size, handle, gfp,
-                          pgprot_dmacoherent(pgprot_kernel));
+       return __dma_alloc(dev, size, handle, gfp, prot,
+                          __builtin_return_address(0));
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
  */
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-       return __dma_alloc(dev, size, handle, gfp,
-                          pgprot_writecombine(pgprot_kernel));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                struct dma_attrs *attrs)
 {
        int ret = -ENXIO;
 #ifdef CONFIG_MMU
-       unsigned long user_size, kern_size;
-       struct arm_vmregion *c;
-
-       user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long pfn = dma_to_pfn(dev, dma_addr);
+       unsigned long off = vma->vm_pgoff;
 
-       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-       if (c) {
-               unsigned long off = vma->vm_pgoff;
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
-               kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
 
-               if (off < kern_size &&
-                   user_size <= (kern_size - off)) {
-                       ret = remap_pfn_range(vma, vma->vm_start,
-                                             page_to_pfn(c->vm_pages) + off,
-                                             user_size << PAGE_SHIFT,
-                                             vma->vm_page_prot);
-               }
+       if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     pfn + off,
+                                     vma->vm_end - vma->vm_start,
+                                     vma->vm_page_prot);
        }
 #endif /* CONFIG_MMU */
 
        return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
 /*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                 dma_addr_t handle, struct dma_attrs *attrs)
 {
-       WARN_ON(irqs_disabled());
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
 
        size = PAGE_ALIGN(size);
 
-       if (!arch_is_coherent())
+       if (arch_is_coherent() || nommu()) {
+               __dma_free_buffer(page, size);
+       } else if (__free_from_pool(cpu_addr, size)) {
+               return;
+       } else if (!IS_ENABLED(CONFIG_CMA)) {
                __dma_free_remap(cpu_addr, size);
-
-       __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       unsigned long paddr;
-
-       BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-       dmac_map_area(kaddr, size, dir);
-
-       paddr = __pa(kaddr);
-       if (dir == DMA_FROM_DEVICE) {
-               outer_inv_range(paddr, paddr + size);
+               __dma_free_buffer(page, size);
        } else {
-               outer_clean_range(paddr, paddr + size);
-       }
-       /* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-       /* FIXME: non-speculating: not required */
-       /* don't bother invalidating if DMA to device */
-       if (dir != DMA_TO_DEVICE) {
-               unsigned long paddr = __pa(kaddr);
-               outer_inv_range(paddr, paddr + size);
+               /*
+                * Non-atomic allocations cannot be freed with IRQs disabled
+                */
+               WARN_ON(irqs_disabled());
+               __free_from_contiguous(dev, page, size);
        }
-
-       dmac_unmap_area(kaddr, size, dir);
 }
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
@@ -542,7 +708,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
        } while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr;
@@ -557,9 +729,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
        }
        /* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr = page_to_phys(page) + off;
@@ -572,15 +743,28 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
        dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 
        /*
-        * Mark the D-cache clean for this page to avoid extra flushing.
+        * Mark the D-cache clean for these pages to avoid extra flushing.
         */
-       if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
-               set_bit(PG_dcache_clean, &page->flags);
+       if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
+               unsigned long pfn;
+               size_t left = size;
+
+               pfn = page_to_pfn(page) + off / PAGE_SIZE;
+               off %= PAGE_SIZE;
+               if (off) {
+                       pfn++;
+                       left -= PAGE_SIZE - off;
+               }
+               while (left >= PAGE_SIZE) {
+                       page = pfn_to_page(pfn++);
+                       set_bit(PG_dcache_clean, &page->flags);
+                       left -= PAGE_SIZE;
+               }
+       }
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map
@@ -595,32 +779,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  * Device ownership issues as mentioned for dma_map_single are the same
  * here.
  */
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i, j;
 
-       BUG_ON(!valid_dma_direction(dir));
-
        for_each_sg(sg, s, nents, i) {
-               s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-                                               s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+               s->dma_length = s->length;
+#endif
+               s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+                                               s->length, dir, attrs);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
-       debug_dma_map_sg(dev, sg, nents, nents, dir);
        return nents;
 
  bad_mapping:
        for_each_sg(sg, s, i, j)
-               __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+               ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
        return 0;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
 /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -629,70 +813,55 @@ EXPORT_SYMBOL(dma_map_sg);
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
  * rules concerning calls here are the same as for dma_unmap_single().
  */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
-       int i;
 
-       debug_dma_unmap_sg(dev, sg, nents, dir);
+       int i;
 
        for_each_sg(sg, s, nents, i)
-               __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+               ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
 /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-                                           sg_dma_len(s), dir))
-                       continue;
-
-               __dma_page_dev_to_cpu(sg_page(s), s->offset,
-                                     s->length, dir);
-       }
-
-       debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+       for_each_sg(sg, s, nents, i)
+               ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+                                        dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
 /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
-                                       sg_dma_len(s), dir))
-                       continue;
-
-               __dma_page_cpu_to_dev(sg_page(s), s->offset,
-                                     s->length, dir);
-       }
-
-       debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+       for_each_sg(sg, s, nents, i)
+               ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+                                           dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -708,18 +877,15 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
        *dev->dma_mask = dma_mask;
-#endif
 
        return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
@@ -729,3 +895,666 @@ static int __init dma_debug_do_init(void)
        return 0;
 }
 fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+                                     size_t size)
+{
+       unsigned int order = get_order(size);
+       unsigned int align = 0;
+       unsigned int count, start;
+       unsigned long flags;
+
+       count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+                (1 << mapping->order) - 1) >> mapping->order;
+
+       if (order > mapping->order)
+               align = (1 << (order - mapping->order)) - 1;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+                                          count, align);
+       if (start > mapping->bits) {
+               spin_unlock_irqrestore(&mapping->lock, flags);
+               return DMA_ERROR_CODE;
+       }
+
+       bitmap_set(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+
+       return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+                              dma_addr_t addr, size_t size)
+{
+       unsigned int start = (addr - mapping->base) >>
+                            (mapping->order + PAGE_SHIFT);
+       unsigned int count = ((size >> PAGE_SHIFT) +
+                             (1 << mapping->order) - 1) >> mapping->order;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       bitmap_clear(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+       struct page **pages;
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i = 0;
+
+       if (array_size <= PAGE_SIZE)
+               pages = kzalloc(array_size, gfp);
+       else
+               pages = vzalloc(array_size);
+       if (!pages)
+               return NULL;
+
+       while (count) {
+               int j, order = __fls(count);
+
+               pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+               while (!pages[i] && order)
+                       pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+               if (!pages[i])
+                       goto error;
+
+               if (order)
+                       split_page(pages[i], order);
+               j = 1 << order;
+               while (--j)
+                       pages[i + j] = pages[i] + j;
+
+               __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+               i += 1 << order;
+               count -= 1 << order;
+       }
+
+       return pages;
+error:
+       while (i--)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i;
+       for (i = 0; i < count; i++)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return 0;
+}
+
+/*
+ * Create a CPU mapping for a specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+                   const void *caller)
+{
+       unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       struct vm_struct *area;
+       unsigned long p;
+
+       area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+                                 caller);
+       if (!area)
+               return NULL;
+
+       area->pages = pages;
+       area->nr_pages = nr_pages;
+       p = (unsigned long)area->addr;
+
+       for (i = 0; i < nr_pages; i++) {
+               phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
+               if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
+                       goto err;
+               p += PAGE_SIZE;
+       }
+       return area->addr;
+err:
+       unmap_kernel_range((unsigned long)area->addr, size);
+       vunmap(area->addr);
+       return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       dma_addr_t dma_addr, iova;
+       int i, ret = DMA_ERROR_CODE;
+
+       dma_addr = __alloc_iova(mapping, size);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       iova = dma_addr;
+       for (i = 0; i < count; ) {
+               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+               phys_addr_t phys = page_to_phys(pages[i]);
+               unsigned int len, j;
+
+               for (j = i + 1; j < count; j++, next_pfn++)
+                       if (page_to_pfn(pages[j]) != next_pfn)
+                               break;
+
+               len = (j - i) << PAGE_SHIFT;
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               iova += len;
+               i = j;
+       }
+       return dma_addr;
+fail:
+       iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+       __free_iova(mapping, dma_addr, size);
+       return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       /*
+        * add optional in-page offset from iova to size and align
+        * result to page size
+        */
+       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+       iova &= PAGE_MASK;
+
+       iommu_unmap(mapping->domain, iova, size);
+       __free_iova(mapping, iova, size);
+       return 0;
+}
+
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+       struct vm_struct *area;
+
+       if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+               return cpu_addr;
+
+       area = find_vm_area(cpu_addr);
+       if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
+               return area->pages;
+       return NULL;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+           dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+       struct page **pages;
+       void *addr = NULL;
+
+       *handle = DMA_ERROR_CODE;
+       size = PAGE_ALIGN(size);
+
+       pages = __iommu_alloc_buffer(dev, size, gfp);
+       if (!pages)
+               return NULL;
+
+       *handle = __iommu_create_mapping(dev, pages, size);
+       if (*handle == DMA_ERROR_CODE)
+               goto err_buffer;
+
+       if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+               return pages;
+
+       addr = __iommu_alloc_remap(pages, size, gfp, prot,
+                                  __builtin_return_address(0));
+       if (!addr)
+               goto err_mapping;
+
+       return addr;
+
+err_mapping:
+       __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+       __iommu_free_buffer(dev, pages, size);
+       return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                   struct dma_attrs *attrs)
+{
+       unsigned long uaddr = vma->vm_start;
+       unsigned long usize = vma->vm_end - vma->vm_start;
+       struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+       if (!pages)
+               return -ENXIO;
+
+       do {
+               int ret = vm_insert_page(vma, uaddr, *pages++);
+               if (ret) {
+                       pr_err("Remapping memory failed: %d\n", ret);
+                       return ret;
+               }
+               uaddr += PAGE_SIZE;
+               usize -= PAGE_SIZE;
+       } while (usize > 0);
+
+       return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+                         dma_addr_t handle, struct dma_attrs *attrs)
+{
+       struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+       size = PAGE_ALIGN(size);
+
+       if (!pages) {
+               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+               return;
+       }
+
+       if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+               unmap_kernel_range((unsigned long)cpu_addr, size);
+               vunmap(cpu_addr);
+       }
+
+       __iommu_remove_mapping(dev, handle, size);
+       __iommu_free_buffer(dev, pages, size);
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+                         size_t size, dma_addr_t *handle,
+                         enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova, iova_base;
+       int ret = 0;
+       unsigned int count;
+       struct scatterlist *s;
+
+       size = PAGE_ALIGN(size);
+       *handle = DMA_ERROR_CODE;
+
+       iova_base = iova = __alloc_iova(mapping, size);
+       if (iova == DMA_ERROR_CODE)
+               return -ENOMEM;
+
+       for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+               phys_addr_t phys = page_to_phys(sg_page(s));
+               unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               count += len >> PAGE_SHIFT;
+               iova += len;
+       }
+       *handle = iova_base;
+
+       return 0;
+fail:
+       iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+       __free_iova(mapping, iova_base, size);
+       return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s = sg, *dma = sg, *start = sg;
+       int i, count = 0;
+       unsigned int offset = s->offset;
+       unsigned int size = s->offset + s->length;
+       unsigned int max = dma_get_max_seg_size(dev);
+
+       for (i = 1; i < nents; i++) {
+               s = sg_next(s);
+
+               s->dma_address = DMA_ERROR_CODE;
+               s->dma_length = 0;
+
+               if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+                       if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+                           dir) < 0)
+                               goto bad_mapping;
+
+                       dma->dma_address += offset;
+                       dma->dma_length = size - offset;
+
+                       size = offset = s->offset;
+                       start = s;
+                       dma = sg_next(dma);
+                       count += 1;
+               }
+               size += s->length;
+       }
+       if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+               goto bad_mapping;
+
+       dma->dma_address += offset;
+       dma->dma_length = size - offset;
+
+       return count+1;
+
+bad_mapping:
+       for_each_sg(sg, s, count, i)
+               __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+       return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               if (sg_dma_len(s))
+                       __iommu_remove_mapping(dev, sg_dma_address(s),
+                                              sg_dma_len(s));
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset,
+                                             s->length, dir);
+       }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t dma_addr;
+       int ret, len = PAGE_ALIGN(size + offset);
+
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+
+       dma_addr = __alloc_iova(mapping, len);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+       if (ret < 0)
+               goto fail;
+
+       return dma_addr + offset;
+fail:
+       __free_iova(mapping, dma_addr, len);
+       return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       int offset = handle & ~PAGE_MASK;
+       int len = PAGE_ALIGN(size + offset);
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+
+       iommu_unmap(mapping->domain, iova, len);
+       __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+       .alloc          = arm_iommu_alloc_attrs,
+       .free           = arm_iommu_free_attrs,
+       .mmap           = arm_iommu_mmap_attrs,
+
+       .map_page               = arm_iommu_map_page,
+       .unmap_page             = arm_iommu_unmap_page,
+       .sync_single_for_cpu    = arm_iommu_sync_single_for_cpu,
+       .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+       .map_sg                 = arm_iommu_map_sg,
+       .unmap_sg               = arm_iommu_unmap_sg,
+       .sync_sg_for_cpu        = arm_iommu_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO addresses allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device need to be attached to the mapping with
+ * arm_iommu_attach_device function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order)
+{
+       unsigned int count = size >> (PAGE_SHIFT + order);
+       unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+       struct dma_iommu_mapping *mapping;
+       int err = -ENOMEM;
+
+       if (!count)
+               return ERR_PTR(-EINVAL);
+
+       mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+       if (!mapping)
+               goto err;
+
+       mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!mapping->bitmap)
+               goto err2;
+
+       mapping->base = base;
+       mapping->bits = BITS_PER_BYTE * bitmap_size;
+       mapping->order = order;
+       spin_lock_init(&mapping->lock);
+
+       mapping->domain = iommu_domain_alloc(bus);
+       if (!mapping->domain)
+               goto err3;
+
+       kref_init(&mapping->kref);
+       return mapping;
+err3:
+       kfree(mapping->bitmap);
+err2:
+       kfree(mapping);
+err:
+       return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+       struct dma_iommu_mapping *mapping =
+               container_of(kref, struct dma_iommu_mapping, kref);
+
+       iommu_domain_free(mapping->domain);
+       kfree(mapping->bitmap);
+       kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+       if (mapping)
+               kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *     arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+                           struct dma_iommu_mapping *mapping)
+{
+       int err;
+
+       err = iommu_attach_device(mapping->domain, dev);
+       if (err)
+               return err;
+
+       kref_get(&mapping->kref);
+       dev->archdata.mapping = mapping;
+       set_dma_ops(dev, &iommu_ops);
+
+       pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       return 0;
+}
+
+#endif
index 4b0bc37..ea34eb9 100644 (file)
 
 #include "fault.h"
 
-/*
- * Fault status register encodings.  We steal bit 31 for our own purposes.
- */
-#define FSR_LNX_PF             (1 << 31)
-#define FSR_WRITE              (1 << 11)
-#define FSR_FS4                        (1 << 10)
-#define FSR_FS3_0              (15)
-
-static inline int fsr_fs(unsigned int fsr)
-{
-       return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
-}
-
 #ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
@@ -123,8 +110,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
                pte = pte_offset_map(pmd, addr);
                printk(", *pte=%08llx", (long long)pte_val(*pte));
+#ifndef CONFIG_ARM_LPAE
                printk(", *ppte=%08llx",
                       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
+#endif
                pte_unmap(pte);
        } while(0);
 
@@ -163,6 +152,39 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
        do_exit(SIGKILL);
 }
 
+#ifdef CONFIG_DEBUG_USER
+static void
+print_user_faulter_location(const char *name, struct pt_regs *regs)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       char *p, *t, buf[128];
+
+       printk(KERN_DEBUG "%s: pc=%08lx",
+              name, regs->ARM_pc);
+
+       do {
+               if (!mm)
+                       break;
+               vma = find_vma(mm, regs->ARM_pc);
+               if (!vma || !vma->vm_file)
+                       break;
+
+               p = d_path(&vma->vm_file->f_path, buf, sizeof(buf));
+               if (IS_ERR(p))
+                       break;
+
+               t = strrchr(p, '/');
+               if (t)
+                       p = t + 1;
+
+               printk(KERN_CONT " (%s+%lx)", p, regs->ARM_pc - vma->vm_start);
+       } while (0);
+
+       printk(KERN_CONT ", lr=%08lx\n", regs->ARM_lr);
+}
+#endif
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * User mode accesses just cause a SIGSEGV
@@ -175,9 +197,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
        struct siginfo si;
 
 #ifdef CONFIG_DEBUG_USER
+       if (user_debug & (UDBG_SEGV | UDBG_SEGV_SHORT)) {
+               printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x (%s)\n",
+                      tsk->comm, sig, addr, fsr,
+                      (fsr & FSR_WRITE) ? "write" : "read");
+               print_user_faulter_location(tsk->comm, regs);
+       }
        if (user_debug & UDBG_SEGV) {
-               printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
-                      tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
@@ -443,6 +469,12 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
        pmd = pmd_offset(pud, addr);
        pmd_k = pmd_offset(pud_k, addr);
 
+#ifdef CONFIG_ARM_LPAE
+       /*
+        * Only one hardware entry per PMD with LPAE.
+        */
+       index = 0;
+#else
        /*
         * On ARM one Linux PGD entry contains two hardware entries (see page
         * tables layout in pgtable.h). We normally guarantee that we always
@@ -452,6 +484,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
         * for the first of pair.
         */
        index = (addr >> SECTION_SHIFT) & 1;
+#endif
        if (pmd_none(pmd_k[index]))
                goto bad_area;
 
@@ -471,17 +504,6 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 }
 #endif                                 /* CONFIG_MMU */
 
-/*
- * Some section permission faults need to be handled gracefully.
- * They can happen due to a __{get,put}_user during an oops.
- */
-static int
-do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
-{
-       do_bad_area(addr, fsr, regs);
-       return 0;
-}
-
 /*
  * This abort handler always returns "fault".
  */
@@ -491,55 +513,20 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        return 1;
 }
 
-static struct fsr_info {
+struct fsr_info {
        int     (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int     sig;
        int     code;
        const char *name;
-} fsr_info[] = {
-       /*
-        * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
-        * defines these to be "precise" aborts.
-        */
-       { do_bad,               SIGSEGV, 0,             "vector exception"                 },
-       { do_bad,               SIGBUS,  BUS_ADRALN,    "alignment exception"              },
-       { do_bad,               SIGKILL, 0,             "terminal exception"               },
-       { do_bad,               SIGBUS,  BUS_ADRALN,    "alignment exception"              },
-       { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
-       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
-       { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
-       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
-       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
-       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
-       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
-       { do_sect_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
-       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
-       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
-       /*
-        * The following are "imprecise" aborts, which are signalled by bit
-        * 10 of the FSR, and may not be recoverable.  These are only
-        * supported if the CPU abort handler supports bit 10.
-        */
-       { do_bad,               SIGBUS,  0,             "unknown 16"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 17"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 18"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 19"                       },
-       { do_bad,               SIGBUS,  0,             "lock abort"                       }, /* xscale */
-       { do_bad,               SIGBUS,  0,             "unknown 21"                       },
-       { do_bad,               SIGBUS,  BUS_OBJERR,    "imprecise external abort"         }, /* xscale */
-       { do_bad,               SIGBUS,  0,             "unknown 23"                       },
-       { do_bad,               SIGBUS,  0,             "dcache parity error"              }, /* xscale */
-       { do_bad,               SIGBUS,  0,             "unknown 25"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 26"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 27"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 28"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 29"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 30"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 31"                       }
 };
 
+/* FSR definition */
+#ifdef CONFIG_ARM_LPAE
+#include "fsr-3level.c"
+#else
+#include "fsr-2level.c"
+#endif
+
 void __init
 hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                int sig, int code, const char *name)
@@ -575,42 +562,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        arm_notify_die("", regs, &info, fsr, 0);
 }
 
-
-static struct fsr_info ifsr_info[] = {
-       { do_bad,               SIGBUS,  0,             "unknown 0"                        },
-       { do_bad,               SIGBUS,  0,             "unknown 1"                        },
-       { do_bad,               SIGBUS,  0,             "debug event"                      },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section access flag fault"        },
-       { do_bad,               SIGBUS,  0,             "unknown 4"                        },
-       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page access flag fault"           },
-       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
-       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
-       { do_bad,               SIGBUS,  0,             "unknown 10"                       },
-       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
-       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
-       { do_sect_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
-       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
-       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
-       { do_bad,               SIGBUS,  0,             "unknown 16"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 17"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 18"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 19"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 20"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 21"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 22"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 23"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 24"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 25"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 26"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 27"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 28"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 29"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 30"                       },
-       { do_bad,               SIGBUS,  0,             "unknown 31"                       },
-};
-
 void __init
 hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                 int sig, int code, const char *name)
@@ -643,6 +594,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
        arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static int __init exceptions_init(void)
 {
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
@@ -665,3 +617,4 @@ static int __init exceptions_init(void)
 }
 
 arch_initcall(exceptions_init);
+#endif
index 49e9e38..cf08bdf 100644 (file)
@@ -1,3 +1,28 @@
-void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+#ifndef __ARCH_ARM_FAULT_H
+#define __ARCH_ARM_FAULT_H
+
+/*
+ * Fault status register encodings.  We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF             (1 << 31)
+#define FSR_WRITE              (1 << 11)
+#define FSR_FS4                        (1 << 10)
+#define FSR_FS3_0              (15)
+#define FSR_FS5_0              (0x3f)
+
+#ifdef CONFIG_ARM_LPAE
+static inline int fsr_fs(unsigned int fsr)
+{
+       return fsr & FSR_FS5_0;
+}
+#else
+static inline int fsr_fs(unsigned int fsr)
+{
+       return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+#endif
 
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
+
+#endif /* __ARCH_ARM_FAULT_H */
index fe61cab..ac5416a 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
+#include <linux/hugetlb.h>
 
 #include "mm.h"
 
@@ -173,17 +174,22 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * coherent with the kernels mapping.
         */
        if (!PageHighMem(page)) {
-               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+               size_t page_size = PAGE_SIZE << compound_order(page);
+               __cpuc_flush_dcache_area(page_address(page), page_size);
        } else {
-               void *addr = kmap_high_get(page);
-               if (addr) {
-                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_high(page);
-               } else if (cache_is_vipt()) {
-                       /* unmapped pages might still be cached */
-                       addr = kmap_atomic(page);
-                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_atomic(addr);
+               unsigned long i;
+               for (i = 0; i < (1 << compound_order(page)); i++) {
+                       struct page *cpage = page + i;
+                       void *addr = kmap_high_get(cpage);
+                       if (addr) {
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                               kunmap_high(cpage);
+                       } else if (cache_is_vipt()) {
+                               /* unmapped pages might still be cached */
+                               addr = kmap_atomic(cpage);
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                               kunmap_atomic(addr);
+                       }
                }
        }
 
@@ -290,7 +296,7 @@ void flush_dcache_page(struct page *page)
        mapping = page_mapping(page);
 
        if (!cache_ops_need_broadcast() &&
-           mapping && !mapping_mapped(mapping))
+           mapping && !page_mapped(page))
                clear_bit(PG_dcache_clean, &page->flags);
        else {
                __flush_dcache_page(mapping, page);
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
new file mode 100644 (file)
index 0000000..c1a2afc
--- /dev/null
@@ -0,0 +1,78 @@
+static struct fsr_info fsr_info[] = {
+       /*
+        * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
+        * defines these to be "precise" aborts.
+        */
+       { do_bad,               SIGSEGV, 0,             "vector exception"                 },
+       { do_bad,               SIGBUS,  BUS_ADRALN,    "alignment exception"              },
+       { do_bad,               SIGKILL, 0,             "terminal exception"               },
+       { do_bad,               SIGBUS,  BUS_ADRALN,    "alignment exception"              },
+       { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
+       { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
+       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
+       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
+       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
+       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
+       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
+       /*
+        * The following are "imprecise" aborts, which are signalled by bit
+        * 10 of the FSR, and may not be recoverable.  These are only
+        * supported if the CPU abort handler supports bit 10.
+        */
+       { do_bad,               SIGBUS,  0,             "unknown 16"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 17"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 18"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 19"                       },
+       { do_bad,               SIGBUS,  0,             "lock abort"                       }, /* xscale */
+       { do_bad,               SIGBUS,  0,             "unknown 21"                       },
+       { do_bad,               SIGBUS,  BUS_OBJERR,    "imprecise external abort"         }, /* xscale */
+       { do_bad,               SIGBUS,  0,             "unknown 23"                       },
+       { do_bad,               SIGBUS,  0,             "dcache parity error"              }, /* xscale */
+       { do_bad,               SIGBUS,  0,             "unknown 25"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 26"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 27"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 28"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 29"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 30"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 31"                       },
+};
+
+static struct fsr_info ifsr_info[] = {
+       { do_bad,               SIGBUS,  0,             "unknown 0"                        },
+       { do_bad,               SIGBUS,  0,             "unknown 1"                        },
+       { do_bad,               SIGBUS,  0,             "debug event"                      },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section access flag fault"        },
+       { do_bad,               SIGBUS,  0,             "unknown 4"                        },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page access flag fault"           },
+       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
+       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
+       { do_bad,               SIGBUS,  0,             "unknown 10"                       },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
+       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
+       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
+       { do_bad,               SIGBUS,  0,             "unknown 16"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 17"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 18"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 19"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 20"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 21"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 22"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 23"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 24"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 25"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 26"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 27"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 28"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 29"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 30"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 31"                       },
+};
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
new file mode 100644 (file)
index 0000000..ab4409a
--- /dev/null
@@ -0,0 +1,68 @@
+static struct fsr_info fsr_info[] = {
+       { do_bad,               SIGBUS,  0,             "unknown 0"                     },
+       { do_bad,               SIGBUS,  0,             "unknown 1"                     },
+       { do_bad,               SIGBUS,  0,             "unknown 2"                     },
+       { do_bad,               SIGBUS,  0,             "unknown 3"                     },
+       { do_bad,               SIGBUS,  0,             "reserved translation fault"    },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault"     },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault"     },
+       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
+       { do_bad,               SIGBUS,  0,             "reserved access flag fault"    },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 access flag fault"     },
+       { do_bad,               SIGBUS,  0,             "reserved permission fault"     },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "level 1 permission fault"      },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 permission fault"      },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 permission fault"      },
+       { do_bad,               SIGBUS,  0,             "synchronous external abort"    },
+       { do_bad,               SIGBUS,  0,             "asynchronous external abort"   },
+       { do_bad,               SIGBUS,  0,             "unknown 18"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 19"                    },
+       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error"      },
+       { do_bad,               SIGBUS,  0,             "asynchronous parity error"     },
+       { do_bad,               SIGBUS,  0,             "unknown 26"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 27"                    },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "unknown 32"                    },
+       { do_bad,               SIGBUS,  BUS_ADRALN,    "alignment fault"               },
+       { do_bad,               SIGBUS,  0,             "debug event"                   },
+       { do_bad,               SIGBUS,  0,             "unknown 35"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 36"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 37"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 38"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 39"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 40"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 41"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 42"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 43"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 44"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 45"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 46"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 47"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 48"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 49"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 50"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 51"                    },
+       { do_bad,               SIGBUS,  0,             "implementation fault (lockdown abort)" },
+       { do_bad,               SIGBUS,  0,             "unknown 53"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 54"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 55"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 56"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 57"                    },
+       { do_bad,               SIGBUS,  0,             "implementation fault (coprocessor abort)" },
+       { do_bad,               SIGBUS,  0,             "unknown 59"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 60"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 61"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 62"                    },
+       { do_bad,               SIGBUS,  0,             "unknown 63"                    },
+};
+
+#define ifsr_info      fsr_info
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
new file mode 100644 (file)
index 0000000..93b78be
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * arch/arm/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+/*
+ * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
+ * of type casting from pmd_t * to pte_t *.
+ */
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       if (pgd_present(*pgd)) {
+               pud = pud_offset(pgd, addr);
+               if (pud_present(*pud))
+                       pmd = pmd_offset(pud, addr);
+       }
+
+       return (pte_t *)pmd;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+                             int write)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       return (pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT;
+}
+
+int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+                       unsigned long addr, unsigned long sz)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pte_t *pte = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       pud = pud_alloc(mm, pgd, addr);
+       if (pud)
+               pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+       return pte;
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+                            pmd_t *pmd, int write)
+{
+       struct page *page;
+       unsigned long pfn;
+
+       pfn = ((pmd_val(*pmd) & HPAGE_MASK) >> PAGE_SHIFT);
+       page = pfn_to_page(pfn);
+       return page;
+}
index 2be9139..feacf4c 100644 (file)
@@ -1,9 +1,38 @@
 #include <linux/kernel.h>
 
 #include <asm/cputype.h>
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
+pgd_t *idmap_pgd;
+
+#ifdef CONFIG_ARM_LPAE
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+       unsigned long prot)
+{
+       pmd_t *pmd;
+       unsigned long next;
+
+       if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
+               pmd = pmd_alloc_one(&init_mm, addr);
+               if (!pmd) {
+                       pr_warning("Failed to allocate identity pmd.\n");
+                       return;
+               }
+               pud_populate(&init_mm, pud, pmd);
+               pmd += pmd_index(addr);
+       } else
+               pmd = pmd_offset(pud, addr);
+
+       do {
+               next = pmd_addr_end(addr, end);
+               *pmd = __pmd((addr & PMD_MASK) | prot);
+               flush_pmd_entry(pmd);
+       } while (pmd++, addr = next, addr != end);
+}
+#else  /* !CONFIG_ARM_LPAE */
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
        unsigned long prot)
 {
@@ -15,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
        pmd[1] = __pmd(addr);
        flush_pmd_entry(pmd);
 }
+#endif /* CONFIG_ARM_LPAE */
 
 static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
        unsigned long prot)
@@ -28,11 +58,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
        } while (pud++, addr = next, addr != end);
 }
 
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
        unsigned long prot, next;
 
-       prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+       prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                prot |= PMD_BIT4;
 
@@ -43,48 +73,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
        } while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
-       pmd_t *pmd = pmd_offset(pud, addr);
-       pmd_clear(pmd);
-}
+extern char  __idmap_text_start[], __idmap_text_end[];
 
-static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
+static int __init init_static_idmap(void)
 {
-       pud_t *pud = pud_offset(pgd, addr);
-       unsigned long next;
+       phys_addr_t idmap_start, idmap_end;
 
-       do {
-               next = pud_addr_end(addr, end);
-               idmap_del_pmd(pud, addr, next);
-       } while (pud++, addr = next, addr != end);
-}
+       idmap_pgd = pgd_alloc(&init_mm);
+       if (!idmap_pgd)
+               return -ENOMEM;
 
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-       unsigned long next;
+       /* Add an identity mapping for the physical address of the section. */
+       idmap_start = virt_to_phys((void *)__idmap_text_start);
+       idmap_end = virt_to_phys((void *)__idmap_text_end);
 
-       pgd += pgd_index(addr);
-       do {
-               next = pgd_addr_end(addr, end);
-               idmap_del_pud(pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
+       pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
+               (long long)idmap_start, (long long)idmap_end);
+       identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+
+       return 0;
 }
-#endif
+early_initcall(init_static_idmap);
 
 /*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
-       /*
-        * We need to access to user-mode page tables here. For kernel threads
-        * we don't have any user-mode mappings so we use the context that we
-        * "borrowed".
-        */
-       identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+       /* Clean and invalidate L1. */
+       flush_cache_all();
+
+       /* Switch to the identity mapping. */
+       cpu_switch_mm(idmap_pgd, &init_mm);
+
+       /* Flush the TLB. */
        local_flush_tlb_all();
 }
index cc3f35d..6386e70 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/mach-types.h>
 #include <asm/prom.h>
@@ -137,30 +137,18 @@ void show_mem(unsigned int filter)
 }
 
 static void __init find_limits(unsigned long *min, unsigned long *max_low,
-       unsigned long *max_high)
+                              unsigned long *max_high)
 {
        struct meminfo *mi = &meminfo;
        int i;
 
-       *min = -1UL;
-       *max_low = *max_high = 0;
-
-       for_each_bank (i, mi) {
-               struct membank *bank = &mi->bank[i];
-               unsigned long start, end;
-
-               start = bank_pfn_start(bank);
-               end = bank_pfn_end(bank);
-
-               if (*min > start)
-                       *min = start;
-               if (*max_high < end)
-                       *max_high = end;
-               if (bank->highmem)
-                       continue;
-               if (*max_low < end)
-                       *max_low = end;
-       }
+       /* This assumes the meminfo array is properly sorted */
+       *min = bank_pfn_start(&mi->bank[0]);
+       for_each_bank (i, mi)
+               if (mi->bank[i].highmem)
+                               break;
+       *max_low = bank_pfn_end(&mi->bank[i - 1]);
+       *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
 }
 
 static void __init arm_bootmem_init(unsigned long start_pfn,
@@ -226,7 +214,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
@@ -241,6 +229,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 }
 #endif
 
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               arm_dma_zone_size = mdesc->dma_zone_size;
+               arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+       } else
+               arm_dma_limit = 0xffffffff;
+#endif
+}
+
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
 {
@@ -288,12 +287,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
-       if (arm_dma_zone_size) {
+       if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
-               arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
-       } else
-               arm_dma_limit = 0xffffffff;
 #endif
 
        free_area_init_node(0, zone_size, min, zhole_size);
@@ -322,19 +318,10 @@ static void arm_memory_present(void)
 }
 #endif
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-       const struct membank *a = _a, *b = _b;
-       long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-       return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
        int i;
 
-       sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
        memblock_init();
        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -374,6 +361,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
        if (mdesc->reserve)
                mdesc->reserve();
 
+       /*
+        * reserve memory for DMA contiguous allocations,
+        * must come from DMA area inside low memory
+        */
+       dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
        memblock_analyze();
        memblock_dump_all();
 }
@@ -406,8 +399,6 @@ void __init bootmem_init(void)
         */
        arm_bootmem_free(min, max_low, max_high);
 
-       high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
-
        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
index bdb248c..c6f20dc 100644 (file)
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING 0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
 {
@@ -64,7 +58,7 @@ void __check_kvm_seq(struct mm_struct *mm)
        } while (seq != init_mm.context.kvm_seq);
 }
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Section support is unsafe on SMP - If you iounmap and ioremap a region,
  * the other CPUs will not see this change until their next context switch.
@@ -79,13 +73,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmdp;
 
        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
+       pud = pud_offset(pgd, addr);
+       pmdp = pmd_offset(pud, addr);
        do {
-               pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+               pmd_t pmd = *pmdp;
 
-               pmd = *pmdp;
                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
@@ -104,8 +101,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }
 
-               addr += PGDIR_SIZE;
-               pgd++;
+               addr += PMD_SIZE;
+               pmdp += 2;
        } while (addr < end);
 
        /*
@@ -124,6 +121,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 {
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
 
        /*
         * Remove and free any PTE-based mapping, and
@@ -132,17 +131,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
        unmap_area_sections(virt, size);
 
        pgd = pgd_offset_k(addr);
+       pud = pud_offset(pgd, addr);
+       pmd = pmd_offset(pud, addr);
        do {
-               pmd_t *pmd = pmd_offset(pgd, addr);
-
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);
 
-               addr += PGDIR_SIZE;
-               pgd++;
+               addr += PMD_SIZE;
+               pmd += 2;
        } while (addr < end);
 
        return 0;
@@ -154,6 +153,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 {
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
 
        /*
         * Remove and free any PTE-based mapping, and
@@ -162,6 +163,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
        unmap_area_sections(virt, size);
 
        pgd = pgd_offset_k(virt);
+       pud = pud_offset(pgd, addr);
+       pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;
 
@@ -170,14 +173,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
                for (i = 0; i < 8; i++) {
-                       pmd_t *pmd = pmd_offset(pgd, addr);
-
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);
 
-                       addr += PGDIR_SIZE;
-                       pgd++;
+                       addr += PMD_SIZE;
+                       pmd += 2;
                }
 
                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
@@ -195,17 +196,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long addr;
        struct vm_struct * area;
 
+#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;
-
-       /*
-        * Don't allow RAM to be mapped - this causes problems with ARMv6+
-        */
-       if (WARN_ON(pfn_valid(pfn)))
-               return NULL;
+#endif
 
        type = get_mem_type(mtype);
        if (!type)
@@ -216,12 +213,40 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         */
        size = PAGE_ALIGN(offset + size);
 
+       /*
+        * Try to reuse one of the static mapping whenever possible.
+        */
+       read_lock(&vmlist_lock);
+       for (area = vmlist; area; area = area->next) {
+               if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+                       break;
+               if (!(area->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+                       continue;
+               if (__phys_to_pfn(area->phys_addr) > pfn ||
+                   __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+                       continue;
+               /* we can drop the lock here as we know *area is static */
+               read_unlock(&vmlist_lock);
+               addr = (unsigned long)area->addr;
+               addr += __pfn_to_phys(pfn) - area->phys_addr;
+               return (void __iomem *) (offset + addr);
+       }
+       read_unlock(&vmlist_lock);
+
+       /*
+        * Don't allow RAM to be mapped - this causes problems with ARMv6+
+        */
+       if (WARN_ON(pfn_valid(pfn)))
+               return NULL;
+
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
@@ -310,31 +335,74 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
                        __builtin_return_address(0));
 }
 
+void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
+                               unsigned long prot_val)
+{
+       const struct mem_type *type;
+       unsigned long addr;
+       struct vm_struct * area;
+       pteval_t prot_pte;
+       int err;
+
+       size = PAGE_ALIGN(size);
+
+       type = get_mem_type(MT_DEVICE);
+       if (!type)
+               return NULL;
+
+       prot_pte = type->prot_pte & ~L_PTE_MT_MASK;
+       prot_pte |= prot_val & L_PTE_MT_MASK;
+
+       area = get_vm_area_caller(size, VM_IOREMAP,
+                                 __builtin_return_address(0));
+       if (!area)
+               return NULL;
+       addr = (unsigned long)area->addr;
+
+       err = ioremap_page_range(addr, addr + size, phys_addr,
+                                __pgprot(prot_pte));
+       if (err) {
+               vunmap((void *)addr);
+               return NULL;
+       }
+
+       flush_cache_vmap(addr, addr + size);
+       return (void __iomem *)addr;
+}
+
+EXPORT_SYMBOL(ioremap_prot);
+
 void __iounmap(volatile void __iomem *io_addr)
 {
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
-       struct vm_struct **p, *tmp;
+       struct vm_struct *vm;
 
-       /*
-        * If this is a section based mapping we need to handle it
-        * specially as the VM subsystem does not know how to handle
-        * such a beast. We need the lock here b/c we need to clear
-        * all the mappings before the area can be reclaimed
-        * by someone else.
-        */
-       write_lock(&vmlist_lock);
-       for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-               if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-                       if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-                               unmap_area_sections((unsigned long)tmp->addr,
-                                                   tmp->size);
-                       }
+       read_lock(&vmlist_lock);
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (vm->addr > addr)
+                       break;
+               if (!(vm->flags & VM_IOREMAP))
+                       continue;
+               /* If this is a static mapping we must leave it alone */
+               if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+                   (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+                       read_unlock(&vmlist_lock);
+                       return;
+               }
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+               /*
+                * If this is a section based mapping we need to handle it
+                * specially as the VM subsystem does not know how to handle
+                * such a beast.
+                */
+               if ((vm->addr == addr) &&
+                   (vm->flags & VM_ARM_SECTION_MAPPING)) {
+                       unmap_area_sections((unsigned long)vm->addr, vm->size);
                        break;
                }
-       }
-       write_unlock(&vmlist_lock);
 #endif
+       }
+       read_unlock(&vmlist_lock);
 
        vunmap(addr);
 }
index ad7cce3..0bfa717 100644 (file)
@@ -21,13 +21,33 @@ const struct mem_type *get_mem_type(unsigned int type);
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+/*
+ * ARM specific vm_struct->flags bits.
+ */
+
+/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
+#define VM_ARM_SECTION_MAPPING 0x80000000
+
+/* permanent static mappings from iotable_init() */
+#define VM_ARM_STATIC_MAPPING  0x40000000
+
+/* mapping type (attributes) for permanent static mappings */
+#define VM_ARM_MTYPE(mt)               ((mt) << 20)
+#define VM_ARM_MTYPE_MASK      (0x1f << 20)
+
+/* consistent regions used by dma_alloc_attrs() */
+#define VM_ARM_DMA_CONSISTENT  0x20000000
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
+extern phys_addr_t arm_lowmem_limit;
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
index 082fa18..1ca8e17 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -150,6 +151,7 @@ static int __init early_nowrite(char *__unused)
 }
 early_param("nowb", early_nowrite);
 
+#ifndef CONFIG_ARM_LPAE
 static int __init early_ecc(char *p)
 {
        if (memcmp(p, "on", 2) == 0)
@@ -159,6 +161,7 @@ static int __init early_ecc(char *p)
        return 0;
 }
 early_param("ecc", early_ecc);
+#endif
 
 static int __init noalign_setup(char *__unused)
 {
@@ -228,10 +231,12 @@ static struct mem_type mem_types[] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
+#ifndef CONFIG_ARM_LPAE
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
+#endif
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_RDONLY,
@@ -281,6 +286,11 @@ static struct mem_type mem_types[] = {
                                PMD_SECT_UNCACHED | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_DMA_READY] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .domain    = DOMAIN_KERNEL,
+       },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -422,6 +432,7 @@ static void __init build_mem_type_table(void)
        if (arch_is_coherent() && cpu_is_xsc3()) {
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
        }
@@ -437,6 +448,7 @@ static void __init build_mem_type_table(void)
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
@@ -444,6 +456,7 @@ static void __init build_mem_type_table(void)
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 
                if (is_smp()) {
                        /*
@@ -459,6 +472,7 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
                }
@@ -482,6 +496,18 @@ static void __init build_mem_type_table(void)
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
        }
 
+#ifdef CONFIG_ARM_LPAE
+       /*
+        * Do not generate access flag faults for the kernel mappings.
+        */
+       for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+               mem_types[i].prot_pte |= PTE_EXT_AF;
+               mem_types[i].prot_sect |= PMD_SECT_AF;
+       }
+       kern_pgprot |= PTE_EXT_AF;
+       vecs_pgprot |= PTE_EXT_AF;
+#endif
+
        for (i = 0; i < 16; i++) {
                pteval_t v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
@@ -498,6 +524,7 @@ static void __init build_mem_type_table(void)
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
@@ -537,13 +564,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
-       void *ptr = __va(memblock_alloc(sz, sz));
+       void *ptr = __va(memblock_alloc(sz, align));
        memset(ptr, 0, sz);
        return ptr;
 }
 
+static void __init *early_alloc(unsigned long sz)
+{
+       return early_alloc_aligned(sz, sz);
+}
+
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
        if (pmd_none(*pmd)) {
@@ -577,11 +609,13 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
-       if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+       if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;
 
+#ifndef CONFIG_ARM_LPAE
                if (addr & SECTION_SIZE)
                        pmd++;
+#endif
 
                do {
                        *pmd = __pmd(phys | type->prot_sect);
@@ -611,6 +645,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
        } while (pud++, addr = next, addr != end);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static void __init create_36bit_mapping(struct map_desc *md,
                                        const struct mem_type *type)
 {
@@ -670,6 +705,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
                pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
        } while (addr != end);
 }
+#endif /* !CONFIG_ARM_LPAE */
 
 /*
  * Create the page directory entries and any necessary
@@ -693,14 +729,16 @@ static void __init create_mapping(struct map_desc *md)
        }
 
        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-           md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+           md->virtual >= PAGE_OFFSET &&
+           (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
                printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-                      " at 0x%08lx overlaps vmalloc space\n",
+                      " at 0x%08lx out of vmalloc space\n",
                       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
        }
 
        type = &mem_types[md->type];
 
+#ifndef CONFIG_ARM_LPAE
        /*
         * Catch 36-bit addresses
         */
@@ -708,6 +746,7 @@ static void __init create_mapping(struct map_desc *md)
                create_36bit_mapping(md, type);
                return;
        }
+#endif
 
        addr = md->virtual & PAGE_MASK;
        phys = __pfn_to_phys(md->pfn);
@@ -737,18 +776,33 @@ static void __init create_mapping(struct map_desc *md)
  */
 void __init iotable_init(struct map_desc *io_desc, int nr)
 {
-       int i;
+       struct map_desc *md;
+       struct vm_struct *vm;
+
+       if (!nr)
+               return;
 
-       for (i = 0; i < nr; i++)
-               create_mapping(io_desc + i);
+       vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+       for (md = io_desc; nr; md++, nr--) {
+               create_mapping(md);
+               vm->addr = (void *)(md->virtual & PAGE_MASK);
+               vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+               vm->phys_addr = __pfn_to_phys(md->pfn); 
+               vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; 
+               vm->flags |= VM_ARM_MTYPE(md->type);
+               vm->caller = iotable_init;
+               vm_area_add_early(vm++);
+       }
 }
 
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min =
+       (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
+ * area - the default is 240m.
  */
 static int __init early_vmalloc(char *arg)
 {
@@ -773,7 +827,7 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
@@ -783,6 +837,9 @@ void __init sanity_check_meminfo(void)
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];
 
+               if (bank->start > ULONG_MAX)
+                       highmem = 1;
+
 #ifdef CONFIG_HIGHMEM
                if (__va(bank->start) >= vmalloc_min ||
                    __va(bank->start) < (void *)PAGE_OFFSET)
@@ -794,7 +851,7 @@ void __init sanity_check_meminfo(void)
                 * Split those memory banks which are partially overlapping
                 * the vmalloc area greatly simplifying things later.
                 */
-               if (__va(bank->start) < vmalloc_min &&
+               if (!highmem && __va(bank->start) < vmalloc_min &&
                    bank->size > vmalloc_min - __va(bank->start)) {
                        if (meminfo.nr_banks >= NR_BANKS) {
                                printk(KERN_CRIT "NR_BANKS too low, "
@@ -814,6 +871,17 @@ void __init sanity_check_meminfo(void)
 #else
                bank->highmem = highmem;
 
+               /*
+                * Highmem banks not allowed with !CONFIG_HIGHMEM.
+                */
+               if (highmem) {
+                       printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
+                              "(!CONFIG_HIGHMEM).\n",
+                              (unsigned long long)bank->start,
+                              (unsigned long long)bank->start + bank->size - 1);
+                       continue;
+               }
+
                /*
                 * Check whether this memory bank would entirely overlap
                 * the vmalloc area.
@@ -842,8 +910,8 @@ void __init sanity_check_meminfo(void)
                        bank->size = newsize;
                }
 #endif
-               if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-                       lowmem_limit = bank->start + bank->size;
+               if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+                       arm_lowmem_limit = bank->start + bank->size;
 
                j++;
        }
@@ -868,7 +936,8 @@ void __init sanity_check_meminfo(void)
        }
 #endif
        meminfo.nr_banks = j;
-       memblock_set_current_limit(lowmem_limit);
+       high_memory = __va(arm_lowmem_limit - 1) + 1;
+       memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -893,19 +962,25 @@ static inline void prepare_page_table(void)
         * Find the end of the first block of lowmem.
         */
        end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-       if (end >= lowmem_limit)
-               end = lowmem_limit;
+       if (end >= arm_lowmem_limit)
+               end = arm_lowmem_limit;
 
        /*
         * Clear out all the kernel space mappings, except for the first
-        * memory bank, up to the end of the vmalloc region.
+        * memory bank, up to the vmalloc region.
         */
        for (addr = __phys_to_virt(end);
-            addr < VMALLOC_END; addr += PMD_SIZE)
+            addr < VMALLOC_START; addr += PMD_SIZE)
                pmd_clear(pmd_off_k(addr));
 }
 
+#ifdef CONFIG_ARM_LPAE
+/* the first page is reserved for pgd */
+#define SWAPPER_PG_DIR_SIZE    (PAGE_SIZE + \
+                                PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
+#else
 #define SWAPPER_PG_DIR_SIZE    (PTRS_PER_PGD * sizeof(pgd_t))
+#endif
 
 /*
  * Reserve the special regions of memory
@@ -928,8 +1003,8 @@ void __init arm_mm_memblock_reserve(void)
 }
 
 /*
- * Set up device the mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
@@ -944,7 +1019,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        vectors_page = early_alloc(PAGE_SIZE);
 
-       for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
+       for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
                pmd_clear(pmd_off_k(addr));
 
        /*
@@ -1028,8 +1103,8 @@ static void __init map_lowmem(void)
                phys_addr_t end = start + reg->size;
                struct map_desc map;
 
-               if (end > lowmem_limit)
-                       end = lowmem_limit;
+               if (end > arm_lowmem_limit)
+                       end = arm_lowmem_limit;
                if (start >= end)
                        break;
 
@@ -1050,11 +1125,12 @@ void __init paging_init(struct machine_desc *mdesc)
 {
        void *zero_page;
 
-       memblock_set_current_limit(lowmem_limit);
+       memblock_set_current_limit(arm_lowmem_limit);
 
        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
+       dma_contiguous_remap();
        devicemaps_init(mdesc);
        kmap_init();
 
index a5018fb..385171e 100644 (file)
@@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void)
 
 void __init sanity_check_meminfo(void)
 {
+       phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
+       high_memory = __va(end - 1) + 1;
 }
 
 /*
@@ -43,7 +45,7 @@ void __init paging_init(struct machine_desc *mdesc)
 /*
  * We don't need to do anything here for nommu machines.
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
 }
 
index b2027c1..a3e78cc 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_LPAE
+#define __pgd_alloc()  kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
+#define __pgd_free(pgd)        kfree(pgd)
+#else
+#define __pgd_alloc()  (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
+#define __pgd_free(pgd)        free_pages((unsigned long)pgd, 2)
+#endif
+
 /*
  * need to get a 16k page for level 1
  */
@@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;
 
-       new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
+       new_pgd = __pgd_alloc();
        if (!new_pgd)
                goto no_pgd;
 
@@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
+#ifdef CONFIG_ARM_LPAE
+       /*
+        * Allocate PMD table for modules and pkmap mappings.
+        */
+       new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
+                           MODULES_VADDR);
+       if (!new_pud)
+               goto no_pud;
+
+       new_pmd = pmd_alloc(mm, new_pud, 0);
+       if (!new_pmd)
+               goto no_pmd;
+#endif
+
        if (!vectors_high()) {
                /*
                 * On ARM, first page must always be allocated since it
-                * contains the machine vectors.
+                * contains the machine vectors. The vectors are always high
+                * with LPAE.
                 */
                new_pud = pud_alloc(mm, new_pgd, 0);
                if (!new_pud)
@@ -74,7 +98,7 @@ no_pte:
 no_pmd:
        pud_free(mm, new_pud);
 no_pud:
-       free_pages((unsigned long)new_pgd, 2);
+       __pgd_free(new_pgd);
 no_pgd:
        return NULL;
 }
@@ -111,5 +135,24 @@ no_pud:
        pgd_clear(pgd);
        pud_free(mm, pud);
 no_pgd:
-       free_pages((unsigned long) pgd_base, 2);
+#ifdef CONFIG_ARM_LPAE
+       /*
+        * Free modules/pkmap or identity pmd tables.
+        */
+       for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               if (pgd_val(*pgd) & L_PGD_SWAPPER)
+                       continue;
+               pud = pud_offset(pgd, 0);
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+               pmd = pmd_offset(pud, 0);
+               pud_clear(pud);
+               pmd_free(mm, pmd);
+               pgd_clear(pgd);
+               pud_free(mm, pud);
+       }
+#endif
+       __pgd_free(pgd_base);
 }
index 6746966..0650bb8 100644 (file)
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm1020_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm1020_reset)
+       .popsection
 
 /*
  * cpu_arm1020_do_idle()
@@ -238,6 +241,7 @@ ENTRY(arm1020_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 4251421..4188478 100644 (file)
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm1020e_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm1020e_reset)
+       .popsection
 
 /*
  * cpu_arm1020e_do_idle()
@@ -232,6 +235,7 @@ ENTRY(arm1020e_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index d283cf3..33c6882 100644 (file)
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm1022_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm1022_reset)
+       .popsection
 
 /*
  * cpu_arm1022_do_idle()
@@ -221,6 +224,7 @@ ENTRY(arm1022_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 678a1ce..fbc1d5f 100644 (file)
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm1026_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm1026_reset)
+       .popsection
 
 /*
  * cpu_arm1026_do_idle()
@@ -215,6 +218,7 @@ ENTRY(arm1026_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index e5b974c..4fbeb5b 100644 (file)
@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+               .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm6_reset)
 ENTRY(cpu_arm7_reset)
                mov     r1, #0
@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
                mov     r1, #0x30
                mcr     p15, 0, r1, c1, c0, 0           @ turn off MMU etc
                mov     pc, r0
+ENDPROC(cpu_arm6_reset)
+ENDPROC(cpu_arm7_reset)
+               .popsection
 
                __CPUINIT
 
index 55f4e29..0ac908c 100644 (file)
@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+               .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm720_reset)
                mov     ip, #0
                mcr     p15, 0, ip, c7, c7, 0           @ invalidate cache
@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
                bic     ip, ip, #0x2100                 @ ..v....s........
                mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
                mov     pc, r0
+ENDPROC(cpu_arm720_reset)
+               .popsection
 
        __CPUINIT
 
index 4506be3..dc5de5d 100644 (file)
@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm740_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c0, 0           @ invalidate cache
@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
        bic     ip, ip, #0x0000000c             @ ............wc..
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm740_reset)
+       .popsection
 
        __CPUINIT
 
index 7e0e1fe..6ddea3e 100644 (file)
@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
  * Params  : loc(r0)   address to jump to
  * Purpose : Sets up everything for a reset and jump to the location for soft reset.
  */
+               .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm7tdmi_reset)
                mov     pc, r0
+ENDPROC(cpu_arm7tdmi_reset)
+               .popsection
 
                __CPUINIT
 
index 927a639..2a5691a 100644 (file)
@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm920_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm920_reset)
+       .popsection
 
 /*
  * cpu_arm920_do_idle()
@@ -207,6 +210,7 @@ ENTRY(arm920_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 490e188..4c44d7e 100644 (file)
@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm922_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm922_reset)
+       .popsection
 
 /*
  * cpu_arm922_do_idle()
@@ -209,6 +212,7 @@ ENTRY(arm922_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 51d494b..ec5b118 100644 (file)
@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm925_reset)
        /* Send software reset to MPU and DSP */
        mov     ip, #0xff000000
@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
        orr     ip, ip, #0x0000ce00
        mov     r4, #1
        strh    r4, [ip, #0x10]
+ENDPROC(cpu_arm925_reset)
+       .popsection
 
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -255,6 +258,7 @@ ENTRY(arm925_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 090f18f..5e174e4 100644 (file)
@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm926_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm926_reset)
+       .popsection
 
 /*
  * cpu_arm926_do_idle()
@@ -218,6 +221,7 @@ ENTRY(arm926_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index ac750d5..a613a7d 100644 (file)
@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm940_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c5, 0           @ flush I cache
@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
        bic     ip, ip, #0x00001000             @ i-cache
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm940_reset)
+       .popsection
 
 /*
  * cpu_arm940_do_idle()
@@ -157,7 +160,7 @@ ENTRY(arm940_coherent_user_range)
  *     - size  - region size
  */
 ENTRY(arm940_flush_kern_dcache_area)
-       mov     ip, #0
+       mov     r0, #0
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
 1:     orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
 2:     mcr     p15, 0, r3, c7, c14, 2          @ clean/flush D index
@@ -165,8 +168,8 @@ ENTRY(arm940_flush_kern_dcache_area)
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 4
        bcs     1b                              @ segments 7 to 0
-       mcr     p15, 0, ip, c7, c5, 0           @ invalidate I cache
-       mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
+       mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        mov     pc, lr
 
 /*
index 683af3a..9f4f299 100644 (file)
@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm946_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c5, 0           @ flush I cache
@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
        bic     ip, ip, #0x00001000             @ i-cache
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_arm946_reset)
+       .popsection
 
 /*
  * cpu_arm946_do_idle()
@@ -187,6 +190,7 @@ ENTRY(arm946_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 2120f9e..8881391 100644 (file)
@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
  * Params  : loc(r0)   address to jump to
  * Purpose : Sets up everything for a reset and jump to the location for soft reset.
  */
+               .pushsection    .idmap.text, "ax"
 ENTRY(cpu_arm9tdmi_reset)
                mov     pc, r0
+ENDPROC(cpu_arm9tdmi_reset)
+               .popsection
 
                __CPUINIT
 
index 4c7a571..272558a 100644 (file)
@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  4
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_fa526_reset)
 /* TODO: Use CP8 if possible... */
        mov     ip, #0
@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
        nop
        nop
        mov     pc, r0
+ENDPROC(cpu_fa526_reset)
+       .popsection
 
 /*
  * cpu_fa526_do_idle()
index 8a6c2f7..23a8e4c 100644 (file)
@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_feroceon_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_feroceon_reset)
+       .popsection
 
 /*
  * cpu_feroceon_do_idle()
@@ -229,6 +232,7 @@ ENTRY(feroceon_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index 8a3edd4..933fc81 100644 (file)
@@ -91,8 +91,9 @@
 #if L_PTE_SHARED != PTE_EXT_SHARED
 #error PTE shared bit mismatch
 #endif
-#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
-     L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+#if !defined (CONFIG_ARM_LPAE) && \
+       (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
+        L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
 #error Invalid Linux PTE bit settings
 #endif
 #endif /* CONFIG_MMU */
index db52b0f..b047546 100644 (file)
@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
  * (same as arm926)
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_mohawk_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_mohawk_reset)
+       .popsection
 
 /*
  * cpu_mohawk_do_idle()
@@ -190,6 +193,7 @@ ENTRY(mohawk_coherent_user_range)
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
+       mov     r0, #0
        mov     pc, lr
 
 /*
index d50ada2..775d70f 100644 (file)
@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_sa110_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_sa110_reset)
+       .popsection
 
 /*
  * cpu_sa110_do_idle(type)
index 6594aef..d92dfd0 100644 (file)
@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_sa1100_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
+ENDPROC(cpu_sa1100_reset)
+       .popsection
 
 /*
  * cpu_sa1100_do_idle(type)
index 85a5348..4d2e447 100644 (file)
@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
  *     - loc   - location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_v6_reset)
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x1                    @ ...............m
@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
        mov     r1, #0
        mcr     p15, 0, r1, c7, c5, 4           @ ISB
        mov     pc, r0
+ENDPROC(cpu_v6_reset)
+       .popsection
 
 /*
  *     cpu_v6_do_idle()
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
new file mode 100644 (file)
index 0000000..37d2189
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * arch/arm/mm/proc-v7-2level.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define TTB_S          (1 << 1)
+#define TTB_RGN_NC     (0 << 3)
+#define TTB_RGN_OC_WBWA        (1 << 3)
+#define TTB_RGN_OC_WT  (2 << 3)
+#define TTB_RGN_OC_WB  (3 << 3)
+#define TTB_NOS                (1 << 5)
+#define TTB_IRGN_NC    ((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA  ((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT    ((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB    ((1 << 0) | (1 << 6))
+
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS_UP   TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP   PMD_SECT_WB
+
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS_SMP  TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP  PMD_SECT_WBWA|PMD_SECT_S
+
+/*
+ *     cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ *     Set the translation table base pointer to be pgd_phys
+ *
+ *     - pgd_phys - physical address of new TTB
+ *
+ *     It is assumed that:
+ *     - we are not using split page tables
+ */
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+       mov     r2, #0
+       ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
+       ALT_SMP(orr     r0, r0, #TTB_FLAGS_SMP)
+       ALT_UP(orr      r0, r0, #TTB_FLAGS_UP)
+#ifdef CONFIG_ARM_ERRATA_430973
+       mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
+#endif
+#ifdef CONFIG_ARM_ERRATA_754322
+       dsb
+#endif
+       mcr     p15, 0, r2, c13, c0, 1          @ set reserved context ID
+       isb
+1:     mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
+       isb
+#ifdef CONFIG_ARM_ERRATA_754322
+       dsb
+#endif
+       mcr     p15, 0, r1, c13, c0, 1          @ set context ID
+       isb
+#endif
+       mov     pc, lr
+ENDPROC(cpu_v7_switch_mm)
+
+/*
+ *     cpu_v7_set_pte_ext(ptep, pte)
+ *
+ *     Set a level 2 translation table entry.
+ *
+ *     - ptep  - pointer to level 2 translation table entry
+ *               (hardware version is stored at +2048 bytes)
+ *     - pte   - PTE value to store
+ *     - ext   - value for extended PTE bits
+ */
+ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
+       str     r1, [r0]                        @ linux version
+
+       bic     r3, r1, #0x000003f0
+       bic     r3, r3, #PTE_TYPE_MASK
+       orr     r3, r3, r2
+       orr     r3, r3, #PTE_EXT_AP0 | 2
+
+       tst     r1, #1 << 4
+       orrne   r3, r3, #PTE_EXT_TEX(1)
+
+       eor     r1, r1, #L_PTE_DIRTY
+       tst     r1, #L_PTE_RDONLY | L_PTE_DIRTY
+       orrne   r3, r3, #PTE_EXT_APX
+
+       tst     r1, #L_PTE_USER
+       orrne   r3, r3, #PTE_EXT_AP1
+
+       tst     r1, #L_PTE_XN
+       orrne   r3, r3, #PTE_EXT_XN
+
+       tst     r1, #L_PTE_YOUNG
+       tstne   r1, #L_PTE_VALID
+       eorne   r1, r1, #L_PTE_NONE
+       tstne   r1, #L_PTE_NONE
+       moveq   r3, #0
+
+ ARM(  str     r3, [r0, #2048]! )
+ THUMB(        add     r0, r0, #2048 )
+ THUMB(        str     r3, [r0] )
+       mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
+#endif
+       mov     pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
+
+       /*
+        * Memory region attributes with SCTLR.TRE=1
+        *
+        *   n = TEX[0],C,B
+        *   TR = PRRR[2n+1:2n]         - memory type
+        *   IR = NMRR[2n+1:2n]         - inner cacheable property
+        *   OR = NMRR[2n+17:2n+16]     - outer cacheable property
+        *
+        *                      n       TR      IR      OR
+        *   UNCACHED           000     00
+        *   BUFFERABLE         001     10      00      00
+        *   WRITETHROUGH       010     10      10      10
+        *   WRITEBACK          011     10      11      11
+        *   reserved           110
+        *   WRITEALLOC         111     10      01      01
+        *   DEV_SHARED         100     01
+        *   DEV_NONSHARED      100     01
+        *   DEV_WC             001     10
+        *   DEV_CACHED         011     10
+        *
+        * Other attributes:
+        *
+        *   DS0 = PRRR[16] = 0         - device shareable property
+        *   DS1 = PRRR[17] = 1         - device shareable property
+        *   NS0 = PRRR[18] = 0         - normal shareable property
+        *   NS1 = PRRR[19] = 1         - normal shareable property
+        *   NOS = PRRR[24+n] = 1       - not outer shareable
+        */
+.equ   PRRR,   0xff0a81a8
+.equ   NMRR,   0x40e040e0
+
+       /*
+        * Macro for setting up the TTBRx and TTBCR registers.
+        * - \ttb0 and \ttb1 updated with the corresponding flags.
+        */
+       .macro  v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+       mcr     p15, 0, \zero, c2, c0, 2        @ TTB control register
+       ALT_SMP(orr     \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
+       ALT_UP(orr      \ttbr0, \ttbr0, #TTB_FLAGS_UP)
+       ALT_SMP(orr     \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
+       ALT_UP(orr      \ttbr1, \ttbr1, #TTB_FLAGS_UP)
+       mcr     p15, 0, \ttbr1, c2, c0, 1       @ load TTB1
+       .endm
+
+       __CPUINIT
+
+       /*   AT
+        *  TFR   EV X F   I D LR    S
+        * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
+        * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+        *    1    0 110       0011 1100 .111 1101 < we want
+        */
+       .align  2
+       .type   v7_crval, #object
+v7_crval:
+       crval   clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
+
+       .previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
new file mode 100644 (file)
index 0000000..d23d067
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * arch/arm/mm/proc-v7-3level.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *   based on arch/arm/mm/proc-v7-2level.S
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define TTB_IRGN_NC    (0 << 8)
+#define TTB_IRGN_WBWA  (1 << 8)
+#define TTB_IRGN_WT    (2 << 8)
+#define TTB_IRGN_WB    (3 << 8)
+#define TTB_RGN_NC     (0 << 10)
+#define TTB_RGN_OC_WBWA        (1 << 10)
+#define TTB_RGN_OC_WT  (2 << 10)
+#define TTB_RGN_OC_WB  (3 << 10)
+#define TTB_S          (3 << 12)
+#define TTB_EAE                (1 << 31)
+
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS_UP   (TTB_IRGN_WB|TTB_RGN_OC_WB)
+#define PMD_FLAGS_UP   (PMD_SECT_WB)
+
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS_SMP  (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
+#define PMD_FLAGS_SMP  (PMD_SECT_WBWA|PMD_SECT_S)
+
+/*
+ * cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ * Set the translation table base pointer to be pgd_phys (physical address of
+ * the new TTB).
+ */
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+       ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
+       and     r3, r1, #0xff
+       mov     r3, r3, lsl #(48 - 32)          @ ASID
+       mcrr    p15, 0, r0, r3, c2              @ set TTB 0
+       isb
+#endif
+       mov     pc, lr
+ENDPROC(cpu_v7_switch_mm)
+
+/*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+ * Set a level 2 translation table entry.
+ * - ptep - pointer to level 3 translation table entry
+ * - pte - PTE value to store (64-bit in r2 and r3)
+ */
+ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
+       tst     r2, #L_PTE_VALID
+       beq     1f
+       tst     r3, #1 << (55 - 32)             @ L_PTE_DIRTY
+       orreq   r2, #L_PTE_RDONLY
+1:     strd    r2, r3, [r0]
+       mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
+#endif
+       mov     pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
+
+       /*
+        * Memory region attributes for LPAE (defined in pgtable-3level.h):
+        *
+        *   n = AttrIndx[2:0]
+        *
+        *                      n       MAIR
+        *   UNCACHED           000     00000000
+        *   BUFFERABLE         001     01000100
+        *   DEV_WC             001     01000100
+        *   WRITETHROUGH       010     10101010
+        *   WRITEBACK          011     11101110
+        *   DEV_CACHED         011     11101110
+        *   DEV_SHARED         100     00000100
+        *   DEV_NONSHARED      100     00000100
+        *   unused             101
+        *   unused             110
+        *   WRITEALLOC         111     11111111
+        */
+.equ   PRRR,   0xeeaa4400                      @ MAIR0
+.equ   NMRR,   0xff000004                      @ MAIR1
+
+       /*
+        * Macro for setting up the TTBRx and TTBCR registers.
+        * - \ttbr1 updated.
+        */
+       .macro  v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+       ldr     \tmp, =swapper_pg_dir           @ swapper_pg_dir virtual address
+       cmp     \ttbr1, \tmp                    @ PHYS_OFFSET > PAGE_OFFSET? (branch below)
+       mrc     p15, 0, \tmp, c2, c0, 2         @ TTB control register
+       orr     \tmp, \tmp, #TTB_EAE
+       ALT_SMP(orr     \tmp, \tmp, #TTB_FLAGS_SMP)
+       ALT_UP(orr      \tmp, \tmp, #TTB_FLAGS_UP)
+       ALT_SMP(orr     \tmp, \tmp, #TTB_FLAGS_SMP << 16)
+       ALT_UP(orr      \tmp, \tmp, #TTB_FLAGS_UP << 16)
+       /*
+        * TTBR0/TTBR1 split (PAGE_OFFSET):
+        *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+        *   0x80000000: T0SZ = 0, T1SZ = 1
+        *   0xc0000000: T0SZ = 0, T1SZ = 2
+        *
+        * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+        * booting secondary CPUs would end up using TTBR1 for the identity
+        * mapping set up in TTBR0.
+        */
+       bhi     9001f                           @ PHYS_OFFSET > PAGE_OFFSET?
+       orr     \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ
+#if defined CONFIG_VMSPLIT_2G
+       /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
+       add     \ttbr1, \ttbr1, #1 << 4         @ skip two L1 entries
+#elif defined CONFIG_VMSPLIT_3G
+       /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
+       add     \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd
+#endif
+       /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
+9001:  mcr     p15, 0, \tmp, c2, c0, 2         @ TTB control register
+       mcrr    p15, 1, \ttbr1, \zero, c2       @ load TTBR1
+       .endm
+
+       __CPUINIT
+
+       /*
+        *   AT
+        *  TFR   EV X F   IHD LR    S
+        * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
+        * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+        *   11    0 110    1  0011 1100 .111 1101 < we want
+        */
+       .align  2
+       .type   v7_crval, #object
+v7_crval:
+       crval   clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
+
+       .previous
index 43c6981..c540f53 100644 (file)
 
 #include "proc-macros.S"
 
-#define TTB_S          (1 << 1)
-#define TTB_RGN_NC     (0 << 3)
-#define TTB_RGN_OC_WBWA        (1 << 3)
-#define TTB_RGN_OC_WT  (2 << 3)
-#define TTB_RGN_OC_WB  (3 << 3)
-#define TTB_NOS                (1 << 5)
-#define TTB_IRGN_NC    ((0 << 0) | (0 << 6))
-#define TTB_IRGN_WBWA  ((0 << 0) | (1 << 6))
-#define TTB_IRGN_WT    ((1 << 0) | (0 << 6))
-#define TTB_IRGN_WB    ((1 << 0) | (1 << 6))
-
-/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS_UP   TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS_UP   PMD_SECT_WB
-
-/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS_SMP  TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS_SMP  PMD_SECT_WBWA|PMD_SECT_S
+#ifdef CONFIG_ARM_LPAE
+#include "proc-v7-3level.S"
+#else
+#include "proc-v7-2level.S"
+#endif
 
 ENTRY(cpu_v7_proc_init)
        mov     pc, lr
@@ -63,6 +50,7 @@ ENDPROC(cpu_v7_proc_fin)
  *      caches disabled.
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_v7_reset)
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x1                    @ ...............m
@@ -71,6 +59,7 @@ ENTRY(cpu_v7_reset)
        isb
        mov     pc, r0
 ENDPROC(cpu_v7_reset)
+       .popsection
 
 /*
  *     cpu_v7_do_idle()
@@ -97,124 +86,12 @@ ENTRY(cpu_v7_dcache_clean_area)
        mov     pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
-/*
- *     cpu_v7_switch_mm(pgd_phys, tsk)
- *
- *     Set the translation table base pointer to be pgd_phys
- *
- *     - pgd_phys - physical address of new TTB
- *
- *     It is assumed that:
- *     - we are not using split page tables
- */
-ENTRY(cpu_v7_switch_mm)
-#ifdef CONFIG_MMU
-       mov     r2, #0
-       ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
-       ALT_SMP(orr     r0, r0, #TTB_FLAGS_SMP)
-       ALT_UP(orr      r0, r0, #TTB_FLAGS_UP)
-#ifdef CONFIG_ARM_ERRATA_430973
-       mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
-#endif
-#ifdef CONFIG_ARM_ERRATA_754322
-       dsb
-#endif
-       mcr     p15, 0, r2, c13, c0, 1          @ set reserved context ID
-       isb
-1:     mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
-       isb
-#ifdef CONFIG_ARM_ERRATA_754322
-       dsb
-#endif
-       mcr     p15, 0, r1, c13, c0, 1          @ set context ID
-       isb
-#endif
-       mov     pc, lr
-ENDPROC(cpu_v7_switch_mm)
-
-/*
- *     cpu_v7_set_pte_ext(ptep, pte)
- *
- *     Set a level 2 translation table entry.
- *
- *     - ptep  - pointer to level 2 translation table entry
- *               (hardware version is stored at +2048 bytes)
- *     - pte   - PTE value to store
- *     - ext   - value for extended PTE bits
- */
-ENTRY(cpu_v7_set_pte_ext)
-#ifdef CONFIG_MMU
-       str     r1, [r0]                        @ linux version
-
-       bic     r3, r1, #0x000003f0
-       bic     r3, r3, #PTE_TYPE_MASK
-       orr     r3, r3, r2
-       orr     r3, r3, #PTE_EXT_AP0 | 2
-
-       tst     r1, #1 << 4
-       orrne   r3, r3, #PTE_EXT_TEX(1)
-
-       eor     r1, r1, #L_PTE_DIRTY
-       tst     r1, #L_PTE_RDONLY | L_PTE_DIRTY
-       orrne   r3, r3, #PTE_EXT_APX
-
-       tst     r1, #L_PTE_USER
-       orrne   r3, r3, #PTE_EXT_AP1
-
-       tst     r1, #L_PTE_XN
-       orrne   r3, r3, #PTE_EXT_XN
-
-       tst     r1, #L_PTE_YOUNG
-       tstne   r1, #L_PTE_PRESENT
-       eorne   r1, r1, #L_PTE_NONE
-       tstne   r1, #L_PTE_NONE
-       moveq   r3, #0
-
- ARM(  str     r3, [r0, #2048]! )
- THUMB(        add     r0, r0, #2048 )
- THUMB(        str     r3, [r0] )
-       mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
-#endif
-       mov     pc, lr
-ENDPROC(cpu_v7_set_pte_ext)
-
        string  cpu_v7_name, "ARMv7 Processor"
        .align
 
-       /*
-        * Memory region attributes with SCTLR.TRE=1
-        *
-        *   n = TEX[0],C,B
-        *   TR = PRRR[2n+1:2n]         - memory type
-        *   IR = NMRR[2n+1:2n]         - inner cacheable property
-        *   OR = NMRR[2n+17:2n+16]     - outer cacheable property
-        *
-        *                      n       TR      IR      OR
-        *   UNCACHED           000     00
-        *   BUFFERABLE         001     10      00      00
-        *   WRITETHROUGH       010     10      10      10
-        *   WRITEBACK          011     10      11      11
-        *   reserved           110
-        *   WRITEALLOC         111     10      01      01
-        *   DEV_SHARED         100     01
-        *   DEV_NONSHARED      100     01
-        *   DEV_WC             001     10
-        *   DEV_CACHED         011     10
-        *
-        * Other attributes:
-        *
-        *   DS0 = PRRR[16] = 0         - device shareable property
-        *   DS1 = PRRR[17] = 1         - device shareable property
-        *   NS0 = PRRR[18] = 0         - normal shareable property
-        *   NS1 = PRRR[19] = 1         - normal shareable property
-        *   NOS = PRRR[24+n] = 1       - not outer shareable
-        */
-.equ   PRRR,   0xff0a81a8
-.equ   NMRR,   0x40e040e0
-
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
-.equ   cpu_v7_suspend_size, 4 * 7
+.equ   cpu_v7_suspend_size, 4 * 8
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
        stmfd   sp!, {r4 - r10, lr}
@@ -223,10 +100,11 @@ ENTRY(cpu_v7_do_suspend)
        stmia   r0!, {r4 - r5}
        mrc     p15, 0, r6, c3, c0, 0   @ Domain ID
        mrc     p15, 0, r7, c2, c0, 1   @ TTB 1
+       mrc     p15, 0, r11, c2, c0, 2  @ TTB control register
        mrc     p15, 0, r8, c1, c0, 0   @ Control register
        mrc     p15, 0, r9, c1, c0, 1   @ Auxiliary control register
        mrc     p15, 0, r10, c1, c0, 2  @ Co-processor access control
-       stmia   r0, {r6 - r10}
+       stmia   r0, {r6 - r11}
        ldmfd   sp!, {r4 - r10, pc}
 ENDPROC(cpu_v7_do_suspend)
 
@@ -238,13 +116,15 @@ ENTRY(cpu_v7_do_resume)
        ldmia   r0!, {r4 - r5}
        mcr     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mcr     p15, 0, r5, c13, c0, 3  @ User r/o thread ID
-       ldmia   r0, {r6 - r10}
+       ldmia   r0, {r6 - r11}
        mcr     p15, 0, r6, c3, c0, 0   @ Domain ID
+#ifndef CONFIG_ARM_LPAE
        ALT_SMP(orr     r1, r1, #TTB_FLAGS_SMP)
        ALT_UP(orr      r1, r1, #TTB_FLAGS_UP)
+#endif
        mcr     p15, 0, r1, c2, c0, 0   @ TTB 0
        mcr     p15, 0, r7, c2, c0, 1   @ TTB 1
-       mcr     p15, 0, ip, c2, c0, 2   @ TTB control register
+       mcr     p15, 0, r11, c2, c0, 2  @ TTB control register
        mrc     p15, 0, r4, c1, c0, 1   @ Read Auxiliary control register
        teq     r4, r9                  @ Is it already set?
        mcrne   p15, 0, r9, c1, c0, 1   @ No, so write it
@@ -367,18 +247,19 @@ __v7_setup:
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
 #ifdef CONFIG_MMU
        mcr     p15, 0, r10, c8, c7, 0          @ invalidate I + D TLBs
-       mcr     p15, 0, r10, c2, c0, 2          @ TTB control register
-       ALT_SMP(orr     r4, r4, #TTB_FLAGS_SMP)
-       ALT_UP(orr      r4, r4, #TTB_FLAGS_UP)
-       ALT_SMP(orr     r8, r8, #TTB_FLAGS_SMP)
-       ALT_UP(orr      r8, r8, #TTB_FLAGS_UP)
-       mcr     p15, 0, r8, c2, c0, 1           @ load TTB1
+       v7_ttb_setup r10, r4, r8, r5            @ TTBCR, TTBRx setup
        ldr     r5, =PRRR                       @ PRRR
        ldr     r6, =NMRR                       @ NMRR
        mcr     p15, 0, r5, c10, c2, 0          @ write PRRR
        mcr     p15, 0, r6, c10, c2, 1          @ write NMRR
 #endif
        dsb                                     @ Complete invalidations
+
+#ifdef CONFIG_USER_PMON
+       mov     r0, #1
+       mcr     p15, 0, r0, c9, c14, 0
+#endif
+
 #ifndef CONFIG_ARM_THUMBEE
        mrc     p15, 0, r0, c0, c1, 0           @ read ID_PFR0 for ThumbEE
        and     r0, r0, #(0xf << 12)            @ ThumbEE enabled field
@@ -407,16 +288,7 @@ __v7_setup:
        mov     pc, lr                          @ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-       /*   AT
-        *  TFR   EV X F   I D LR    S
-        * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
-        * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
-        *    1    0 110       0011 1100 .111 1101 < we want
-        */
-       .type   v7_crval, #object
-v7_crval:
-       crval   clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
-
+       .align  2
 __v7_setup_stack:
        .space  4 * 11                          @ 11 registers
 
@@ -438,11 +310,11 @@ __v7_setup_stack:
         */
 .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
        ALT_SMP(.long   PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
-                       PMD_FLAGS_SMP | \mm_mmuflags)
+                       PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
        ALT_UP(.long    PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
-                       PMD_FLAGS_UP | \mm_mmuflags)
-       .long   PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \
-               PMD_SECT_AP_READ | \io_mmuflags
+                       PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
+       .long   PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
        W(b)    \initfunc
        .long   cpu_arch_name
        .long   cpu_elf_name
@@ -455,6 +327,7 @@ __v7_setup_stack:
        .long   v7_cache_fns
 .endm
 
+#ifndef CONFIG_ARM_LPAE
        /*
         * ARM Ltd. Cortex A5 processor.
         */
@@ -474,6 +347,7 @@ __v7_ca9mp_proc_info:
        .long   0xff0ffff0
        __v7_proc __v7_ca9mp_setup
        .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+#endif /* CONFIG_ARM_LPAE */
 
        /*
         * ARM Ltd. Cortex A15 processor.
index 5c4969d..a2d1e86 100644 (file)
@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
  * loc: location to jump to for soft reset
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_xsc3_reset)
        mov     r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
        msr     cpsr_c, r1                      @ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
        @ already containing those two last instructions to survive.
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I and D TLBs
        mov     pc, r0
+ENDPROC(cpu_xsc3_reset)
+       .popsection
 
 /*
  * cpu_xsc3_do_idle()
index 76a8015..98646d0 100644 (file)
@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
  * Beware PXA270 erratum E7.
  */
        .align  5
+       .pushsection    .idmap.text, "ax"
 ENTRY(cpu_xscale_reset)
        mov     r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
        msr     cpsr_c, r1                      @ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
        @ already containing those two last instructions to survive.
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        mov     pc, r0
+ENDPROC(cpu_xscale_reset)
+       .popsection
 
 /*
  * cpu_xscale_do_idle()
index 036fdbf..a631016 100644 (file)
@@ -1,5 +1,8 @@
+#include <linux/fs.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 
 #include "vmregion.h"
@@ -36,7 +39,7 @@
 
 struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-                  size_t size, gfp_t gfp)
+                  size_t size, gfp_t gfp, const void *caller)
 {
        unsigned long start = head->vm_start, addr = head->vm_end;
        unsigned long flags;
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
        if (!new)
                goto out;
 
+       new->caller = caller;
+
        spin_lock_irqsave(&head->vm_lock, flags);
 
        addr = rounddown(addr - size, align);
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
 
        kfree(c);
 }
+
+#ifdef CONFIG_PROC_FS
+static int arm_vmregion_show(struct seq_file *m, void *p)
+{
+       struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
+
+       seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
+               c->vm_end - c->vm_start);
+       if (c->caller)
+               seq_printf(m, " %pS", (void *)c->caller);
+       seq_putc(m, '\n');
+       return 0;
+}
+
+static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
+{
+       struct arm_vmregion_head *h = m->private;
+       spin_lock_irq(&h->vm_lock);
+       return seq_list_start(&h->vm_list, *pos);
+}
+
+static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
+{
+       struct arm_vmregion_head *h = m->private;
+       return seq_list_next(p, &h->vm_list, pos);
+}
+
+static void arm_vmregion_stop(struct seq_file *m, void *p)
+{
+       struct arm_vmregion_head *h = m->private;
+       spin_unlock_irq(&h->vm_lock);
+}
+
+static const struct seq_operations arm_vmregion_ops = {
+       .start  = arm_vmregion_start,
+       .stop   = arm_vmregion_stop,
+       .next   = arm_vmregion_next,
+       .show   = arm_vmregion_show,
+};
+
+static int arm_vmregion_open(struct inode *inode, struct file *file)
+{
+       struct arm_vmregion_head *h = PDE(inode)->data;
+       int ret = seq_open(file, &arm_vmregion_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+               m->private = h;
+       }
+       return ret;
+}
+
+static const struct file_operations arm_vmregion_fops = {
+       .open   = arm_vmregion_open,
+       .read   = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+       proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
+       return 0;
+}
+#else
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+       return 0;
+}
+#endif
index 15e9f04..bf312c3 100644 (file)
@@ -17,13 +17,16 @@ struct arm_vmregion {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
-       struct page             *vm_pages;
+       void                    *priv;
        int                     vm_active;
+       const void              *caller;
 };
 
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
 struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
 struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
 void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
 
+int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
+
 #endif
index 568dd02..cbfbbe4 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/timex.h>
-#include <linux/sched.h>
 #include <linux/io.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
@@ -52,21 +51,12 @@ static struct clocksource iop_clocksource = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static DEFINE_CLOCK_DATA(cd);
-
 /*
  * IOP sched_clock() implementation via its clocksource.
  */
-unsigned long long notrace sched_clock(void)
+static u32 notrace iop_read_sched_clock(void)
 {
-       u32 cyc = 0xffffffffu - read_tcr1();
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace iop_update_sched_clock(void)
-{
-       u32 cyc = 0xffffffffu - read_tcr1();
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return 0xffffffffu - read_tcr1();
 }
 
 /*
@@ -152,7 +142,7 @@ void __init iop_init_time(unsigned long tick_rate)
 {
        u32 timer_ctl;
 
-       init_sched_clock(&cd, iop_update_sched_clock, 32, tick_rate);
+       setup_sched_clock(iop_read_sched_clock, 32, tick_rate);
 
        ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
        iop_tick_rate = tick_rate;
diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h
deleted file mode 100644 (file)
index ef6379c..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *  Copyright (C) 2000 Russell King.
- *  Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MXC_VMALLOC_H__
-#define __ASM_ARCH_MXC_VMALLOC_H__
-
-/* vmalloc ending address */
-#define VMALLOC_END       0xf4000000UL
-
-#endif /* __ASM_ARCH_MXC_VMALLOC_H__ */
index d65fb31..7e5c76e 100644 (file)
@@ -71,7 +71,7 @@ void arch_reset(char mode, const char *cmd)
        mdelay(50);
 
        /* we'll take a jump through zero as a poor second */
-       cpu_reset(0);
+       soft_restart(0);
 }
 
 void mxc_arch_reset_init(void __iomem *base)
index 4b0fe28..1c96cdb 100644 (file)
@@ -108,18 +108,9 @@ static void gpt_irq_acknowledge(void)
 
 static void __iomem *sched_clock_reg;
 
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace mxc_read_sched_clock(void)
 {
-       cycle_t cyc = sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
-
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace mxc_update_sched_clock(void)
-{
-       cycle_t cyc = sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
 }
 
 static int __init mxc_clocksource_init(struct clk *timer_clk)
@@ -129,7 +120,7 @@ static int __init mxc_clocksource_init(struct clk *timer_clk)
 
        sched_clock_reg = reg;
 
-       init_sched_clock(&cd, mxc_update_sched_clock, 32, c);
+       setup_sched_clock(mxc_read_sched_clock, 32, c);
        return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
                        clocksource_mmio_readl_up);
 }
index 30b6433..ad1b45b 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/clk.h>
 #include <linux/jiffies.h>
 #include <linux/err.h>
-#include <linux/sched.h>
 #include <asm/mach/time.h>
 #include <asm/sched_clock.h>
 
@@ -79,23 +78,12 @@ void __iomem *mtu_base; /* Assigned by machine code */
  * local implementation which uses the clocksource to get some
  * better resolution when scheduling the kernel.
  */
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace nomadik_read_sched_clock(void)
 {
-       u32 cyc;
-
        if (unlikely(!mtu_base))
                return 0;
 
-       cyc = -readl(mtu_base + MTU_VAL(0));
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace nomadik_update_sched_clock(void)
-{
-       u32 cyc = -readl(mtu_base + MTU_VAL(0));
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return -readl(mtu_base + MTU_VAL(0));
 }
 #endif
 
@@ -231,9 +219,11 @@ void __init nmdk_timer_init(void)
                        rate, 200, 32, clocksource_mmio_readl_down))
                pr_err("timer: failed to initialize clock source %s\n",
                       "mtu_0");
+
 #ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
-       init_sched_clock(&cd, nomadik_update_sched_clock, 32, rate);
+       setup_sched_clock(nomadik_read_sched_clock, 32, rate);
 #endif
+
        /* Timer 1 is used for events */
 
        clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
index 9852622..a53eca3 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
-obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
 obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
 obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
 obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
index 567e4b5..8030cc1 100644 (file)
@@ -442,6 +442,8 @@ static int __init clk_disable_unused(void)
                return 0;
 
        pr_info("clock: disabling unused clocks to save power\n");
+
+       spin_lock_irqsave(&clockfw_lock, flags);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->ops == &clkops_null)
                        continue;
@@ -449,10 +451,9 @@ static int __init clk_disable_unused(void)
                if (ck->usecount > 0 || !ck->enable_reg)
                        continue;
 
-               spin_lock_irqsave(&clockfw_lock, flags);
                arch_clock->clk_disable_unused(ck);
-               spin_unlock_irqrestore(&clockfw_lock, flags);
        }
+       spin_unlock_irqrestore(&clockfw_lock, flags);
 
        return 0;
 }
index d9f10a3..842df13 100644 (file)
@@ -64,5 +64,7 @@ void __init omap_reserve(void)
 {
        omapfb_reserve_sdram_memblock();
        omap_vram_reserve_sdram_memblock();
+#ifndef CONFIG_CMA
        omap_dsp_reserve_sdram_memblock();
+#endif
 }
index 04e703a..c232856 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/sched.h>
 #include <linux/clocksource.h>
 
 #include <asm/sched_clock.h>
@@ -37,41 +36,9 @@ static void __iomem *timer_32k_base;
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED                0xfffbc410
 
-/*
- * Returns current time from boot in nsecs. It's OK for this to wrap
- * around for now, as it's just a relative time stamp.
- */
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 32768, NSEC_PER_SEC, 60).
- * This gives a resolution of about 30us and a wrap period of about 36hrs.
- */
-#define SC_MULT                4000000000u
-#define SC_SHIFT       17
-
-static inline unsigned long long notrace _omap_32k_sched_clock(void)
-{
-       u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
-       return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-#if defined(CONFIG_OMAP_32K_TIMER) && !defined(CONFIG_OMAP_MPU_TIMER)
-unsigned long long notrace sched_clock(void)
-{
-       return _omap_32k_sched_clock();
-}
-#else
-unsigned long long notrace omap_32k_sched_clock(void)
-{
-       return _omap_32k_sched_clock();
-}
-#endif
-
-static void notrace omap_update_sched_clock(void)
+static u32 notrace omap_32k_read_sched_clock(void)
 {
-       u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
 }
 
 /**
@@ -154,8 +121,7 @@ int __init omap_init_clocksource_32k(void)
                                          clocksource_mmio_readl_up))
                        printk(err, "32k_counter");
 
-               init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
-                                      32768, SC_MULT, SC_SHIFT);
+               setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
        }
        return 0;
 }
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
deleted file mode 100644 (file)
index da4f68d..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- *  linux/arch/arm/plat-omap/cpu-omap.c
- *
- *  CPU frequency scaling for OMAP
- *
- *  Copyright (C) 2005 Nokia Corporation
- *  Written by Tony Lindgren <tony@atomide.com>
- *
- *  Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <plat/clock.h>
-#include <asm/system.h>
-
-#define VERY_HI_RATE   900000000
-
-static struct cpufreq_frequency_table *freq_table;
-
-#ifdef CONFIG_ARCH_OMAP1
-#define MPU_CLK                "mpu"
-#else
-#define MPU_CLK                "virt_prcm_set"
-#endif
-
-static struct clk *mpu_clk;
-
-/* TODO: Add support for SDRAM timing changes */
-
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
-       if (freq_table)
-               return cpufreq_frequency_table_verify(policy, freq_table);
-
-       if (policy->cpu)
-               return -EINVAL;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
-
-       policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
-       policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
-       return 0;
-}
-
-static unsigned int omap_getspeed(unsigned int cpu)
-{
-       unsigned long rate;
-
-       if (cpu)
-               return 0;
-
-       rate = clk_get_rate(mpu_clk) / 1000;
-       return rate;
-}
-
-static int omap_target(struct cpufreq_policy *policy,
-                      unsigned int target_freq,
-                      unsigned int relation)
-{
-       struct cpufreq_freqs freqs;
-       int ret = 0;
-
-       /* Ensure desired rate is within allowed range.  Some govenors
-        * (ondemand) will just pass target_freq=0 to get the minimum. */
-       if (target_freq < policy->min)
-               target_freq = policy->min;
-       if (target_freq > policy->max)
-               target_freq = policy->max;
-
-       freqs.old = omap_getspeed(0);
-       freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
-       freqs.cpu = 0;
-
-       if (freqs.old == freqs.new)
-               return ret;
-
-       cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-#ifdef CONFIG_CPU_FREQ_DEBUG
-       printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n",
-              freqs.old, freqs.new);
-#endif
-       ret = clk_set_rate(mpu_clk, freqs.new * 1000);
-       cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
-       return ret;
-}
-
-static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
-{
-       int result = 0;
-
-       mpu_clk = clk_get(NULL, MPU_CLK);
-       if (IS_ERR(mpu_clk))
-               return PTR_ERR(mpu_clk);
-
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       policy->cur = policy->min = policy->max = omap_getspeed(0);
-
-       clk_init_cpufreq_table(&freq_table);
-       if (freq_table) {
-               result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-               if (!result)
-                       cpufreq_frequency_table_get_attr(freq_table,
-                                                       policy->cpu);
-       } else {
-               policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
-               policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
-                                                       VERY_HI_RATE) / 1000;
-       }
-
-       /* FIXME: what's the actual transition time? */
-       policy->cpuinfo.transition_latency = 300 * 1000;
-
-       return 0;
-}
-
-static int omap_cpu_exit(struct cpufreq_policy *policy)
-{
-       clk_exit_cpufreq_table(&freq_table);
-       clk_put(mpu_clk);
-       return 0;
-}
-
-static struct freq_attr *omap_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-static struct cpufreq_driver omap_driver = {
-       .flags          = CPUFREQ_STICKY,
-       .verify         = omap_verify_speed,
-       .target         = omap_target,
-       .get            = omap_getspeed,
-       .init           = omap_cpu_init,
-       .exit           = omap_cpu_exit,
-       .name           = "omap",
-       .attr           = omap_cpufreq_attr,
-};
-
-static int __init omap_cpufreq_init(void)
-{
-       return cpufreq_register_driver(&omap_driver);
-}
-
-arch_initcall(omap_cpufreq_init);
-
-/*
- * if ever we want to remove this, upon cleanup call:
- *
- * cpufreq_unregister_driver()
- * cpufreq_frequency_table_put_attr()
- */
-
index c22217c..96c14a3 100644 (file)
@@ -563,22 +563,25 @@ EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
 
 static inline void omap_enable_channel_irq(int lch)
 {
-       u32 status;
-
        /* Clear CSR */
        if (cpu_class_is_omap1())
-               status = p->dma_read(CSR, lch);
-       else if (cpu_class_is_omap2())
+               p->dma_read(CSR, lch);
+       else
                p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
 
        /* Enable some nice interrupts. */
        p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
 }
 
-static void omap_disable_channel_irq(int lch)
+static inline void omap_disable_channel_irq(int lch)
 {
-       if (cpu_class_is_omap2())
-               p->dma_write(0, CICR, lch);
+       /* disable channel interrupts */
+       p->dma_write(0, CICR, lch);
+       /* Clear CSR */
+       if (cpu_class_is_omap1())
+               p->dma_read(CSR, lch);
+       else
+               p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
 }
 
 void omap_enable_dma_irq(int lch, u16 bits)
@@ -622,14 +625,14 @@ static inline void disable_lnk(int lch)
        l = p->dma_read(CLNK_CTRL, lch);
 
        /* Disable interrupts */
+       omap_disable_channel_irq(lch);
+
        if (cpu_class_is_omap1()) {
-               p->dma_write(0, CICR, lch);
                /* Set the STOP_LNK bit */
                l |= 1 << 14;
        }
 
        if (cpu_class_is_omap2()) {
-               omap_disable_channel_irq(lch);
                /* Clear the ENABLE_LNK bit */
                l &= ~(1 << 15);
        }
@@ -647,6 +650,9 @@ static inline void omap2_enable_irq_lch(int lch)
                return;
 
        spin_lock_irqsave(&dma_chan_lock, flags);
+       /* clear IRQ STATUS */
+       p->dma_write(1 << lch, IRQSTATUS_L0, lch);
+       /* Enable interrupt */
        val = p->dma_read(IRQENABLE_L0, lch);
        val |= 1 << lch;
        p->dma_write(val, IRQENABLE_L0, lch);
@@ -662,9 +668,12 @@ static inline void omap2_disable_irq_lch(int lch)
                return;
 
        spin_lock_irqsave(&dma_chan_lock, flags);
+       /* Disable interrupt */
        val = p->dma_read(IRQENABLE_L0, lch);
        val &= ~(1 << lch);
        p->dma_write(val, IRQENABLE_L0, lch);
+       /* clear IRQ STATUS */
+       p->dma_write(1 << lch, IRQSTATUS_L0, lch);
        spin_unlock_irqrestore(&dma_chan_lock, flags);
 }
 
@@ -680,8 +689,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
        for (ch = 0; ch < dma_chan_count; ch++) {
                if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
                        free_ch = ch;
-                       if (dev_id == 0)
-                               break;
+                       /* Exit after first free channel found */
+                       break;
                }
        }
        if (free_ch == -1) {
@@ -735,11 +744,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
        }
 
        if (cpu_class_is_omap2()) {
-               omap2_enable_irq_lch(free_ch);
                omap_enable_channel_irq(free_ch);
-               /* Clear the CSR register and IRQ status register */
-               p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
-               p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
+               omap2_enable_irq_lch(free_ch);
        }
 
        *dma_ch_out = free_ch;
@@ -758,27 +764,19 @@ void omap_free_dma(int lch)
                return;
        }
 
-       if (cpu_class_is_omap1()) {
-               /* Disable all DMA interrupts for the channel. */
-               p->dma_write(0, CICR, lch);
-               /* Make sure the DMA transfer is stopped. */
-               p->dma_write(0, CCR, lch);
-       }
-
-       if (cpu_class_is_omap2()) {
+       /* Disable interrupt for logical channel */
+       if (cpu_class_is_omap2())
                omap2_disable_irq_lch(lch);
 
-               /* Clear the CSR register and IRQ status register */
-               p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
-               p->dma_write(1 << lch, IRQSTATUS_L0, lch);
+       /* Disable all DMA interrupts for the channel. */
+       omap_disable_channel_irq(lch);
 
-               /* Disable all DMA interrupts for the channel. */
-               p->dma_write(0, CICR, lch);
+       /* Make sure the DMA transfer is stopped. */
+       p->dma_write(0, CCR, lch);
 
-               /* Make sure the DMA transfer is stopped. */
-               p->dma_write(0, CCR, lch);
+       /* Clear registers */
+       if (cpu_class_is_omap2())
                omap_clear_dma(lch);
-       }
 
        spin_lock_irqsave(&dma_chan_lock, flags);
        dma_chan[lch].dev_id = -1;
@@ -884,11 +882,12 @@ void omap_start_dma(int lch)
                int next_lch, cur_lch;
                char dma_chan_link_map[dma_lch_count];
 
-               dma_chan_link_map[lch] = 1;
                /* Set the link register of the first channel */
                enable_lnk(lch);
 
                memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
+               dma_chan_link_map[lch] = 1;
+
                cur_lch = dma_chan[lch].next_lch;
                do {
                        next_lch = dma_chan[cur_lch].next_lch;
@@ -915,6 +914,13 @@ void omap_start_dma(int lch)
                        l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
        l |= OMAP_DMA_CCR_EN;
 
+       /*
+        * As dma_write() uses IO accessors which are weakly ordered, there
+        * is no guarantee that data in coherent DMA memory will be visible
+        * to the DMA device.  Add a memory barrier here to ensure that any
+        * such data is visible prior to enabling DMA.
+        */
+       mb();
        p->dma_write(l, CCR, lch);
 
        dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -926,8 +932,7 @@ void omap_stop_dma(int lch)
        u32 l;
 
        /* Disable all interrupts on the channel */
-       if (cpu_class_is_omap1())
-               p->dma_write(0, CICR, lch);
+       omap_disable_channel_irq(lch);
 
        l = p->dma_read(CCR, lch);
        if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
@@ -964,6 +969,13 @@ void omap_stop_dma(int lch)
                p->dma_write(l, CCR, lch);
        }
 
+       /*
+        * Ensure that data transferred by DMA is visible to any access
+        * after DMA has been disabled.  This is important for coherent
+        * DMA regions.
+        */
+       mb();
+
        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
                int next_lch, cur_lch = lch;
                char dma_chan_link_map[dma_lch_count];
@@ -1034,6 +1046,18 @@ dma_addr_t omap_get_dma_src_pos(int lch)
        if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
                offset = p->dma_read(CSAC, lch);
 
+       if (!cpu_is_omap15xx()) {
+               /*
+                * CDAC == 0 indicates that the DMA transfer on the channel has
+                * not been started (no data has been transferred so far).
+                * Return the programmed source start address in this case.
+                */
+               if (likely(p->dma_read(CDAC, lch)))
+                       offset = p->dma_read(CSAC, lch);
+               else
+                       offset = p->dma_read(CSSA, lch);
+       }
+
        if (cpu_class_is_omap1())
                offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
 
@@ -1062,8 +1086,16 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
         * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
         * read before the DMA controller finished disabling the channel.
         */
-       if (!cpu_is_omap15xx() && offset == 0)
+       if (!cpu_is_omap15xx() && offset == 0) {
                offset = p->dma_read(CDAC, lch);
+               /*
+                * CDAC == 0 indicates that the DMA transfer on the channel has
+                * not been started (no data has been transferred so far).
+                * Return the programmed destination start address in this case.
+                */
+               if (unlikely(!offset))
+                       offset = p->dma_read(CDSA, lch);
+       }
 
        if (cpu_class_is_omap1())
                offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
@@ -1982,7 +2014,7 @@ static int __devinit omap_system_dma_probe(struct platform_device *pdev)
        errata                  = p->errata;
 
        if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
-                       && (omap_dma_reserve_channels <= dma_lch_count))
+                       && (omap_dma_reserve_channels < d->lch_count))
                d->lch_count    = omap_dma_reserve_channels;
 
        dma_lch_count           = d->lch_count;
@@ -2045,6 +2077,7 @@ static int __devinit omap_system_dma_probe(struct platform_device *pdev)
                dma_irq = platform_get_irq_byname(pdev, irq_name);
                if (dma_irq < 0) {
                        dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
+                       ret = dma_irq;
                        goto exit_dma_lch_fail;
                }
                ret = setup_irq(dma_irq, &omap24xx_dma_irq);
@@ -2075,8 +2108,6 @@ exit_dma_irq_fail:
        }
 
 exit_dma_lch_fail:
-       kfree(p);
-       kfree(d);
        kfree(dma_chan);
        return ret;
 }
@@ -2097,8 +2128,6 @@ static int __devexit omap_system_dma_remove(struct platform_device *pdev)
                        free_irq(dma_irq, (void *)(irq_rel + 1));
                }
        }
-       kfree(p);
-       kfree(d);
        kfree(dma_chan);
        return 0;
 }
index f9adbbb..fea56d5 100644 (file)
@@ -80,12 +80,6 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
 
 static void omap_timer_restore_context(struct omap_dm_timer *timer)
 {
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_OFFSET,
-                               timer->context.tiocp_cfg);
-       if (timer->revision > 1)
-               __raw_writel(timer->context.tistat, timer->sys_stat);
-
-       __raw_writel(timer->context.tisr, timer->irq_stat);
        omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
                                timer->context.twer);
        omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
@@ -120,21 +114,17 @@ static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
 
 static void omap_dm_timer_reset(struct omap_dm_timer *timer)
 {
-       omap_dm_timer_enable(timer);
        if (timer->pdev->id != 1) {
                omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
                omap_dm_timer_wait_for_reset(timer);
        }
 
        __omap_dm_timer_reset(timer, 0, 0);
-       omap_dm_timer_disable(timer);
-       timer->posted = 1;
 }
 
 int omap_dm_timer_prepare(struct omap_dm_timer *timer)
 {
        struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
-       int ret;
 
        timer->fclk = clk_get(&timer->pdev->dev, "fck");
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
@@ -143,13 +133,15 @@ int omap_dm_timer_prepare(struct omap_dm_timer *timer)
                return -EINVAL;
        }
 
+       omap_dm_timer_enable(timer);
+
        if (pdata->needs_manual_reset)
                omap_dm_timer_reset(timer);
 
-       ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+       __omap_dm_timer_enable_posted(timer);
+       omap_dm_timer_disable(timer);
 
-       timer->posted = 1;
-       return ret;
+       return omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
 }
 
 struct omap_dm_timer *omap_dm_timer_request(void)
@@ -167,6 +159,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
                timer->reserved = 1;
                break;
        }
+       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (timer) {
                ret = omap_dm_timer_prepare(timer);
@@ -175,7 +168,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
                        timer = NULL;
                }
        }
-       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (!timer)
                pr_debug("%s: timer request failed!\n", __func__);
@@ -198,6 +190,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
                        break;
                }
        }
+       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (timer) {
                ret = omap_dm_timer_prepare(timer);
@@ -206,7 +199,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
                        timer = NULL;
                }
        }
-       spin_unlock_irqrestore(&dm_timer_lock, flags);
 
        if (!timer)
                pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -357,6 +349,18 @@ int omap_dm_timer_stop(struct omap_dm_timer *timer)
 
        __omap_dm_timer_stop(timer, timer->posted, rate);
 
+       if (timer->loses_context && timer->get_context_loss_count)
+               timer->ctx_loss_count =
+                       timer->get_context_loss_count(&timer->pdev->dev);
+
+       /*
+        * Since the register values are computed and written within
+        * __omap_dm_timer_stop, we need to use read to retrieve the
+        * context.
+        */
+       timer->context.tclr =
+                       omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+       omap_dm_timer_disable(timer);
        return 0;
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
@@ -457,8 +461,8 @@ int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
                l |= OMAP_TIMER_CTRL_CE;
        else
                l &= ~OMAP_TIMER_CTRL_CE;
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
        omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+       omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
 
        /* Save the context */
        timer->context.tclr = l;
@@ -555,8 +559,7 @@ int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
                return -EINVAL;
 
        __omap_dm_timer_write_status(timer, value);
-       /* Save the context */
-       timer->context.tisr = value;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
@@ -659,6 +662,7 @@ static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
        }
 
        timer->id = pdev->id;
+       timer->errata = pdata->timer_errata;
        timer->irq = irq->start;
        timer->reserved = pdata->reserved;
        timer->pdev = pdev;
index 3ff3e36..e3a7f60 100644 (file)
@@ -42,7 +42,6 @@ extern struct sys_timer omap3_secure_timer;
 extern struct sys_timer omap4_timer;
 extern bool omap_32k_timer_init(void);
 extern int __init omap_init_clocksource_32k(void);
-extern unsigned long long notrace omap_32k_sched_clock(void);
 
 extern void omap_reserve(void);
 
index 408a12f..b22399d 100644 (file)
@@ -395,7 +395,15 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP446X_CLASS         0x44600044
 #define OMAP4460_REV_ES1_0     (OMAP446X_CLASS | (0x10 << 8))
 
-void omap2_check_revision(void);
+#define OMAP447X_CLASS         0x44700044
+#define OMAP4470_REV_ES1_0     (OMAP447X_CLASS | (0x10 << 8))
+
+void omap2xxx_check_revision(void);
+void omap3xxx_check_revision(void);
+void omap4xxx_check_revision(void);
+void omap3xxx_check_features(void);
+void ti81xx_check_features(void);
+void omap4xxx_check_features(void);
 
 /*
  * Runtime detection of OMAP3 features
index 9418f00..47d506f 100644 (file)
  */
 #define OMAP_TIMER_IP_VERSION_1                        0x1
 
+/* posted mode types */
+#define OMAP_TIMER_NONPOSTED                   0x00
+#define OMAP_TIMER_POSTED                      0x01
+
 /* timer capabilities used in hwmod database */
 #define OMAP_TIMER_SECURE                              0x80000000
 #define OMAP_TIMER_ALWON                               0x40000000
 #define OMAP_TIMER_HAS_PWM                             0x20000000
 
+/*
+ * timer errata flags
+ *
+ * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
+ * errata prevents us from using posted mode on these devices, unless the
+ * timer counter register is never read. For more details please refer to
+ * the OMAP3/4/5 errata documents.
+ */
+#define OMAP_TIMER_ERRATA_I103_I767                    0x80000000
+
 struct omap_timer_capability_dev_attr {
        u32 timer_capability;
 };
@@ -75,9 +89,6 @@ struct clk;
 
 struct timer_regs {
        u32 tidr;
-       u32 tiocp_cfg;
-       u32 tistat;
-       u32 tisr;
        u32 tier;
        u32 twer;
        u32 tclr;
@@ -103,6 +114,7 @@ struct dmtimer_platform_data {
        bool reserved;
 
        bool loses_context;
+       u32 timer_errata;
 
        int (*get_context_loss_count)(struct device *dev);
 };
@@ -259,7 +271,7 @@ struct omap_dm_timer {
        unsigned long phys_base;
        int id;
        int irq;
-       struct clk *iclk, *fclk;
+       struct clk *fclk;
 
        void __iomem    *io_base;
        void __iomem    *sys_stat;      /* TISTAT timer status */
@@ -276,6 +288,7 @@ struct omap_dm_timer {
        bool loses_context;
        int ctx_loss_count;
        int revision;
+       u32 errata;
        struct platform_device *pdev;
        struct list_head node;
 
@@ -349,10 +362,46 @@ static inline void __omap_dm_timer_reset(struct omap_dm_timer *timer,
                l |= 1 << 2;
 
        __raw_writel(l, timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
+}
+
+/*
+ * __omap_dm_timer_enable_posted - enables write posted mode
+ * @timer:      pointer to timer instance handle
+ *
+ * Enables the write posted mode for the timer. When posted mode is enabled
+ * writes to certain timer registers are immediately acknowledged by the
+ * internal bus and hence prevents stalling the CPU waiting for the write to
+ * complete. Enabling this feature can improve performance for writing to the
+ * timer registers.
+ */
+static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+{
+       if (timer->posted)
+               return;
+
+       if (timer->errata & OMAP_TIMER_ERRATA_I103_I767)
+               return;
 
-       /* Match hardware reset default of posted mode */
        __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
-                                       OMAP_TIMER_CTRL_POSTED, 0);
+                             OMAP_TIMER_CTRL_POSTED, 0);
+       timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
+       timer->posted = OMAP_TIMER_POSTED;
+}
+
+/**
+ * __omap_dm_timer_override_errata - override errata flags for a timer
+ * @timer:      pointer to timer handle
+ * @errata:    errata flags to be ignored
+ *
+ * For a given timer, override a timer errata by clearing the flags
+ * specified by the errata argument. A specific erratum should only be
+ * overridden for a timer if the timer is used in such a way the erratum
+ * has no impact.
+ */
+static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer,
+                                                  u32 errata)
+{
+       timer->errata &= ~errata;
 }
 
 static inline int __omap_dm_timer_set_source(struct clk *timer_fck,
index 1527929..1340e37 100644 (file)
@@ -36,6 +36,7 @@
 #define GPMC_PREFETCH_FIFO_CNT 0x00000007 /* bytes available in FIFO for r/w */
 #define GPMC_PREFETCH_COUNT    0x00000008 /* remaining bytes to be read/write*/
 #define GPMC_STATUS_BUFFER     0x00000009 /* 1: buffer is available to write */
+#define GPMC_STATUS_WAIT       0x0000000e
 
 #define GPMC_NAND_COMMAND      0x0000000a
 #define GPMC_NAND_ADDRESS      0x0000000b
index 2682043..07badbd 100644 (file)
@@ -33,6 +33,8 @@
 
 #define OMAP_MODE13X_SPEED     230400
 
+#define OMAP_UART_SCR_TX_EMPTY 0x08
+
 /* WER = 0x7F
  * Enable module level wakeup in WER reg
  */
@@ -100,6 +102,7 @@ struct uart_omap_port {
        unsigned char           mcr;
        unsigned char           fcr;
        unsigned char           efr;
+       unsigned char           scr;
 
        int                     use_dma;
        /*
@@ -113,4 +116,7 @@ struct uart_omap_port {
        unsigned long           port_activity;
 };
 
+/* HACK */
+void omap_uart_block_sleep_id(int num);
+
 #endif /* __OMAP_SERIAL_H__ */
index 51423d2..d93ac93 100644 (file)
@@ -36,7 +36,7 @@
 
 #include <plat/omap_hwmod.h>
 
-extern struct device omap_device_parent;
+extern struct dev_pm_domain omap_device_pm_domain;
 
 /* omap_device._state values */
 #define OMAP_DEVICE_STATE_UNKNOWN      0
index 8b372ed..432fdf7 100644 (file)
@@ -379,8 +379,8 @@ struct omap_hwmod_omap4_prcm {
  *
  * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
  *     of idle, rather than relying on module smart-idle
- * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
- *     of standby, rather than relying on module smart-standby
+ * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
+ *     out of standby, rather than relying on module smart-standby
  * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
  *     SDRAM controller, etc. XXX probably belongs outside the main hwmod file
  *     XXX Should be HWMOD_SETUP_NO_RESET
@@ -398,6 +398,10 @@ struct omap_hwmod_omap4_prcm {
  *     in order to complete the reset. Optional clocks will be disabled
  *     again after the reset.
  * HWMOD_16BIT_REG: Module has 16bit registers
+ * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device
+ *     is kept in force-standby mode. Failing to do so causes PM problems
+ *     with musb on OMAP3630 at least. Note that musb has a dedicated register
+ *     to control MSTANDBY signal when MIDLEMODE is set to force-standby.
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -408,6 +412,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_NO_IDLEST                                (1 << 6)
 #define HWMOD_CONTROL_OPT_CLKS_IN_RESET                (1 << 7)
 #define HWMOD_16BIT_REG                                (1 << 8)
+#define HWMOD_FORCE_MSTANDBY                   (1 << 11)
 
 /*
  * omap_hwmod._int_flags definitions
@@ -480,7 +485,6 @@ struct omap_hwmod_class {
  * @main_clk: main clock: OMAP clock name
  * @_clk: pointer to the main struct clk (filled in at runtime)
  * @opt_clks: other device clocks that drivers can request (0..*)
- * @vdd_name: voltage domain name
  * @voltdm: pointer to voltage domain (filled in at runtime)
  * @masters: ptr to array of OCP ifs that this hwmod can initiate on
  * @slaves: ptr to array of OCP ifs that this hwmod can respond on
@@ -524,7 +528,6 @@ struct omap_hwmod {
        struct omap_hwmod_opt_clk       *opt_clks;
        char                            *clkdm_name;
        struct clockdomain              *clkdm;
-       char                            *vdd_name;
        struct omap_hwmod_ocp_if        **masters; /* connect to *_IA */
        struct omap_hwmod_ocp_if        **slaves;  /* connect to *_TA */
        void                            *dev_attr;
index f500fc3..63280cd 100644 (file)
@@ -34,7 +34,7 @@ extern void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
 extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
 
 extern u32 omap3_configure_core_dpll(
-                       u32 m2, u32 unlock_dll, u32 f, u32 inc,
+                       u32 cm_clksel1_pll, u32 unlock_dll, u32 f, u32 inc,
                        u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
                        u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
                        u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
index 3dc3801..5a97b4d 100644 (file)
@@ -319,7 +319,7 @@ int omap_pm_get_dev_context_loss_count(struct device *dev)
        if (WARN_ON(!dev))
                return -ENODEV;
 
-       if (dev->parent == &omap_device_parent) {
+       if (dev->pm_domain == &omap_device_pm_domain) {
                count = omap_device_get_context_loss_count(pdev);
        } else {
                WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device",
index e8d9869..3643900 100644 (file)
@@ -320,8 +320,6 @@ static void _add_hwmod_clocks_clkdev(struct omap_device *od,
 }
 
 
-static struct dev_pm_domain omap_device_pm_domain;
-
 /**
  * omap_device_build_from_dt - build an omap_device with multiple hwmods
  * @pdev_name: name of the platform_device driver to use
@@ -799,7 +797,7 @@ static int _od_resume_noirq(struct device *dev)
 #define _od_resume_noirq NULL
 #endif
 
-static struct dev_pm_domain omap_device_pm_domain = {
+struct dev_pm_domain omap_device_pm_domain = {
        .ops = {
                SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
                                   _od_runtime_idle)
@@ -821,7 +819,6 @@ static int omap_device_register(struct platform_device *pdev)
 {
        pr_debug("omap_device: %s: registering\n", pdev->name);
 
-       pdev->dev.parent = &omap_device_parent;
        pdev->dev.pm_domain = &omap_device_pm_domain;
        return platform_device_add(pdev);
 }
@@ -1130,11 +1127,6 @@ int omap_device_enable_clocks(struct omap_device *od)
        return 0;
 }
 
-struct device omap_device_parent = {
-       .init_name      = "omap",
-       .parent         = &platform_bus,
-};
-
 static struct notifier_block platform_nb = {
        .notifier_call = _omap_device_notifier_call,
 };
@@ -1142,6 +1134,6 @@ static struct notifier_block platform_nb = {
 static int __init omap_device_init(void)
 {
        bus_register_notifier(&platform_bus_type, &platform_nb);
-       return device_register(&omap_device_parent);
+       return 0;
 }
 core_initcall(omap_device_init);
index 8b28664..0b8c4d5 100644 (file)
@@ -316,13 +316,13 @@ static inline int omap243x_sram_init(void)
 #ifdef CONFIG_ARCH_OMAP3
 
 static u32 (*_omap3_sram_configure_core_dpll)(
-                       u32 m2, u32 unlock_dll, u32 f, u32 inc,
+                       u32 cm_clksel1_pll, u32 unlock_dll, u32 f, u32 inc,
                        u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
                        u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
                        u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
                        u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
 
-u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc,
+u32 omap3_configure_core_dpll(u32 cm_clksel1_pll, u32 unlock_dll, u32 f, u32 inc,
                        u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
                        u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
                        u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
@@ -330,14 +330,13 @@ u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc,
 {
        BUG_ON(!_omap3_sram_configure_core_dpll);
        return _omap3_sram_configure_core_dpll(
-                       m2, unlock_dll, f, inc,
+                       cm_clksel1_pll, unlock_dll, f, inc,
                        sdrc_rfr_ctrl_0, sdrc_actim_ctrl_a_0,
                        sdrc_actim_ctrl_b_0, sdrc_mr_0,
                        sdrc_rfr_ctrl_1, sdrc_actim_ctrl_a_1,
                        sdrc_actim_ctrl_b_1, sdrc_mr_1);
 }
 
-#ifdef CONFIG_PM
 void omap3_sram_restore_context(void)
 {
        omap_sram_ceil = omap_sram_base + omap_sram_size;
@@ -347,17 +346,18 @@ void omap3_sram_restore_context(void)
                               omap3_sram_configure_core_dpll_sz);
        omap_push_sram_idle();
 }
-#endif /* CONFIG_PM */
-
-#endif /* CONFIG_ARCH_OMAP3 */
 
 static inline int omap34xx_sram_init(void)
 {
-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
        omap3_sram_restore_context();
-#endif
        return 0;
 }
+#else
+static inline int omap34xx_sram_init(void)
+{
+       return 0;
+}
+#endif /* CONFIG_ARCH_OMAP3 */
 
 int __init omap_sram_init(void)
 {
index 69a6136..1ed8d13 100644 (file)
@@ -12,7 +12,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
@@ -60,24 +59,10 @@ static u32 ticks_per_jiffy;
  * Orion's sched_clock implementation. It has a resolution of
  * at least 7.5ns (133MHz TCLK).
  */
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace orion_read_sched_clock(void)
 {
-       u32 cyc = ~readl(timer_base + TIMER0_VAL_OFF);
-       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-
-static void notrace orion_update_sched_clock(void)
-{
-       u32 cyc = ~readl(timer_base + TIMER0_VAL_OFF);
-       update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void __init setup_sched_clock(unsigned long tclk)
-{
-       init_sched_clock(&cd, orion_update_sched_clock, 32, tclk);
+       return ~readl(timer_base + TIMER0_VAL_OFF);
 }
 
 /*
@@ -217,7 +202,7 @@ orion_time_init(u32 _bridge_base, u32 _bridge_timer1_clr_mask,
        /*
         * Set scale and timer for sched_clock.
         */
-       setup_sched_clock(tclk);
+       setup_sched_clock(orion_read_sched_clock, 32, tclk);
 
        /*
         * Setup free-running clocksource timer (interrupts
index c833e7b..17c0a2c 100644 (file)
@@ -10,7 +10,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/err.h>
@@ -321,26 +320,14 @@ static void __iomem *s5p_timer_reg(void)
  * this wraps around for now, since it is just a relative time
  * stamp. (Inspired by U300 implementation.)
  */
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace s5p_read_sched_clock(void)
 {
        void __iomem *reg = s5p_timer_reg();
 
        if (!reg)
                return 0;
 
-       return cyc_to_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
-}
-
-static void notrace s5p_update_sched_clock(void)
-{
-       void __iomem *reg = s5p_timer_reg();
-
-       if (!reg)
-               return;
-
-       update_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
+       return ~__raw_readl(reg);
 }
 
 static void __init s5p_clocksource_init(void)
@@ -358,7 +345,7 @@ static void __init s5p_clocksource_init(void)
        s5p_time_setup(timer_source.source_id, TCNT_MAX);
        s5p_time_start(timer_source.source_id, PERIODIC);
 
-       init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
+       setup_sched_clock(s5p_read_sched_clock, 32, clock_rate);
 
        if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
                        clock_rate, 250, 32, clocksource_mmio_readl_down))
index a235fa0..1171f22 100644 (file)
@@ -31,7 +31,7 @@ static inline void arch_reset(char mode, const char *cmd)
 {
        if (mode == 's') {
                /* software reset, Jump into ROM at address 0 */
-               cpu_reset(0);
+               soft_restart(0);
        } else {
                /* hardware reset, Use on-chip reset capability */
                sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
deleted file mode 100644 (file)
index 8c8b24d..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_VMALLOC_H
-#define __PLAT_VMALLOC_H
-
-#define VMALLOC_END            0xF0000000UL
-
-#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-tcc/include/mach/vmalloc.h b/arch/arm/plat-tcc/include/mach/vmalloc.h
deleted file mode 100644 (file)
index 99414d9..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Author: <linux@telechips.com>
- * Created: June 10, 2008
- *
- * Copyright (C) 2000 Russell King.
- * Copyright (C) 2008-2009 Telechips
- *
- * Licensed under the terms of the GPL v2.
- */
-#define VMALLOC_END    0xf0000000UL
index 3d6a4c2..b33b74c 100644 (file)
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+#include <linux/kernel.h>
 #include <linux/io.h>
-#include <linux/sched.h>
 
 #include <asm/sched_clock.h>
 #include <plat/sched_clock.h>
 
-static DEFINE_CLOCK_DATA(cd);
 static void __iomem *ctr;
 
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 24MHz, NSEC_PER_SEC, 60).
- * This gives a resolution of about 41ns and a wrap period of about 178s.
- */
-#define SC_MULT                2796202667u
-#define SC_SHIFT       26
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace versatile_read_sched_clock(void)
 {
-       if (ctr) {
-               u32 cyc = readl(ctr);
-               return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0,
-                                               SC_MULT, SC_SHIFT);
-       } else
-               return 0;
-}
+       if (ctr)
+               return readl(ctr);
 
-static void notrace versatile_update_sched_clock(void)
-{
-       u32 cyc = readl(ctr);
-       update_sched_clock(&cd, cyc, (u32)~0);
+       return 0;
 }
 
 void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
 {
        ctr = reg;
-       init_fixed_sched_clock(&cd, versatile_update_sched_clock,
-                              32, rate, SC_MULT, SC_SHIFT);
+       setup_sched_clock(versatile_read_sched_clock, 32, rate);
 }
index da55c63..94eaa5b 100644 (file)
@@ -77,4 +77,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_IA64_HUGETLB_H */
index c565b7c..ac094d6 100644 (file)
@@ -112,4 +112,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* __ASM_HUGETLB_H */
index 29811f0..84d0639 100644 (file)
@@ -326,7 +326,7 @@ static void sp_cleanup(void)
                i = j * __NFDBITS;
                if (i >= fdt->max_fds)
                        break;
-               set = fdt->open_fds->fds_bits[j++];
+               set = fdt->open_fds[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
index 8600493..a163f78 100644 (file)
@@ -141,6 +141,10 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #else /* ! CONFIG_HUGETLB_PAGE */
 static inline void reserve_hugetlb_gpages(void)
 {
index ca683a1..876c52b 100644 (file)
@@ -617,7 +617,7 @@ static void register_nodes(void)
 int sysfs_add_device_to_node(struct sys_device *dev, int nid)
 {
        struct node *node = &node_devices[nid];
-       return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
+       return sysfs_create_link(&node->dev.kobj, &dev->kobj,
                        kobject_name(&dev->kobj));
 }
 EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
@@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
 void sysfs_remove_device_from_node(struct sys_device *dev, int nid)
 {
        struct node *node = &node_devices[nid];
-       sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
+       sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
 }
 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
 
index 03c5fce..c2c5b07 100644 (file)
@@ -122,7 +122,7 @@ static struct spu_context *coredump_next_context(int *fd)
        struct spu_context *ctx = NULL;
 
        for (; *fd < fdt->max_fds; (*fd)++) {
-               if (!FD_ISSET(*fd, fdt->open_fds))
+               if (!fd_is_open(*fd, fdt))
                        continue;
 
                file = fcheck(*fd);
index 18124b7..9d88db1 100644 (file)
@@ -73,7 +73,7 @@ typedef struct debug_info {
        struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
        struct debug_view* views[DEBUG_MAX_VIEWS];      
        char name[DEBUG_MAX_NAME_LEN];
-       mode_t mode;
+       umode_t mode;
 } debug_info_t;
 
 typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -124,7 +124,7 @@ debug_info_t *debug_register(const char *name, int pages, int nr_areas,
                              int buf_size);
 
 debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
-                                 int buf_size, mode_t mode, uid_t uid,
+                                 int buf_size, umode_t mode, uid_t uid,
                                  gid_t gid);
 
 void debug_unregister(debug_info_t* id);
index dcd46cb..606b059 100644 (file)
@@ -34,6 +34,7 @@ static inline int prepare_hugepage_range(struct file *file,
 }
 
 #define hugetlb_prefault_arch_hook(mm)         do { } while (0)
+#define arch_clear_hugepage_flags(page)                do { } while (0)
 
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);
index 5ad6bc0..6848828 100644 (file)
@@ -74,7 +74,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
 static int debug_open(struct inode *inode, struct file *file);
 static int debug_close(struct inode *inode, struct file *file);
 static debug_info_t *debug_info_create(const char *name, int pages_per_area,
-                       int nr_areas, int buf_size, mode_t mode);
+                       int nr_areas, int buf_size, umode_t mode);
 static void debug_info_get(debug_info_t *);
 static void debug_info_put(debug_info_t *);
 static int debug_prolog_level_fn(debug_info_t * id,
@@ -330,7 +330,7 @@ debug_info_free(debug_info_t* db_info){
 
 static debug_info_t*
 debug_info_create(const char *name, int pages_per_area, int nr_areas,
-                 int buf_size, mode_t mode)
+                 int buf_size, umode_t mode)
 {
        debug_info_t* rc;
 
@@ -688,7 +688,7 @@ debug_close(struct inode *inode, struct file *file)
  */
 
 debug_info_t *debug_register_mode(const char *name, int pages_per_area,
-                                 int nr_areas, int buf_size, mode_t mode,
+                                 int nr_areas, int buf_size, umode_t mode,
                                  uid_t uid, gid_t gid)
 {
        debug_info_t *rc = NULL;
@@ -1090,7 +1090,7 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
        int rc = 0;
        int i;
        unsigned long flags;
-       mode_t mode;
+       umode_t mode;
        struct dentry *pde;
 
        if (!id)
index 967068f..b3808c7 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_SH_HUGETLB_H
 #define _ASM_SH_HUGETLB_H
 
+#include <asm/cacheflush.h>
 #include <asm/page.h>
 
 
@@ -89,4 +90,9 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+       clear_bit(PG_dcache_clean, &page->flags);
+}
+
 #endif /* _ASM_SH_HUGETLB_H */
index f368cef..2ed0bcb 100644 (file)
@@ -88,4 +88,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_SPARC64_HUGETLB_H */
index d396d18..9202941 100644 (file)
@@ -106,4 +106,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_TILE_HUGETLB_H */
index 1a20b7e..17ad0ed 100644 (file)
@@ -188,6 +188,7 @@ static inline void __pte_clear(pte_t *ptep)
  * Undefined behaviour if not..
  */
 #define pte_present hv_pte_get_present
+#define pte_mknotpresent hv_pte_clear_present
 #define pte_user hv_pte_get_user
 #define pte_read hv_pte_get_readable
 #define pte_dirty hv_pte_get_dirty
@@ -313,7 +314,7 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
  */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       return pfn_pte(hv_pte_get_pfn(pte), newprot);
+       return pfn_pte(pte_pfn(pte), newprot);
 }
 
 /*
@@ -411,6 +412,46 @@ static inline unsigned long pmd_index(unsigned long address)
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 }
 
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+                                           unsigned long address,
+                                           pmd_t *pmdp)
+{
+       return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+                                     unsigned long address, pmd_t *pmdp)
+{
+       ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
+}
+
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+                                      unsigned long address,
+                                      pmd_t *pmdp)
+{
+       return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
+}
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+       set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
+
+/* Create a pmd from a PTFN. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+       return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
+
 /*
  * A given kernel pmd_t maps to a specific virtual address (either a
  * kernel huge page or a kernel pte_t table).  Since kernel pte_t
@@ -433,6 +474,47 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  */
 #define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
 
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       __pte_clear(pmdp_ptep(pmdp));
+}
+
+#define pmd_mknotpresent(pmd)  pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
+#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_huge_page(pmd)     pte_huge(pmd_pte(pmd))
+#define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pfn_pmd(pfn, pgprot)   pte_pmd(pfn_pte((pfn), (pgprot)))
+#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+       return pfn_pmd(pmd_pfn(pmd), newprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#define pmd_trans_huge pmd_huge_page
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+       return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+       return hv_pte_get_client2(pmd_pte(pmd));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  *
@@ -449,11 +531,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 }
 
-static inline int pmd_huge_page(pmd_t pmd)
-{
-       return pmd_val(pmd) & _PAGE_HUGE_PAGE;
-}
-
 #include <asm-generic/pgtable.h>
 
 /* Support /proc/NN/pgtable API. */
index 9f98529..27e20f6 100644 (file)
@@ -111,24 +111,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return pte;
 }
 
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-       set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
-}
-
-/* Create a pmd from a PTFN. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
-       return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-       __pte_clear(&pmdp->pud.pgd);
-}
+/*
+ * pmds are wrappers around pgds, which are the same as ptes.
+ * It's often convenient to "cast" back and forth and use the pte methods,
+ * which are the methods supplied by the hypervisor.
+ */
+#define pmd_pte(pmd) ((pmd).pud.pgd)
+#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
+#define pte_pmd(pte) ((pmd_t){ { (pte) } })
 
 #endif /* __ASSEMBLY__ */
 
index fd80328..e105f3a 100644 (file)
@@ -108,28 +108,6 @@ static inline unsigned long pud_index(unsigned long address)
 #define pmd_offset(pud, address) \
        ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
 
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-       set_pte(pmdp, pmdval);
-}
-
-/* Create a pmd from a PTFN and pgprot. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
-       return hv_pte_set_ptfn(prot, ptfn);
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-static inline unsigned long pmd_ptfn(pmd_t pmd)
-{
-       return hv_pte_get_ptfn(pmd);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-       __pte_clear(pmdp);
-}
-
 /* Normalize an address to having the correct high bits set. */
 #define pgd_addr_normalize pgd_addr_normalize
 static inline unsigned long pgd_addr_normalize(unsigned long addr)
@@ -170,6 +148,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return hv_pte(__insn_exch(&ptep->val, 0UL));
 }
 
+/*
+ * pmds are the same as pgds and ptes, so converting is a no-op.
+ */
+#define pmd_pte(pmd) (pmd)
+#define pmdp_ptep(pmdp) (pmdp)
+#define pte_pmd(pte) (pte)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_TILE_PGTABLE_64_H */
index 72ec1e9..793123e 100644 (file)
@@ -1855,8 +1855,7 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
                                               future use. */
 #define HV_PTE_INDEX_MODE            16  /**< Page mode; see HV_PTE_MODE_xxx */
 #define HV_PTE_MODE_BITS              3  /**< Number of bits in mode */
-                                         /*   Bit 19 is reserved for
-                                              future use. */
+#define HV_PTE_INDEX_CLIENT2         19  /**< Page client state 2 */
 #define HV_PTE_INDEX_LOTAR           20  /**< Page's LOTAR; must be high bits
                                               of word */
 #define HV_PTE_LOTAR_BITS            12  /**< Number of bits in a LOTAR */
@@ -2046,6 +2045,13 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  */
 #define HV_PTE_CLIENT1               (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
 
+/** Client-private bit in PTE.
+ *
+ * This bit is guaranteed not to be inspected or modified by the
+ * hypervisor.
+ */
+#define HV_PTE_CLIENT2               (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
+
 /** Non-coherent (NC) bit in PTE.
  *
  * If this bit is set, the mapping that is set up will be non-coherent
@@ -2180,6 +2186,7 @@ _HV_BIT(present,         PRESENT)
 _HV_BIT(page,            PAGE)
 _HV_BIT(client0,         CLIENT0)
 _HV_BIT(client1,         CLIENT1)
+_HV_BIT(client2,         CLIENT2)
 _HV_BIT(migrating,       MIGRATING)
 _HV_BIT(nc,              NC)
 _HV_BIT(readable,        READABLE)
index d720208..c2274c1 100644 (file)
@@ -73,6 +73,7 @@ config X86
        select IRQ_FORCED_THREADING
        select USE_GENERIC_SMP_HELPERS if SMP
        select HAVE_BPF_JIT if (X86_64 && NET)
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select CLKEVT_I8253
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_SUPPORTS_ATOMIC_RMW
index 4e8225c..246f062 100644 (file)
@@ -92,4 +92,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */
index 7c0fedd..ef1db19 100644 (file)
@@ -109,7 +109,7 @@ static const struct file_operations u32_array_fops = {
        .llseek = no_llseek,
 };
 
-struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
                                            struct dentry *parent,
                                            u32 *array, unsigned elements)
 {
index e281320..78d2549 100644 (file)
@@ -3,7 +3,7 @@
 
 struct dentry * __init xen_init_debugfs(void);
 
-struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
                                            struct dentry *parent,
                                            u32 *array, unsigned elements);
 
index 6c47ae9..b258cab 100644 (file)
@@ -105,7 +105,7 @@ int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
 {
        struct dentry *dev_dir;
        char name[64];
-       mode_t mode = 0400;
+       umode_t mode = 0400;
 
        if (ec_device_count == 0) {
                acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
index 21cf46f..305e831 100644 (file)
@@ -172,6 +172,98 @@ config SYS_HYPERVISOR
        bool
        default n
 
+config SOC_BUS
+       bool
+
 source "drivers/base/regmap/Kconfig"
 
+config CMA
+       bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+       depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+       select MIGRATION
+       help
+         This enables the Contiguous Memory Allocator which allows drivers
+         to allocate big physically-contiguous blocks of memory for use with
+         hardware components that do not support I/O map nor scatter-gather.
+
+         For more information see <include/linux/dma-contiguous.h>.
+         If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+       bool "CMA debug messages (DEVELOPMENT)"
+       depends on DEBUG_KERNEL
+       help
+         Turns on debug messages in CMA.  This produces KERN_DEBUG
+         messages for every CMA call as well as various messages while
+         processing calls such as dma_alloc_from_contiguous().
+         This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+       int "Size in Mega Bytes"
+       depends on !CMA_SIZE_SEL_PERCENTAGE
+       default 16
+       help
+         Defines the size (in MiB) of the default memory area for Contiguous
+         Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+       int "Percentage of total memory"
+       depends on !CMA_SIZE_SEL_MBYTES
+       default 10
+       help
+         Defines the size of the default memory area for Contiguous Memory
+         Allocator as a percentage of the total memory in the system.
+
+choice
+       prompt "Selected region size"
+       default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+       bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+       bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+       bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+       bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+       int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+       range 4 9
+       default 8
+       help
+         DMA mapping framework by default aligns all buffers to the smallest
+         PAGE_SIZE order which is greater than or equal to the requested buffer
+         size. This works well for buffers up to a few hundreds kilobytes, but
+         for larger buffers it just a memory waste. With this parameter you can
+         specify the maximum PAGE_SIZE order for contiguous buffers. Larger
+         buffers will be aligned only to this specified order. The order is
+         expressed as a power of two multiplied by the PAGE_SIZE.
+
+         For example, if your system defaults to 4KiB pages, the order value
+         of 8 means that the buffers will be aligned up to 1MiB only.
+
+         If unsure, leave the default value "8".
+
+config CMA_AREAS
+       int "Maximum count of the CMA device-private areas"
+       default 7
+       help
+         CMA allows to create CMA areas for particular devices. This parameter
+         sets the maximum number of such device private CMA areas in the
+         system.
+
+         If unsure, leave the default value "7".
+
+endif
+
 endmenu
index 99a375a..be0b5ef 100644 (file)
@@ -5,6 +5,7 @@ obj-y                   := core.o sys.o bus.o dd.o syscore.o \
                           cpu.o firmware.o init.o map.o devres.o \
                           attribute_container.o transport_class.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
 obj-y                  += power/
 obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_MODULES) += module.o
 endif
 obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 obj-$(CONFIG_REGMAP)   += regmap/
+obj-$(CONFIG_SOC_BUS) += soc.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
index bb0025c..1b85949 100644 (file)
@@ -10,6 +10,7 @@
 struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
+       phys_addr_t     pfn_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 
        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
+       dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;
 
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
        return 0;
 }
 EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev:       device from which the memory was allocated
+ * @vma:       vm_area for the userspace memory
+ * @vaddr:     cpu address returned by dma_alloc_from_coherent
+ * @size:      size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+                          void *vaddr, size_t size, int *ret)
+{
+       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+       if (mem && vaddr >= mem->virt_base && vaddr + size <=
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               unsigned long off = vma->vm_pgoff;
+               int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+               int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+               int count = size >> PAGE_SHIFT;
+
+               *ret = -ENXIO;
+               if (off < count && user_count <= count - off) {
+                       unsigned pfn = mem->pfn_base + start + off;
+                       *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                                              user_count << PAGE_SHIFT,
+                                              vma->vm_page_prot);
+               }
+               return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644 (file)
index 0000000..34d94c7
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *     Marek Szyprowski <m.szyprowski@samsung.com>
+ *     Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your optional) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+       unsigned long   base_pfn;
+       unsigned long   count;
+       unsigned long   *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is usefull mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users, who want to set the size of global CMA area for their system
+ * should use cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+       pr_debug("%s(%s)\n", __func__, p);
+       size_cmdline = memparse(p, &p);
+       return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+       struct memblock_region *reg;
+       unsigned long total_pages = 0;
+
+       /*
+        * We cannot use memblock_phys_mem_size() here, because
+        * memblock_analyze() has not been called yet.
+        */
+       for_each_memblock(memory, reg)
+               total_pages += memblock_region_memory_end_pfn(reg) -
+                              memblock_region_memory_base_pfn(reg);
+
+       return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+       return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+       unsigned long selected_size = 0;
+
+       pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+       if (size_cmdline != -1) {
+               selected_size = size_cmdline;
+       } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+               selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+               selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+               selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+               selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+       }
+
+       if (selected_size) {
+               pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+                        selected_size / SZ_1M);
+
+               dma_declare_contiguous(NULL, selected_size, 0, limit);
+       }
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+       unsigned long pfn = base_pfn;
+       unsigned i = count >> pageblock_order;
+       struct zone *zone;
+
+       WARN_ON_ONCE(!pfn_valid(pfn));
+       zone = page_zone(pfn_to_page(pfn));
+
+       do {
+               unsigned j;
+               base_pfn = pfn;
+               for (j = pageblock_nr_pages; j; --j, pfn++) {
+                       WARN_ON_ONCE(!pfn_valid(pfn));
+                       if (page_zone(pfn_to_page(pfn)) != zone)
+                               return -EINVAL;
+               }
+               init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+       } while (--i);
+       return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+                                    unsigned long count)
+{
+       int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+       struct cma *cma;
+       int ret = -ENOMEM;
+
+       pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+       cma = kmalloc(sizeof *cma, GFP_KERNEL);
+       if (!cma)
+               return ERR_PTR(-ENOMEM);
+
+       cma->base_pfn = base_pfn;
+       cma->count = count;
+       cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+       if (!cma->bitmap)
+               goto no_mem;
+
+       ret = cma_activate_area(base_pfn, count);
+       if (ret)
+               goto error;
+
+       pr_debug("%s: returned %p\n", __func__, (void *)cma);
+       return cma;
+
+error:
+       kfree(cma->bitmap);
+no_mem:
+       kfree(cma);
+       return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+       phys_addr_t start;
+       unsigned long size;
+       struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+       struct cma_reserved *r = cma_reserved;
+       unsigned i = cma_reserved_count;
+
+       pr_debug("%s()\n", __func__);
+
+       for (; i; --i, ++r) {
+               struct cma *cma;
+               cma = cma_create_area(PFN_DOWN(r->start),
+                                     r->size >> PAGE_SHIFT);
+               if (!IS_ERR(cma))
+                       dev_set_cma_area(r->dev, cma);
+       }
+       return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *                           for particular device
+ * @dev:   Pointer to device structure.
+ * @size:  Size of the reserved memory.
+ * @base:  Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for specified device. It should be
+ * called by board specific code when early allocator (memblock or bootmem)
+ * is still active.
+ *
+ * Returns 0 on success, or a negative errno (-ENOSPC, -EINVAL, -EBUSY,
+ * -ENOMEM) on failure.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+                                 phys_addr_t base, phys_addr_t limit)
+{
+       struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+       unsigned long alignment;
+
+       pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+                (unsigned long)size, (unsigned long)base,
+                (unsigned long)limit);
+
+       /* Sanity checks */
+       if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+               pr_err("Not enough slots for CMA reserved regions!\n");
+               return -ENOSPC;
+       }
+
+       if (!size)
+               return -EINVAL;
+
+       /*
+        * Sanitise input arguments: round base/size up and limit down to
+        * the larger of a MAX_ORDER block or a pageblock, so the area is
+        * usable by the page allocator's migration machinery.
+        */
+       alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+       base = ALIGN(base, alignment);
+       size = ALIGN(size, alignment);
+       limit &= ~(alignment - 1);
+
+       /* Reserve memory */
+       if (base) {
+               /* fixed placement: the exact range must still be free */
+               if (memblock_is_region_reserved(base, size) ||
+                   memblock_reserve(base, size) < 0) {
+                       base = -EBUSY;
+                       goto err;
+               }
+       } else {
+               /*
+                * Use __memblock_alloc_base() since
+                * memblock_alloc_base() panic()s.
+                */
+               phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+               if (!addr) {
+                       base = -ENOMEM;
+                       goto err;
+               } else if (addr + size > ~(unsigned long)0) {
+                       /* area must fit within an unsigned long address */
+                       memblock_free(addr, size);
+                       base = -EINVAL;
+                       goto err;
+               } else {
+                       base = addr;
+               }
+       }
+
+       /*
+        * Each reserved area must be initialised later, when more kernel
+        * subsystems (like slab allocator) are available.
+        */
+       r->start = base;
+       r->size = size;
+       r->dev = dev;
+       cma_reserved_count++;
+       pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+               (unsigned long)base);
+
+       /* Architecture specific contiguous memory fixup. */
+       dma_contiguous_early_fixup(base, size);
+       return 0;
+err:
+       /* base carries the negative errno selected above */
+       pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+       return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates memory buffer for specified device. It uses
+ * device specific contiguous memory area if available or the default
+ * global one. Requires architecture specific get_dev_cma_area() helper
+ * function.
+ *
+ * Returns the first page of the allocated range, or NULL on failure.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+                                      unsigned int align)
+{
+       unsigned long mask, pfn, pageno, start = 0;
+       struct cma *cma = dev_get_cma_area(dev);
+       int ret;
+
+       if (!cma || !cma->count)
+               return NULL;
+
+       /* clamp the alignment order to the build-time maximum */
+       if (align > CONFIG_CMA_ALIGNMENT)
+               align = CONFIG_CMA_ALIGNMENT;
+
+       pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+                count, align);
+
+       if (!count)
+               return NULL;
+
+       mask = (1 << align) - 1;
+
+       /* cma_mutex serialises all bitmap updates and page migration */
+       mutex_lock(&cma_mutex);
+
+       for (;;) {
+               /* find a free, suitably aligned run in the area's bitmap */
+               pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+                                                   start, count, mask);
+               if (pageno >= cma->count) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+
+               pfn = cma->base_pfn + pageno;
+               ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+               if (ret == 0) {
+                       /* mark the run allocated only once migration worked */
+                       bitmap_set(cma->bitmap, pageno, count);
+                       break;
+               } else if (ret != -EBUSY) {
+                       goto error;
+               }
+               pr_debug("%s(): memory range at %p is busy, retrying\n",
+                        __func__, pfn_to_page(pfn));
+               /* try again with a bit different memory target */
+               start = pageno + mask + 1;
+       }
+
+       mutex_unlock(&cma_mutex);
+
+       pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+       return pfn_to_page(pfn);
+error:
+       mutex_unlock(&cma_mutex);
+       return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count)
+{
+       struct cma *cma = dev_get_cma_area(dev);
+       unsigned long pfn, offset;
+
+       if (!cma || !pages)
+               return false;
+
+       pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+       /* reject pages that fall outside this device's CMA area */
+       pfn = page_to_pfn(pages);
+       offset = pfn - cma->base_pfn;
+       if (pfn < cma->base_pfn || offset >= cma->count)
+               return false;
+
+       VM_BUG_ON(offset + count > cma->count);
+
+       mutex_lock(&cma_mutex);
+       bitmap_clear(cma->bitmap, offset, count);
+       free_contig_range(pfn, count);
+       mutex_unlock(&cma_mutex);
+
+       return true;
+}
index 763b356..bbd49c0 100644 (file)
@@ -79,7 +79,7 @@ enum {
        FW_STATUS_ABORT,
 };
 
-static int loading_timeout = 60;       /* In seconds */
+static int loading_timeout = 10;       /* In seconds */
 
 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
  * guarding for corner cases a global lock should be OK */
@@ -544,7 +544,7 @@ static int _request_firmware(const struct firmware **firmware_p,
        }
 
        if (uevent)
-               dev_dbg(device, "firmware: requesting %s\n", name);
+               dev_info(device, "firmware: requesting %s\n", name);
 
        fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
        if (IS_ERR(fw_priv)) {
@@ -576,6 +576,7 @@ static int _request_firmware(const struct firmware **firmware_p,
 
 out:
        if (retval) {
+               dev_warn(device, "firmware: failed to get '%s'\n", name);
                release_firmware(firmware);
                *firmware_p = NULL;
        }
index 732ad0d..38d0a34 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * drivers/base/memory.c - basic Memory class support
+ * Memory subsystem support
  *
  * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
  *            Dave Hansen <haveblue@us.ibm.com>
@@ -10,7 +10,6 @@
  * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
  */
 
-#include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/topology.h>
@@ -38,26 +37,9 @@ static inline int base_memory_block_id(int section_nr)
        return section_nr / sections_per_block;
 }
 
-static struct sysdev_class memory_sysdev_class = {
+static struct bus_type memory_subsys = {
        .name = MEMORY_CLASS_NAME,
-};
-
-static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
-{
-       return MEMORY_CLASS_NAME;
-}
-
-static int memory_uevent(struct kset *kset, struct kobject *obj,
-                       struct kobj_uevent_env *env)
-{
-       int retval = 0;
-
-       return retval;
-}
-
-static const struct kset_uevent_ops memory_uevent_ops = {
-       .name           = memory_uevent_name,
-       .uevent         = memory_uevent,
+       .dev_name = MEMORY_CLASS_NAME,
 };
 
 static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -96,21 +78,21 @@ int register_memory(struct memory_block *memory)
 {
        int error;
 
-       memory->sysdev.cls = &memory_sysdev_class;
-       memory->sysdev.id = memory->start_section_nr / sections_per_block;
+       memory->dev.bus = &memory_subsys;
+       memory->dev.id = memory->start_section_nr / sections_per_block;
 
-       error = sysdev_register(&memory->sysdev);
+       error = device_register(&memory->dev);
        return error;
 }
 
 static void
 unregister_memory(struct memory_block *memory)
 {
-       BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
+       BUG_ON(memory->dev.bus != &memory_subsys);
 
        /* drop the ref. we got in remove_memory_block() */
-       kobject_put(&memory->sysdev.kobj);
-       sysdev_unregister(&memory->sysdev);
+       kobject_put(&memory->dev.kobj);
+       device_unregister(&memory->dev);
 }
 
 unsigned long __weak memory_block_size_bytes(void)
@@ -138,22 +120,22 @@ static unsigned long get_memory_block_size(void)
  * uses.
  */
 
-static ssize_t show_mem_start_phys_index(struct sys_device *dev,
-                       struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_start_phys_index(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {
        struct memory_block *mem =
-               container_of(dev, struct memory_block, sysdev);
+               container_of(dev, struct memory_block, dev);
        unsigned long phys_index;
 
        phys_index = mem->start_section_nr / sections_per_block;
        return sprintf(buf, "%08lx\n", phys_index);
 }
 
-static ssize_t show_mem_end_phys_index(struct sys_device *dev,
-                       struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_end_phys_index(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {
        struct memory_block *mem =
-               container_of(dev, struct memory_block, sysdev);
+               container_of(dev, struct memory_block, dev);
        unsigned long phys_index;
 
        phys_index = mem->end_section_nr / sections_per_block;
@@ -163,13 +145,13 @@ static ssize_t show_mem_end_phys_index(struct sys_device *dev,
 /*
  * Show whether the section of memory is likely to be hot-removable
  */
-static ssize_t show_mem_removable(struct sys_device *dev,
-                       struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_removable(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {
        unsigned long i, pfn;
        int ret = 1;
        struct memory_block *mem =
-               container_of(dev, struct memory_block, sysdev);
+               container_of(dev, struct memory_block, dev);
 
        for (i = 0; i < sections_per_block; i++) {
                if (!present_section_nr(mem->start_section_nr + i))
@@ -184,11 +166,11 @@ static ssize_t show_mem_removable(struct sys_device *dev,
 /*
  * online, offline, going offline, etc.
  */
-static ssize_t show_mem_state(struct sys_device *dev,
-                       struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_state(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {
        struct memory_block *mem =
-               container_of(dev, struct memory_block, sysdev);
+               container_of(dev, struct memory_block, dev);
        ssize_t len = 0;
 
        /*
@@ -326,13 +308,13 @@ out:
 }
 
 static ssize_t
-store_mem_state(struct sys_device *dev,
-               struct sysdev_attribute *attr, const char *buf, size_t count)
+store_mem_state(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
 {
        struct memory_block *mem;
        int ret = -EINVAL;
 
-       mem = container_of(dev, struct memory_block, sysdev);
+       mem = container_of(dev, struct memory_block, dev);
 
        if (!strncmp(buf, "online", min((int)count, 6)))
                ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -353,41 +335,41 @@ store_mem_state(struct sys_device *dev,
  * s.t. if I offline all of these sections I can then
  * remove the physical device?
  */
-static ssize_t show_phys_device(struct sys_device *dev,
-                               struct sysdev_attribute *attr, char *buf)
+static ssize_t show_phys_device(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct memory_block *mem =
-               container_of(dev, struct memory_block, sysdev);
+               container_of(dev, struct memory_block, dev);
        return sprintf(buf, "%d\n", mem->phys_device);
 }
 
-static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
-static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
-static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
-static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
+static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
+static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
+static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
+static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
+static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
 
 #define mem_create_simple_file(mem, attr_name) \
-       sysdev_create_file(&mem->sysdev, &attr_##attr_name)
+       device_create_file(&mem->dev, &dev_attr_##attr_name)
 #define mem_remove_simple_file(mem, attr_name) \
-       sysdev_remove_file(&mem->sysdev, &attr_##attr_name)
+       device_remove_file(&mem->dev, &dev_attr_##attr_name)
 
 /*
  * Block size attribute stuff
  */
 static ssize_t
-print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
+print_block_size(struct device *dev, struct device_attribute *attr,
                 char *buf)
 {
        return sprintf(buf, "%lx\n", get_memory_block_size());
 }
 
-static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
 
 static int block_size_init(void)
 {
-       return sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                               &attr_block_size_bytes.attr);
+       return device_create_file(memory_subsys.dev_root,
+                                 &dev_attr_block_size_bytes);
 }
 
 /*
@@ -398,7 +380,7 @@ static int block_size_init(void)
  */
 #ifdef CONFIG_ARCH_MEMORY_PROBE
 static ssize_t
-memory_probe_store(struct class *class, struct class_attribute *attr,
+memory_probe_store(struct device *dev, struct device_attribute *attr,
                   const char *buf, size_t count)
 {
        u64 phys_addr;
@@ -425,12 +407,11 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
 out:
        return ret;
 }
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
 
 static int memory_probe_init(void)
 {
-       return sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                               &class_attr_probe.attr);
+       return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
 }
 #else
 static inline int memory_probe_init(void)
@@ -446,8 +427,8 @@ static inline int memory_probe_init(void)
 
 /* Soft offline a page */
 static ssize_t
-store_soft_offline_page(struct class *class,
-                       struct class_attribute *attr,
+store_soft_offline_page(struct device *dev,
+                       struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        int ret;
@@ -465,8 +446,8 @@ store_soft_offline_page(struct class *class,
 
 /* Forcibly offline a page, including killing processes. */
 static ssize_t
-store_hard_offline_page(struct class *class,
-                       struct class_attribute *attr,
+store_hard_offline_page(struct device *dev,
+                       struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        int ret;
@@ -480,18 +461,18 @@ store_hard_offline_page(struct class *class,
        return ret ? ret : count;
 }
 
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
 
 static __init int memory_fail_init(void)
 {
        int err;
 
-       err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                               &class_attr_soft_offline_page.attr);
+       err = device_create_file(memory_subsys.dev_root,
+                               &dev_attr_soft_offline_page);
        if (!err)
-               err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                               &class_attr_hard_offline_page.attr);
+               err = device_create_file(memory_subsys.dev_root,
+                               &dev_attr_hard_offline_page);
        return err;
 }
 #else
@@ -511,31 +492,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
        return 0;
 }
 
+/*
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
 struct memory_block *find_memory_block_hinted(struct mem_section *section,
                                              struct memory_block *hint)
 {
-       struct kobject *kobj;
-       struct sys_device *sysdev;
-       struct memory_block *mem;
-       char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
        int block_id = base_memory_block_id(__section_nr(section));
+       struct device *hintdev = hint ? &hint->dev : NULL;
+       struct device *dev;
 
-       kobj = hint ? &hint->sysdev.kobj : NULL;
-
-       /*
-        * This only works because we know that section == sysdev->id
-        * slightly redundant with sysdev_register()
-        */
-       sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id);
-
-       kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
-       if (!kobj)
+       dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
+       if (hint)
+               put_device(&hint->dev);
+       if (!dev)
                return NULL;
-
-       sysdev = container_of(kobj, struct sys_device, kobj);
-       mem = container_of(sysdev, struct memory_block, sysdev);
-
-       return mem;
+       return container_of(dev, struct memory_block, dev);
 }
 
 /*
@@ -544,7 +517,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
  * this gets to be a real problem, we can always use a radix
  * tree or something here.
  *
- * This could be made generic for all sysdev classes.
+ * This could be made generic for all device subsystems.
  */
 struct memory_block *find_memory_block(struct mem_section *section)
 {
@@ -600,7 +573,7 @@ static int add_memory_section(int nid, struct mem_section *section,
        mem = find_memory_block(section);
        if (mem) {
                mem->section_count++;
-               kobject_put(&mem->sysdev.kobj);
+               kobject_put(&mem->dev.kobj);
        } else
                ret = init_memory_block(&mem, section, state);
 
@@ -633,7 +606,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
                unregister_memory(mem);
                kfree(mem);
        } else
-               kobject_put(&mem->sysdev.kobj);
+               kobject_put(&mem->dev.kobj);
 
        mutex_unlock(&mem_sysfs_mutex);
        return 0;
@@ -666,8 +639,7 @@ int __init memory_dev_init(void)
        int err;
        unsigned long block_sz;
 
-       memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
-       ret = sysdev_class_register(&memory_sysdev_class);
+       ret = subsys_system_register(&memory_subsys, NULL);
        if (ret)
                goto out;
 
index 5693ece..cf97192 100644 (file)
@@ -1,8 +1,7 @@
 /*
- * drivers/base/node.c - basic Node class support
+ * Basic Node interface support
  */
 
-#include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
 
-static struct sysdev_class_attribute *node_state_attrs[];
-
-static struct sysdev_class node_class = {
+static struct bus_type node_subsys = {
        .name = "node",
-       .attrs = node_state_attrs,
+       .dev_name = "node",
 };
 
 
-static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
 {
        struct node *node_dev = to_node(dev);
-       const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
+       const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
        int len;
 
        /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
@@ -44,23 +41,23 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
        return len;
 }
 
-static inline ssize_t node_read_cpumask(struct sys_device *dev,
-                               struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpumask(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        return node_read_cpumap(dev, 0, buf);
 }
-static inline ssize_t node_read_cpulist(struct sys_device *dev,
-                               struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpulist(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        return node_read_cpumap(dev, 1, buf);
 }
 
-static SYSDEV_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
-static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
+static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
-static ssize_t node_read_meminfo(struct sys_device * dev,
-                       struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_meminfo(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {
        int n;
        int nid = dev->id;
@@ -157,10 +154,10 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 }
 
 #undef K
-static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
 
-static ssize_t node_read_numastat(struct sys_device * dev,
-                               struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_numastat(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        return sprintf(buf,
                       "numa_hit %lu\n"
@@ -176,10 +173,10 @@ static ssize_t node_read_numastat(struct sys_device * dev,
                       node_page_state(dev->id, NUMA_LOCAL),
                       node_page_state(dev->id, NUMA_OTHER));
 }
-static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 
-static ssize_t node_read_vmstat(struct sys_device *dev,
-                               struct sysdev_attribute *attr, char *buf)
+static ssize_t node_read_vmstat(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        int nid = dev->id;
        int i;
@@ -191,10 +188,10 @@ static ssize_t node_read_vmstat(struct sys_device *dev,
 
        return n;
 }
-static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
 
-static ssize_t node_read_distance(struct sys_device * dev,
-                       struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_distance(struct device *dev,
+                       struct device_attribute *attr, char * buf)
 {
        int nid = dev->id;
        int len = 0;
@@ -212,7 +209,7 @@ static ssize_t node_read_distance(struct sys_device * dev,
        len += sprintf(buf + len, "\n");
        return len;
 }
-static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
 
 #ifdef CONFIG_HUGETLBFS
 /*
@@ -230,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
 static inline bool hugetlb_register_node(struct node *node)
 {
        if (__hugetlb_register_node &&
-                       node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+                       node_state(node->dev.id, N_HIGH_MEMORY)) {
                __hugetlb_register_node(node);
                return true;
        }
@@ -266,17 +263,17 @@ int register_node(struct node *node, int num, struct node *parent)
 {
        int error;
 
-       node->sysdev.id = num;
-       node->sysdev.cls = &node_class;
-       error = sysdev_register(&node->sysdev);
+       node->dev.id = num;
+       node->dev.bus = &node_subsys;
+       error = device_register(&node->dev);
 
        if (!error){
-               sysdev_create_file(&node->sysdev, &attr_cpumap);
-               sysdev_create_file(&node->sysdev, &attr_cpulist);
-               sysdev_create_file(&node->sysdev, &attr_meminfo);
-               sysdev_create_file(&node->sysdev, &attr_numastat);
-               sysdev_create_file(&node->sysdev, &attr_distance);
-               sysdev_create_file(&node->sysdev, &attr_vmstat);
+               device_create_file(&node->dev, &dev_attr_cpumap);
+               device_create_file(&node->dev, &dev_attr_cpulist);
+               device_create_file(&node->dev, &dev_attr_meminfo);
+               device_create_file(&node->dev, &dev_attr_numastat);
+               device_create_file(&node->dev, &dev_attr_distance);
+               device_create_file(&node->dev, &dev_attr_vmstat);
 
                scan_unevictable_register_node(node);
 
@@ -296,17 +293,17 @@ int register_node(struct node *node, int num, struct node *parent)
  */
 void unregister_node(struct node *node)
 {
-       sysdev_remove_file(&node->sysdev, &attr_cpumap);
-       sysdev_remove_file(&node->sysdev, &attr_cpulist);
-       sysdev_remove_file(&node->sysdev, &attr_meminfo);
-       sysdev_remove_file(&node->sysdev, &attr_numastat);
-       sysdev_remove_file(&node->sysdev, &attr_distance);
-       sysdev_remove_file(&node->sysdev, &attr_vmstat);
+       device_remove_file(&node->dev, &dev_attr_cpumap);
+       device_remove_file(&node->dev, &dev_attr_cpulist);
+       device_remove_file(&node->dev, &dev_attr_meminfo);
+       device_remove_file(&node->dev, &dev_attr_numastat);
+       device_remove_file(&node->dev, &dev_attr_distance);
+       device_remove_file(&node->dev, &dev_attr_vmstat);
 
        scan_unevictable_unregister_node(node);
        hugetlb_unregister_node(node);          /* no-op, if memoryless node */
 
-       sysdev_unregister(&node->sysdev);
+       device_unregister(&node->dev);
 }
 
 struct node node_devices[MAX_NUMNODES];
@@ -326,15 +323,15 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
        if (!obj)
                return 0;
 
-       ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+       ret = sysfs_create_link(&node_devices[nid].dev.kobj,
                                &obj->kobj,
                                kobject_name(&obj->kobj));
        if (ret)
                return ret;
 
        return sysfs_create_link(&obj->kobj,
-                                &node_devices[nid].sysdev.kobj,
-                                kobject_name(&node_devices[nid].sysdev.kobj));
+                                &node_devices[nid].dev.kobj,
+                                kobject_name(&node_devices[nid].dev.kobj));
 }
 
 int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
@@ -348,10 +345,10 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
        if (!obj)
                return 0;
 
-       sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+       sysfs_remove_link(&node_devices[nid].dev.kobj,
                          kobject_name(&obj->kobj));
        sysfs_remove_link(&obj->kobj,
-                         kobject_name(&node_devices[nid].sysdev.kobj));
+                         kobject_name(&node_devices[nid].dev.kobj));
 
        return 0;
 }
@@ -393,15 +390,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
                        continue;
                if (page_nid != nid)
                        continue;
-               ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
-                                       &mem_blk->sysdev.kobj,
-                                       kobject_name(&mem_blk->sysdev.kobj));
+               ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+                                       &mem_blk->dev.kobj,
+                                       kobject_name(&mem_blk->dev.kobj));
                if (ret)
                        return ret;
 
-               return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
-                               &node_devices[nid].sysdev.kobj,
-                               kobject_name(&node_devices[nid].sysdev.kobj));
+               return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+                               &node_devices[nid].dev.kobj,
+                               kobject_name(&node_devices[nid].dev.kobj));
        }
        /* mem section does not span the specified node */
        return 0;
@@ -434,10 +431,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
                        continue;
                if (node_test_and_set(nid, *unlinked_nodes))
                        continue;
-               sysfs_remove_link(&node_devices[nid].sysdev.kobj,
-                        kobject_name(&mem_blk->sysdev.kobj));
-               sysfs_remove_link(&mem_blk->sysdev.kobj,
-                        kobject_name(&node_devices[nid].sysdev.kobj));
+               sysfs_remove_link(&node_devices[nid].dev.kobj,
+                        kobject_name(&mem_blk->dev.kobj));
+               sysfs_remove_link(&mem_blk->dev.kobj,
+                        kobject_name(&node_devices[nid].dev.kobj));
        }
        NODEMASK_FREE(unlinked_nodes);
        return 0;
@@ -468,7 +465,7 @@ static int link_mem_sections(int nid)
        }
 
        if (mem_blk)
-               kobject_put(&mem_blk->sysdev.kobj);
+               kobject_put(&mem_blk->dev.kobj);
        return err;
 }
 
@@ -596,19 +593,19 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
 }
 
 struct node_attr {
-       struct sysdev_class_attribute attr;
+       struct device_attribute attr;
        enum node_states state;
 };
 
-static ssize_t show_node_state(struct sysdev_class *class,
-                              struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_node_state(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
        struct node_attr *na = container_of(attr, struct node_attr, attr);
        return print_nodes_state(na->state, buf);
 }
 
 #define _NODE_ATTR(name, state) \
-       { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
+       { __ATTR(name, 0444, show_node_state, NULL), state }
 
 static struct node_attr node_state_attr[] = {
        _NODE_ATTR(possible, N_POSSIBLE),
@@ -620,17 +617,26 @@ static struct node_attr node_state_attr[] = {
 #endif
 };
 
-static struct sysdev_class_attribute *node_state_attrs[] = {
-       &node_state_attr[0].attr,
-       &node_state_attr[1].attr,
-       &node_state_attr[2].attr,
-       &node_state_attr[3].attr,
+static struct attribute *node_state_attrs[] = {
+       &node_state_attr[0].attr.attr,
+       &node_state_attr[1].attr.attr,
+       &node_state_attr[2].attr.attr,
+       &node_state_attr[3].attr.attr,
 #ifdef CONFIG_HIGHMEM
-       &node_state_attr[4].attr,
+       &node_state_attr[4].attr.attr,
 #endif
        NULL
 };
 
+static struct attribute_group memory_root_attr_group = {
+       .attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+       &memory_root_attr_group,
+       NULL,
+};
+
 #define NODE_CALLBACK_PRI      2       /* lower than SLAB */
 static int __init register_node_type(void)
 {
@@ -639,7 +645,7 @@ static int __init register_node_type(void)
        BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
        BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
 
-       ret = sysdev_class_register(&node_class);
+       ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
        if (!ret) {
                hotplug_memory_notifier(node_memory_callback,
                                        NODE_CALLBACK_PRI);
index 95706fa..775e1fe 100644 (file)
@@ -458,12 +458,12 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-static int opp_set_availability(struct device *dev, unsigned long freq,
+static int opp_set_availability(struct device *dev, int index, unsigned long freq,
                bool availability_req)
 {
        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
        struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
-       int r = 0;
+       int i, r = 0;
 
        /* keep the node allocated */
        new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
@@ -488,11 +488,13 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
        }
 
        /* Do we have the frequency? */
+       i = 0;
        list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
-               if (tmp_opp->rate == freq) {
+               if (i == index || tmp_opp->rate == freq) {
                        opp = tmp_opp;
                        break;
                }
+               i++;
        }
        if (IS_ERR(opp)) {
                r = PTR_ERR(opp);
@@ -548,7 +550,7 @@ out:
  */
 int opp_enable(struct device *dev, unsigned long freq)
 {
-       return opp_set_availability(dev, freq, true);
+       return opp_set_availability(dev, -1, freq, true);
 }
 
 /**
@@ -569,7 +571,109 @@ int opp_enable(struct device *dev, unsigned long freq)
  */
 int opp_disable(struct device *dev, unsigned long freq)
 {
-       return opp_set_availability(dev, freq, false);
+       return opp_set_availability(dev, -1, freq, false);
+}
+
+int opp_enable_i(struct device *dev, int index)
+{
+       return opp_set_availability(dev, index, ~0, true);
+}
+
+int opp_disable_i(struct device *dev, int index)
+{
+       return opp_set_availability(dev, index, ~0, false);
+}
+
+int opp_hack_set_freq(struct device *dev, int index, unsigned long freq)
+{
+       struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+       struct opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       int i, r = 0;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       /* Find the device_opp */
+       list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
+               if (dev == tmp_dev_opp->dev) {
+                       dev_opp = tmp_dev_opp;
+                       break;
+               }
+       }
+       if (IS_ERR(dev_opp)) {
+               r = PTR_ERR(dev_opp);
+               dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+               goto unlock;
+       }
+
+       /* Do we have the frequency? */
+       i = 0;
+       list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+               if (i == index) {
+                       opp = tmp_opp;
+                       break;
+               }
+               i++;
+       }
+       if (IS_ERR(opp)) {
+               r = PTR_ERR(opp);
+               goto unlock;
+       }
+
+       opp->rate = freq;
+
+       mutex_unlock(&dev_opp_list_lock);
+       synchronize_rcu();
+
+       /* Notify the change */
+       srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE, opp);
+
+       return r;
+
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+       return r;
+}
+
+int opp_hack_get_freq(struct device *dev, int index, unsigned long *freq)
+{
+       struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+       struct opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       int i, r = 0;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       /* Find the device_opp */
+       list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
+               if (dev == tmp_dev_opp->dev) {
+                       dev_opp = tmp_dev_opp;
+                       break;
+               }
+       }
+       if (IS_ERR(dev_opp)) {
+               r = PTR_ERR(dev_opp);
+               dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+               goto unlock;
+       }
+
+       /* Do we have the frequency? */
+       i = 0;
+       list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+               if (i == index) {
+                       opp = tmp_opp;
+                       break;
+               }
+               i++;
+       }
+       if (IS_ERR(opp)) {
+               r = PTR_ERR(opp);
+               goto unlock;
+       }
+
+       *freq = opp->rate;
+
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+       return r;
 }
 
 #ifdef CONFIG_CPU_FREQ
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644 (file)
index 0000000..ba29b2e
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/err.h>
+
+static DEFINE_IDA(soc_ida);
+static DEFINE_SPINLOCK(soc_lock);
+
+static ssize_t soc_info_get(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf);
+
+struct soc_device {
+       struct device dev;
+       struct soc_device_attribute *attr;
+       int soc_dev_num;
+};
+
+static struct bus_type soc_bus_type = {
+       .name  = "soc",
+};
+
+static DEVICE_ATTR(machine,  S_IRUGO, soc_info_get,  NULL);
+static DEVICE_ATTR(family,   S_IRUGO, soc_info_get,  NULL);
+static DEVICE_ATTR(soc_id,   S_IRUGO, soc_info_get,  NULL);
+static DEVICE_ATTR(revision, S_IRUGO, soc_info_get,  NULL);
+
+struct device *soc_device_to_device(struct soc_device *soc_dev)
+{
+       return &soc_dev->dev;
+}
+
+static mode_t soc_attribute_mode(struct kobject *kobj,
+                                 struct attribute *attr,
+                                 int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+       if ((attr == &dev_attr_machine.attr)
+           && (soc_dev->attr->machine != NULL))
+               return attr->mode;
+       if ((attr == &dev_attr_family.attr)
+           && (soc_dev->attr->family != NULL))
+               return attr->mode;
+       if ((attr == &dev_attr_revision.attr)
+           && (soc_dev->attr->revision != NULL))
+               return attr->mode;
+       if ((attr == &dev_attr_soc_id.attr)
+           && (soc_dev->attr->soc_id != NULL))
+               return attr->mode;
+
+       /* Unknown or unfilled attribute. */
+       return 0;
+}
+
+static ssize_t soc_info_get(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+       if (attr == &dev_attr_machine)
+               return sprintf(buf, "%s\n", soc_dev->attr->machine);
+       if (attr == &dev_attr_family)
+               return sprintf(buf, "%s\n", soc_dev->attr->family);
+       if (attr == &dev_attr_revision)
+               return sprintf(buf, "%s\n", soc_dev->attr->revision);
+       if (attr == &dev_attr_soc_id)
+               return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
+
+       return -EINVAL;
+
+}
+
+static struct attribute *soc_attr[] = {
+       &dev_attr_machine.attr,
+       &dev_attr_family.attr,
+       &dev_attr_soc_id.attr,
+       &dev_attr_revision.attr,
+       NULL,
+};
+
+static const struct attribute_group soc_attr_group = {
+       .attrs = soc_attr,
+       .is_visible = soc_attribute_mode,
+};
+
+static const struct attribute_group *soc_attr_groups[] = {
+       &soc_attr_group,
+       NULL,
+};
+
+static void soc_release(struct device *dev)
+{
+       struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+       kfree(soc_dev);
+}
+
+struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
+{
+       struct soc_device *soc_dev;
+       int ret;
+
+       soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
+       if (!soc_dev) {
+               ret = -ENOMEM;
+               goto out1;
+       }
+
+       /* Fetch a unique (reclaimable) SOC ID. */
+       do {
+               if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
+                       ret = -ENOMEM;
+                       goto out2;
+               }
+
+               spin_lock(&soc_lock);
+               ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
+               spin_unlock(&soc_lock);
+
+       } while (ret == -EAGAIN);
+
+       if (ret)
+               goto out2;
+
+       soc_dev->attr = soc_dev_attr;
+       soc_dev->dev.bus = &soc_bus_type;
+       soc_dev->dev.groups = soc_attr_groups;
+       soc_dev->dev.release = soc_release;
+
+       dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
+
+       ret = device_register(&soc_dev->dev);
+       if (ret)
+               goto out3;
+
+       return soc_dev;
+
+out3:
+       ida_remove(&soc_ida, soc_dev->soc_dev_num);
+out2:
+       kfree(soc_dev);
+out1:
+       return ERR_PTR(ret);
+}
+
+/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+void soc_device_unregister(struct soc_device *soc_dev)
+{
+       ida_remove(&soc_ida, soc_dev->soc_dev_num);
+
+       device_unregister(&soc_dev->dev);
+}
+
+static int __init soc_bus_register(void)
+{
+       return bus_register(&soc_bus_type);
+}
+core_initcall(soc_bus_register);
+
+static void __exit soc_bus_unregister(void)
+{
+       ida_destroy(&soc_ida);
+
+       bus_unregister(&soc_bus_type);
+}
+module_exit(soc_bus_unregister);
index 72a0044..1c31dca 100644 (file)
@@ -2,6 +2,12 @@
 # ARM CPU Frequency scaling drivers
 #
 
+config ARM_OMAP2PLUS_CPUFREQ
+       bool "TI OMAP2+"
+       depends on ARCH_OMAP2PLUS
+       select CPU_FREQ_TABLE
+       default y
+
 config ARM_S3C64XX_CPUFREQ
        bool "Samsung S3C64XX"
        depends on CPU_S3C6410
index a48bc02..805bfeb 100644 (file)
@@ -43,6 +43,7 @@ obj-$(CONFIG_UX500_SOC_DB8500)                += db8500-cpufreq.o
 obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)      += s3c64xx-cpufreq.o
 obj-$(CONFIG_ARM_S5PV210_CPUFREQ)      += s5pv210-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)   += exynos4210-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)    += omap-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
new file mode 100644 (file)
index 0000000..d9d78ba
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+ *  CPU frequency scaling for OMAP using OPP information
+ *
+ *  Copyright (C) 2005 Nokia Corporation
+ *  Written by Tony Lindgren <tony@atomide.com>
+ *
+ *  Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/system.h>
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+#include <plat/common.h>
+#include <plat/omap_device.h>
+
+#include <mach/hardware.h>
+
+/* OPP tolerance in percentage */
+#define        OPP_TOLERANCE   4
+
+#ifdef CONFIG_SMP
+struct lpj_info {
+       unsigned long   ref;
+       unsigned int    freq;
+};
+
+static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
+static struct lpj_info global_lpj_ref;
+#endif
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct clk *mpu_clk;
+static char *mpu_clk_name;
+static struct device *mpu_dev;
+static struct regulator *mpu_reg;
+static unsigned long freq_max, volt_max;
+
+static int omap_verify_speed(struct cpufreq_policy *policy)
+{
+       if (!freq_table)
+               return -EINVAL;
+       return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int omap_getspeed(unsigned int cpu)
+{
+       unsigned long rate;
+
+       if (cpu >= NR_CPUS)
+               return 0;
+
+       rate = clk_get_rate(mpu_clk) / 1000;
+       return rate;
+}
+
+static int omap_target(struct cpufreq_policy *policy,
+                      unsigned int target_freq,
+                      unsigned int relation)
+{
+       unsigned int i;
+       int r, ret = 0;
+       struct cpufreq_freqs freqs;
+       struct opp *opp;
+       unsigned long freq, volt = 0, volt_old = 0;
+
+       if (!freq_table) {
+               dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
+                               policy->cpu);
+               return -EINVAL;
+       }
+
+       ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+                       relation, &i);
+       if (ret) {
+               dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
+                       __func__, policy->cpu, target_freq, ret);
+               return ret;
+       }
+       freqs.new = freq_table[i].frequency;
+       if (!freqs.new) {
+               dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
+                       policy->cpu, target_freq);
+               return -EINVAL;
+       }
+
+       freqs.old = omap_getspeed(policy->cpu);
+       freqs.cpu = policy->cpu;
+
+       if (freqs.old == freqs.new && policy->cur == freqs.new)
+               return ret;
+
+       /* notifiers */
+       for_each_cpu(i, policy->cpus) {
+               freqs.cpu = i;
+               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+       }
+
+       freq = freqs.new * 1000;
+
+       if (mpu_reg) {
+               opp = opp_find_freq_ceil(mpu_dev, &freq);
+               if (IS_ERR(opp)) {
+                       dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
+                               __func__, freqs.new);
+                       return -EINVAL;
+               }
+               volt = opp_get_voltage(opp);
+               volt_old = regulator_get_voltage(mpu_reg);
+       }
+
+       dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
+               freqs.old / 1000, volt_old ? (long)(volt_old / 1000) : -1L,
+               freqs.new / 1000, volt ? (long)(volt / 1000) : -1L);
+
+       /* scaling up?  scale voltage before frequency */
+       if (mpu_reg && (freqs.new > freqs.old)) {
+               r = regulator_set_voltage(mpu_reg, volt, volt_max);
+               if (r < 0) {
+                       dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
+                                __func__);
+                       freqs.new = freqs.old;
+                       goto done;
+               }
+       }
+
+       ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+
+       /* scaling down?  scale voltage after frequency */
+       if (mpu_reg && (freqs.new < freqs.old)) {
+               r = regulator_set_voltage(mpu_reg, volt, volt_max);
+               if (r < 0) {
+                       dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
+                                __func__);
+                       ret = clk_set_rate(mpu_clk, freqs.old * 1000);
+                       freqs.new = freqs.old;
+                       goto done;
+               }
+       }
+
+       freqs.new = omap_getspeed(policy->cpu);
+#ifdef CONFIG_SMP
+       /*
+        * Note that loops_per_jiffy is not updated on SMP systems in
+        * cpufreq driver. So, update the per-CPU loops_per_jiffy value
+        * on frequency transition. We need to update all dependent CPUs.
+        */
+       for_each_cpu(i, policy->cpus) {
+               struct lpj_info *lpj = &per_cpu(lpj_ref, i);
+               if (!lpj->freq) {
+                       lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
+                       lpj->freq = freqs.old;
+               }
+
+               per_cpu(cpu_data, i).loops_per_jiffy =
+                       cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
+       }
+
+       /* And don't forget to adjust the global one */
+       if (!global_lpj_ref.freq) {
+               global_lpj_ref.ref = loops_per_jiffy;
+               global_lpj_ref.freq = freqs.old;
+       }
+       loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
+                                       freqs.new);
+#endif
+
+done:
+       /* notifiers */
+       for_each_cpu(i, policy->cpus) {
+               freqs.cpu = i;
+               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+       }
+
+       return ret;
+}
+
+static inline void freq_table_free(void)
+{
+       if (atomic_dec_and_test(&freq_table_users))
+               opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
+/* force-update hack */
+static struct notifier_block omap_freq_nb;
+static struct cpufreq_policy *omap_freq_policy;
+
+static void check_max_freq(unsigned long freq)
+{
+       unsigned long volt;
+       struct opp *opp;
+
+       freq *= 1000;
+
+       if (freq <= freq_max)
+               return;
+
+       opp = opp_find_freq_ceil(mpu_dev, &freq);
+       if (IS_ERR(opp)) {
+               dev_err(mpu_dev, "%s: unable to find MPU OPP for %ld\n",
+                               __func__, freq);
+               return;
+       }
+
+       volt = opp_get_voltage(opp);
+       volt += volt * OPP_TOLERANCE / 100;
+
+       if (volt > volt_max) {
+               volt_max = volt;
+               freq_max = freq;
+       }
+}
+
+static int freq_notifier_call(struct notifier_block *nb, unsigned long type,
+                             void *devp)
+{
+       static DEFINE_SPINLOCK(lock);
+       struct cpufreq_frequency_table *new_freq_table, *old_freq_table;
+       unsigned long flags;
+       int ret;
+
+       ret = opp_init_cpufreq_table(mpu_dev, &new_freq_table);
+       if (ret) {
+               dev_err(mpu_dev, "%s: failed to create cpufreq_table: %d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       /* FIXME: use proper locks instead of these hacks */
+       spin_lock_irqsave(&lock, flags);
+       old_freq_table = freq_table;
+       freq_table = new_freq_table;
+       spin_unlock_irqrestore(&lock, flags);
+       msleep(1);
+       opp_free_cpufreq_table(mpu_dev, &old_freq_table);
+
+       if (omap_freq_policy == NULL) {
+               dev_err(mpu_dev, "%s: omap_freq_policy is NULL\n", __func__);
+               return -EINVAL;
+       }
+
+       cpufreq_frequency_table_get_attr(freq_table, omap_freq_policy->cpu);
+
+       ret = cpufreq_frequency_table_cpuinfo(omap_freq_policy, freq_table);
+       if (ret)
+               dev_err(mpu_dev, "%s: cpufreq_frequency_table_cpuinfo: %d\n",
+                       __func__, ret);
+       omap_freq_policy->user_policy.min = omap_freq_policy->cpuinfo.min_freq;
+       omap_freq_policy->user_policy.max = omap_freq_policy->cpuinfo.max_freq;
+
+       check_max_freq(omap_freq_policy->cpuinfo.max_freq);
+
+       return ret;
+}
+
+static void freq_register_opp_notifier(struct device *dev,
+                                      struct cpufreq_policy *policy)
+{
+       struct srcu_notifier_head *nh = opp_get_notifier(dev);
+       int ret;
+
+       omap_freq_policy = policy;
+
+       if (IS_ERR(nh)) {
+               ret = PTR_ERR(nh);
+               goto out;
+       }
+       omap_freq_nb.notifier_call = freq_notifier_call;
+       ret = srcu_notifier_chain_register(nh, &omap_freq_nb);
+out:
+       if (ret != 0)
+               dev_err(mpu_dev, "%s: failed to register notifier: %d\n",
+                               __func__, ret);
+}
+
+static void freq_unregister_opp_notifier(struct device *dev)
+{
+       struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+       if (IS_ERR(nh))
+               return;
+       srcu_notifier_chain_unregister(nh, &omap_freq_nb);
+}
+
+static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
+{
+       int result = 0;
+
+       mpu_clk = clk_get(NULL, mpu_clk_name);
+       if (IS_ERR(mpu_clk))
+               return PTR_ERR(mpu_clk);
+
+       if (policy->cpu >= NR_CPUS) {
+               result = -EINVAL;
+               goto fail_ck;
+       }
+
+       policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+
+       if (atomic_inc_return(&freq_table_users) == 1)
+               result = opp_init_cpufreq_table(mpu_dev, &freq_table);
+
+       if (result) {
+               dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+                               __func__, policy->cpu, result);
+               goto fail_ck;
+       }
+
+       result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+       if (result)
+               goto fail_table;
+
+       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+       policy->min = policy->cpuinfo.min_freq;
+       policy->max = policy->cpuinfo.max_freq;
+       policy->cur = omap_getspeed(policy->cpu);
+
+       check_max_freq(policy->cpuinfo.max_freq);
+
+       /*
+        * On OMAP SMP configuration, both processors share the voltage
+        * and clock. So both CPUs needs to be scaled together and hence
+        * needs software co-ordination. Use cpufreq affected_cpus
+        * interface to handle this scenario. Additional is_smp() check
+        * is to keep SMP_ON_UP build working.
+        */
+       if (is_smp()) {
+               policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+               cpumask_setall(policy->cpus);
+       }
+
+       /* FIXME: what's the actual transition time? */
+       policy->cpuinfo.transition_latency = 300 * 1000;
+
+       freq_register_opp_notifier(mpu_dev, policy);
+
+       return 0;
+
+fail_table:
+       freq_table_free();
+fail_ck:
+       clk_put(mpu_clk);
+       return result;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+       freq_unregister_opp_notifier(mpu_dev);
+       freq_table_free();
+       clk_put(mpu_clk);
+       return 0;
+}
+
+static struct freq_attr *omap_cpufreq_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+};
+
+static struct cpufreq_driver omap_driver = {
+       .flags          = CPUFREQ_STICKY,
+       .verify         = omap_verify_speed,
+       .target         = omap_target,
+       .get            = omap_getspeed,
+       .init           = omap_cpu_init,
+       .exit           = omap_cpu_exit,
+       .name           = "omap",
+       .attr           = omap_cpufreq_attr,
+};
+
+static int __init omap_cpufreq_init(void)
+{
+       if (cpu_is_omap24xx())
+               mpu_clk_name = "virt_prcm_set";
+       else if (cpu_is_omap34xx())
+               mpu_clk_name = "dpll1_ck";
+       else if (cpu_is_omap44xx())
+               mpu_clk_name = "dpll_mpu_ck";
+
+       if (!mpu_clk_name) {
+               pr_err("%s: unsupported Silicon?\n", __func__);
+               return -EINVAL;
+       }
+
+       mpu_dev = omap_device_get_by_hwmod_name("mpu");
+       if (!mpu_dev) {
+               pr_warning("%s: unable to get the mpu device\n", __func__);
+               return -EINVAL;
+       }
+
+       mpu_reg = regulator_get(mpu_dev, "vcc");
+       if (IS_ERR(mpu_reg)) {
+               pr_warning("%s: unable to get MPU regulator\n", __func__);
+               mpu_reg = NULL;
+       } else {
+               /* 
+                * Ensure physical regulator is present.
+                * (e.g. could be dummy regulator.)
+                */
+               if (regulator_get_voltage(mpu_reg) < 0) {
+                       pr_warn("%s: physical regulator not present for MPU\n",
+                               __func__);
+                       regulator_put(mpu_reg);
+                       mpu_reg = NULL;
+               }
+       }
+
+       return cpufreq_register_driver(&omap_driver);
+}
+
+static void __exit omap_cpufreq_exit(void)
+{
+       cpufreq_unregister_driver(&omap_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
+module_init(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
index 06ce268..5159e65 100644 (file)
@@ -197,6 +197,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
+       state->disable = 0;
 }
 #else
 static void poll_idle_init(struct cpuidle_driver *drv) {}
index ad09526..5c17ca1 100644 (file)
@@ -280,7 +280,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
-       if (data->expected_us > 5)
+       if (data->expected_us > 5 &&
+               drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
        /*
@@ -290,6 +291,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
 
+               if (s->disable)
+                       continue;
                if (s->target_residency > data->predicted_us)
                        continue;
                if (s->exit_latency > latency_req)
index 1e756e1..ff64887 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/capability.h>
 
 #include "cpuidle.h"
 
@@ -224,6 +225,9 @@ struct cpuidle_state_attr {
 #define define_one_state_ro(_name, show) \
 static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
 
+#define define_one_state_rw(_name, show, store) \
+static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
+
 #define define_show_state_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
                         struct cpuidle_state_usage *state_usage, char *buf) \
@@ -231,6 +235,24 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
        return sprintf(buf, "%u\n", state->_name);\
 }
 
+#define define_store_state_function(_name) \
+static ssize_t store_state_##_name(struct cpuidle_state *state, \
+               const char *buf, size_t size) \
+{ \
+       long value; \
+       int err; \
+       if (!capable(CAP_SYS_ADMIN)) \
+               return -EPERM; \
+       err = kstrtol(buf, 0, &value); \
+       if (err) \
+               return err; \
+       if (value) \
+               state->disable = 1; \
+       else \
+               state->disable = 0; \
+       return size; \
+}
+
 #define define_show_state_ull_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
                        struct cpuidle_state_usage *state_usage, char *buf) \
@@ -253,6 +275,8 @@ define_show_state_ull_function(usage)
 define_show_state_ull_function(time)
 define_show_state_str_function(name)
 define_show_state_str_function(desc)
+define_show_state_function(disable)
+define_store_state_function(disable)
 
 define_one_state_ro(name, show_state_name);
 define_one_state_ro(desc, show_state_desc);
@@ -260,6 +284,7 @@ define_one_state_ro(latency, show_state_exit_latency);
 define_one_state_ro(power, show_state_power_usage);
 define_one_state_ro(usage, show_state_usage);
 define_one_state_ro(time, show_state_time);
+define_one_state_rw(disable, show_state_disable, store_state_disable);
 
 static struct attribute *cpuidle_state_default_attrs[] = {
        &attr_name.attr,
@@ -268,6 +293,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
        &attr_power.attr,
        &attr_usage.attr,
        &attr_time.attr,
+       &attr_disable.attr,
        NULL
 };
 
@@ -289,8 +315,22 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
        return ret;
 }
 
+static ssize_t cpuidle_state_store(struct kobject *kobj,
+       struct attribute *attr, const char *buf, size_t size)
+{
+       int ret = -EIO;
+       struct cpuidle_state *state = kobj_to_state(kobj);
+       struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
+
+       if (cattr->store)
+               ret = cattr->store(state, buf, size);
+
+       return ret;
+}
+
 static const struct sysfs_ops cpuidle_state_sysfs_ops = {
        .show = cpuidle_state_show,
+       .store = cpuidle_state_store,
 };
 
 static void cpuidle_state_sysfs_release(struct kobject *kobj)
index a6c10e8..4390dd7 100644 (file)
@@ -44,6 +44,7 @@ struct gpio_bank {
        u32 saved_datain;
        u32 saved_fallingdetect;
        u32 saved_risingdetect;
+       u32 saved_irqstatus;
        u32 level_mask;
        u32 toggle_mask;
        spinlock_t lock;
@@ -1350,9 +1351,23 @@ void omap2_gpio_prepare_for_idle(int off_mode)
 
        for (i = min; i < gpio_bank_count; i++) {
                struct gpio_bank *bank = &gpio_bank[i];
+               void __iomem *reg;
                u32 l1 = 0, l2 = 0;
                int j;
 
+               /*
+                * Disable debounce since clock disable below will cause
+                * problems if GPIO module doesn't go idle for some reason.
+                */
+               if (bank->dbck_enable_mask != 0) {
+                       reg = bank->base + bank->regs->datain;
+                       bank->saved_datain = __raw_readl(reg);
+                       reg = bank->base + bank->regs->irqstatus;
+                       bank->saved_irqstatus = __raw_readl(reg);
+                       reg = bank->base + bank->regs->debounce_en;
+                       __raw_writel(0, reg);
+               }
+
                for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
                        clk_disable(bank->dbck);
 
@@ -1418,12 +1433,28 @@ void omap2_gpio_resume_after_idle(void)
                min = 1;
        for (i = min; i < gpio_bank_count; i++) {
                struct gpio_bank *bank = &gpio_bank[i];
+               void __iomem *reg;
                u32 l = 0, gen, gen0, gen1;
                int j;
 
                for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
                        clk_enable(bank->dbck);
 
+               if (bank->dbck_enable_mask != 0) {
+                       reg = bank->base + bank->regs->debounce_en;
+                       __raw_writel(bank->dbck_enable_mask, reg);
+                       /* clear irqs that could come from glitches
+                        * because debounce was disabled */
+                       reg = bank->base + bank->regs->irqstatus;
+                       gen = __raw_readl(reg) & ~bank->saved_irqstatus;
+                       reg = bank->base + bank->regs->datain;
+                       l = __raw_readl(reg) ^ bank->saved_datain;
+                       l = gen & ~l;
+                       l = l & bank->dbck_enable_mask & ~bank->level_mask;
+                       if (l)
+                               _clear_gpio_irqbank(bank, l);
+               }
+
                if (!workaround_enabled)
                        continue;
 
index 61660f8..fa0ba2c 100644 (file)
@@ -1537,6 +1537,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
index ccc89b0..c86c300 100644 (file)
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD  0xc20a
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD       0xc211
 #define USB_DEVICE_ID_LOGITECH_EXTREME_3D      0xc215
+#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION     0xc216
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2      0xc218
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2    0xc219
 #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D     0xc283
index 3ad6cd4..932c962 100644 (file)
@@ -440,6 +440,8 @@ static const struct hid_device_id lg_devices[] = {
 
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
                .driver_data = LG_NOGET },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION),
+               .driver_data = LG_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
                .driver_data = LG_NOGET | LG_FF4 },
 
index 257c1a5..7bcf92a 100644 (file)
@@ -1066,7 +1066,7 @@ omap_i2c_probe(struct platform_device *pdev)
 
        isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? omap_i2c_omap1_isr :
                                                                   omap_i2c_isr;
-       r = request_irq(dev->irq, isr, 0, pdev->name, dev);
+       r = request_irq(dev->irq, isr, IRQF_NO_SUSPEND, pdev->name, dev);
 
        if (r) {
                dev_err(dev->dev, "failure requesting irq %i\n", dev->irq);
index ed1ed46..a20764e 100644 (file)
@@ -342,6 +342,8 @@ static void gpio_keys_work_func(struct work_struct *work)
                container_of(work, struct gpio_button_data, work);
 
        gpio_keys_report_event(bdata);
+       if (bdata->button->wakeup)
+               pm_relax(bdata->input->dev.parent);
 }
 
 static void gpio_keys_timer(unsigned long _data)
@@ -358,6 +360,8 @@ static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
 
        BUG_ON(irq != gpio_to_irq(button->gpio));
 
+       if (bdata->button->wakeup)
+               pm_stay_awake(bdata->input->dev.parent);
        if (bdata->timer_debounce)
                mod_timer(&bdata->timer,
                        jiffies + msecs_to_jiffies(bdata->timer_debounce));
@@ -418,7 +422,7 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
        if (!button->can_disable)
                irqflags |= IRQF_SHARED;
 
-       error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata);
+       error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
        if (error < 0) {
                dev_err(dev, "Unable to claim irq %d; error %d\n",
                        irq, error);
index a26922c..217a000 100644 (file)
@@ -56,7 +56,7 @@
  * row lines connected to the gnd (see twl4030_col_xlate()).
  */
 #define TWL4030_ROW_SHIFT      4
-#define TWL4030_KEYMAP_SIZE    (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
+#define TWL4030_KEYMAP_SIZE    ((TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT) * 2)
 
 struct twl4030_keypad {
        unsigned short  keymap[TWL4030_KEYMAP_SIZE];
@@ -65,6 +65,9 @@ struct twl4030_keypad {
        unsigned        n_cols;
        unsigned        irq;
 
+       unsigned        fn_down:1;
+       unsigned        fn_sticked:1;
+
        struct device *dbg_dev;
        struct input_dev *input;
 };
@@ -231,7 +234,8 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
 
                /* Extra column handles "all gnd" rows */
                for (col = 0; col < kp->n_cols + 1; col++) {
-                       int code;
+                       int code, kcode, is_down;
+                       int code2;
 
                        if (!(changed & (1 << col)))
                                continue;
@@ -241,9 +245,34 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
                                "press" : "release");
 
                        code = MATRIX_SCAN_CODE(row, col, TWL4030_ROW_SHIFT);
+                       kcode = kp->keymap[code];
+                       is_down = (new_state[row] & (1 << col)) ? 1 : 0;
+
+                       dev_dbg(kp->dbg_dev, "code:     %d %d\n", code, kcode);
+                       /* Fn handling */
+                       if (kcode == KEY_FN) {
+                               kp->fn_down = is_down;
+                               kp->fn_sticked |= is_down;
+                       } else if (kp->fn_down || kp->fn_sticked) {
+                               /* make sure other function is up */
+                               input_event(input, EV_MSC, MSC_SCAN, code);
+                               input_report_key(input, kcode, 0);
+
+                               code = MATRIX_SCAN_CODE(row + TWL4030_MAX_ROWS,
+                                       col, TWL4030_ROW_SHIFT);
+                               kcode = kp->keymap[code];
+
+                               kp->fn_sticked = 0;
+                       } else {
+                               code2 = MATRIX_SCAN_CODE(row + TWL4030_MAX_ROWS,
+                                       col, TWL4030_ROW_SHIFT);
+                               input_event(input, EV_MSC, MSC_SCAN, code2);
+                               input_report_key(input, kp->keymap[code2], 0);
+                       }
+
+                       dev_dbg(kp->dbg_dev, "code(fn): %d %d\n", code, kcode);
                        input_event(input, EV_MSC, MSC_SCAN, code);
-                       input_report_key(input, kp->keymap[code],
-                                        new_state[row] & (1 << col));
+                       input_report_key(input, kcode, is_down);
                }
                kp->kp_state[row] = new_state[row];
        }
@@ -370,7 +399,7 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_MSC, MSC_SCAN);
 
-       input->name             = "TWL4030 Keypad";
+       input->name             = "keypad";
        input->phys             = "twl4030_keypad/input0";
        input->dev.parent       = &pdev->dev;
 
index 22d875f..aef269b 100644 (file)
@@ -352,6 +352,16 @@ config INPUT_TWL6040_VIBRA
          To compile this driver as a module, choose M here. The module will
          be called twl6040_vibra.
 
+config INPUT_VSENSE
+       tristate "VSense Navigation Device"
+       depends on I2C
+       help
+         Say Y here if you want to support VSense navigation device
+         (the analog nub on pandora handheld).
+
+         To compile this driver as a module, choose M here: the
+         module will be called vsense.
+
 config INPUT_UINPUT
        tristate "User level driver support"
        help
index a244fc6..d937548 100644 (file)
@@ -46,6 +46,7 @@ obj-$(CONFIG_INPUT_SPARCSPKR)         += sparcspkr.o
 obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON)  += twl4030-pwrbutton.o
 obj-$(CONFIG_INPUT_TWL4030_VIBRA)      += twl4030-vibra.o
 obj-$(CONFIG_INPUT_TWL6040_VIBRA)      += twl6040-vibra.o
+obj-$(CONFIG_INPUT_VSENSE)             += vsense.o
 obj-$(CONFIG_INPUT_UINPUT)             += uinput.o
 obj-$(CONFIG_INPUT_WISTRON_BTNS)       += wistron_btns.o
 obj-$(CONFIG_INPUT_WM831X_ON)          += wm831x-on.o
index 38e4b50..29555ee 100644 (file)
@@ -42,6 +42,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
        err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
                                STS_HW_CONDITIONS);
        if (!err)  {
+               pm_wakeup_event(pwr->dev.parent, 0);
                input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
                input_sync(pwr);
        } else {
@@ -66,7 +67,7 @@ static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
 
        pwr->evbit[0] = BIT_MASK(EV_KEY);
        pwr->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
-       pwr->name = "twl4030_pwrbutton";
+       pwr->name = "power-button";
        pwr->phys = "twl4030_pwrbutton/input0";
        pwr->dev.parent = &pdev->dev;
 
diff --git a/drivers/input/misc/vsense.c b/drivers/input/misc/vsense.c
new file mode 100644 (file)
index 0000000..e57b9ac
--- /dev/null
@@ -0,0 +1,777 @@
+/*
+       vsense.c
+
+       Written by Gražvydas Ignotas <notasas@gmail.com>
+
+       This program is free software; you can redistribute it and/or modify
+       it under the terms of the GNU General Public License as published by
+       the Free Software Foundation; version 2 of the License.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#include <linux/idr.h>
+#include <linux/i2c/vsense.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+
+#define VSENSE_INTERVAL                25
+
+#define VSENSE_MODE_ABS                0
+#define VSENSE_MODE_MOUSE      1
+#define VSENSE_MODE_SCROLL     2
+#define VSENSE_MODE_MBUTTONS   3
+
+static DEFINE_IDR(vsense_proc_id);
+static DEFINE_MUTEX(vsense_mutex);
+
+/* Reset state is shared between both nubs, so we keep
+ * track of it here.
+ */
+static int vsense_reset_state;
+static int vsense_reset_refcount;
+
+struct vsense_drvdata {
+       char dev_name[12];
+       struct input_dev *input;
+       struct i2c_client *client;
+       struct regulator *reg;
+       struct delayed_work work;
+       int reset_gpio;
+       int irq_gpio;
+       int mode;
+       int proc_id;
+       struct proc_dir_entry *proc_root;
+       int mouse_multiplier;   /* 24.8 */
+       int scrollx_multiplier;
+       int scrolly_multiplier;
+       int scroll_counter;
+       int scroll_steps;
+       struct {
+               int threshold_x;
+               int threshold_y;
+               int delay;
+               int dblclick_stage;
+               int state_l;
+               int state_m;
+               int state_r;
+               int pos_active;
+               int pos_prev;
+               int pos_stable_counter;
+       } mbutton;
+};
+
+enum nub_position {
+       NUB_POS_CENTER = 0,
+       NUB_POS_UP,
+       NUB_POS_RIGHT,
+       NUB_POS_DOWN,
+       NUB_POS_LEFT,
+};
+
+static void release_mbuttons(struct vsense_drvdata *ddata)
+{
+       if (ddata->mbutton.state_l) {
+               input_report_key(ddata->input, BTN_LEFT, 0);
+               ddata->mbutton.state_l = 0;
+       }
+       if (ddata->mbutton.state_m) {
+               input_report_key(ddata->input, BTN_MIDDLE, 0);
+               ddata->mbutton.state_m = 0;
+       }
+       if (ddata->mbutton.state_r) {
+               input_report_key(ddata->input, BTN_RIGHT, 0);
+               ddata->mbutton.state_r = 0;
+       }
+       ddata->mbutton.pos_active = NUB_POS_CENTER;
+}
+
+static void vsense_work(struct work_struct *work)
+{
+       struct vsense_drvdata *ddata;
+       int ax = 0, ay = 0, rx = 0, ry = 0;
+       int update_pending = 0;
+       signed char buff[4];
+       int ret, pos, l, m, r;
+
+       ddata = container_of(work, struct vsense_drvdata, work.work);
+
+       if (unlikely(gpio_get_value(ddata->irq_gpio)))
+               goto dosync;
+
+       ret = i2c_master_recv(ddata->client, buff, sizeof(buff));
+       if (unlikely(ret != sizeof(buff))) {
+               dev_err(&ddata->client->dev, "read failed with %i\n", ret);
+               goto dosync;
+       }
+
+       rx = (signed int)buff[0];
+       ry = (signed int)buff[1];
+       ax = (signed int)buff[2];
+       ay = (signed int)buff[3];
+
+       schedule_delayed_work(&ddata->work, msecs_to_jiffies(VSENSE_INTERVAL));
+       update_pending = 1;
+
+dosync:
+       switch (ddata->mode) {
+       case VSENSE_MODE_MOUSE:
+               rx = rx * ddata->mouse_multiplier / 256;
+               ry = -ry * ddata->mouse_multiplier / 256;
+               input_report_rel(ddata->input, REL_X, rx);
+               input_report_rel(ddata->input, REL_Y, ry);
+               break;
+       case VSENSE_MODE_SCROLL:
+               if (++(ddata->scroll_counter) < ddata->scroll_steps)
+                       return;
+               ddata->scroll_counter = 0;
+               ax = ax * ddata->scrollx_multiplier / 256;
+               ay = ay * ddata->scrolly_multiplier / 256;
+               input_report_rel(ddata->input, REL_HWHEEL, ax);
+               input_report_rel(ddata->input, REL_WHEEL, ay);
+               break;
+       case VSENSE_MODE_MBUTTONS:
+               if (!update_pending) {
+                       release_mbuttons(ddata);
+                       break;
+               }
+
+               pos = NUB_POS_CENTER;
+               if      (ax >= ddata->mbutton.threshold_x) pos = NUB_POS_RIGHT;
+               else if (ax <= -ddata->mbutton.threshold_x) pos = NUB_POS_LEFT;
+               else if (ay >= ddata->mbutton.threshold_y) pos = NUB_POS_UP;
+               else if (ay <= -ddata->mbutton.threshold_y) pos = NUB_POS_DOWN;
+
+               if (pos != ddata->mbutton.pos_prev) {
+                       ddata->mbutton.pos_prev = pos;
+                       ddata->mbutton.pos_stable_counter = 0;
+               }
+               else
+                       ddata->mbutton.pos_stable_counter++;
+
+               if (ddata->mbutton.pos_stable_counter < ddata->mbutton.delay)
+                       pos = ddata->mbutton.pos_active;
+
+               if (pos != NUB_POS_UP)
+                       ddata->mbutton.dblclick_stage = 0;
+
+               l = m = r = 0;
+               switch (pos) {
+               case NUB_POS_UP:
+                       ddata->mbutton.dblclick_stage++;
+                       switch (ddata->mbutton.dblclick_stage) {
+                               case 1: case 2: case 5: case 6:
+                               l = 1;
+                               break;
+                       }
+                       break;
+               case NUB_POS_RIGHT:
+                       r = 1;
+                       break;
+               case NUB_POS_DOWN:
+                       m = 1;
+                       break;
+               case NUB_POS_LEFT:
+                       l = 1;
+                       break;
+               }
+               input_report_key(ddata->input, BTN_LEFT, l);
+               input_report_key(ddata->input, BTN_RIGHT, r);
+               input_report_key(ddata->input, BTN_MIDDLE, m);
+               ddata->mbutton.pos_active = pos;
+               ddata->mbutton.state_l = l;
+               ddata->mbutton.state_m = m;
+               ddata->mbutton.state_r = r;
+               break;
+       default:
+               input_report_abs(ddata->input, ABS_X, ax * 8);
+               input_report_abs(ddata->input, ABS_Y, -ay * 8);
+               break;
+       }
+       input_sync(ddata->input);
+}
+
+static irqreturn_t vsense_isr(int irq, void *dev_id)
+{
+       struct vsense_drvdata *ddata = dev_id;
+
+       schedule_delayed_work(&ddata->work, 0);
+
+       return IRQ_HANDLED;
+}
+
+static int vsense_reset(struct vsense_drvdata *ddata, int val)
+{
+       int ret;
+
+       dev_dbg(&ddata->client->dev, "vsense_reset: %i\n", val);
+
+       if (ddata->mode != VSENSE_MODE_ABS)
+               release_mbuttons(ddata);
+
+       ret = gpio_direction_output(ddata->reset_gpio, val);
+       if (ret < 0) {
+               dev_err(&ddata->client->dev, "failed to configure direction "
+                       "for GPIO %d, error %d\n", ddata->reset_gpio, ret);
+       }
+       else {
+               vsense_reset_state = val;
+       }
+
+       return ret;
+}
+
+static int vsense_open(struct input_dev *dev)
+{
+       dev_dbg(&dev->dev, "vsense_open\n");
+
+       /* get out of reset and stay there until user wants to reset it */
+       if (vsense_reset_state != 0)
+               vsense_reset(input_get_drvdata(dev), 0);
+
+       return 0;
+}
+
+static int vsense_input_register(struct vsense_drvdata *ddata, int mode)
+{
+       struct input_dev *input;
+       int ret;
+
+       input = input_allocate_device();
+       if (input == NULL)
+               return -ENOMEM;
+
+       if (mode != VSENSE_MODE_ABS) {
+               /* pretend to be a mouse */
+               input_set_capability(input, EV_REL, REL_X);
+               input_set_capability(input, EV_REL, REL_Y);
+               input_set_capability(input, EV_REL, REL_WHEEL);
+               input_set_capability(input, EV_REL, REL_HWHEEL);
+               /* add fake buttons to fool X that this is a mouse */
+               input_set_capability(input, EV_KEY, BTN_LEFT);
+               input_set_capability(input, EV_KEY, BTN_RIGHT);
+               input_set_capability(input, EV_KEY, BTN_MIDDLE);
+       } else {
+               input->evbit[BIT_WORD(EV_ABS)] = BIT_MASK(EV_ABS);
+               input_set_abs_params(input, ABS_X, -256, 256, 0, 0);
+               input_set_abs_params(input, ABS_Y, -256, 256, 0, 0);
+       }
+
+       input->name = ddata->dev_name;
+       input->dev.parent = &ddata->client->dev;
+
+       input->id.bustype = BUS_I2C;
+       input->id.version = 0x0092;
+
+       input->open = vsense_open;
+
+       ddata->input = input;
+       input_set_drvdata(input, ddata);
+
+       ret = input_register_device(input);
+       if (ret) {
+               dev_err(&ddata->client->dev, "failed to register input device,"
+                       " error %d\n", ret);
+               input_free_device(input);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void vsense_input_unregister(struct vsense_drvdata *ddata)
+{
+       cancel_delayed_work_sync(&ddata->work);
+       input_unregister_device(ddata->input);
+}
+
+static int vsense_proc_mode_read(char *page, char **start, off_t off, int count,
+               int *eof, void *data)
+{
+       struct vsense_drvdata *ddata = data;
+       char *p = page;
+       int len;
+
+       switch (ddata->mode) {
+       case VSENSE_MODE_MOUSE:
+               len = sprintf(p, "mouse\n");
+               break;
+       case VSENSE_MODE_SCROLL:
+               len = sprintf(p, "scroll\n");
+               break;
+       case VSENSE_MODE_MBUTTONS:
+               len = sprintf(p, "mbuttons\n");
+               break;
+       default:
+               len = sprintf(p, "absolute\n");
+               break;
+       }
+
+       *eof = 1;
+       return len;
+}
+
+static int vsense_proc_mode_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       struct vsense_drvdata *ddata = data;
+       int mode = ddata->mode;
+       char buff[32], *p;
+       int ret;
+
+       count = strncpy_from_user(buff, buffer,
+                       count < sizeof(buff) ? count : sizeof(buff) - 1);
+       buff[count] = 0;
+
+       p = buff + strlen(buff) - 1;
+       while (p > buff && isspace(*p))
+               p--;
+       p[1] = 0;
+
+       if (strcasecmp(buff, "mouse") == 0)
+               mode = VSENSE_MODE_MOUSE;
+       else if (strcasecmp(buff, "scroll") == 0)
+               mode = VSENSE_MODE_SCROLL;
+       else if (strcasecmp(buff, "mbuttons") == 0)
+               mode = VSENSE_MODE_MBUTTONS;
+       else if (strcasecmp(buff, "absolute") == 0)
+               mode = VSENSE_MODE_ABS;
+       else {
+               dev_err(&ddata->client->dev, "unknown mode: %s\n", buff);
+               return -EINVAL;
+       }
+
+       if (ddata->mode != VSENSE_MODE_ABS)
+               release_mbuttons(ddata);
+
+       if ((mode == VSENSE_MODE_ABS && ddata->mode != VSENSE_MODE_ABS) ||
+           (mode != VSENSE_MODE_ABS && ddata->mode == VSENSE_MODE_ABS)) {
+               disable_irq(ddata->client->irq);
+               vsense_input_unregister(ddata);
+               ret = vsense_input_register(ddata, mode);
+               if (ret)
+                       dev_err(&ddata->client->dev, "failed to re-register "
+                               "input as %d, code %d\n", mode, ret);
+               else
+                       enable_irq(ddata->client->irq);
+       }
+       ddata->mode = mode;
+
+       return count;
+}
+
+static int vsense_proc_int_read(char *page, char **start, off_t off,
+               int count, int *eof, void *data)
+{
+       int *val = data;
+       int len;
+
+       len = sprintf(page, "%d\n", *val);
+       *eof = 1;
+       return len;
+}
+
+static int vsense_proc_int_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       char buff[32];
+       long val;
+       int ret;
+       int *value = data;
+
+       count = strncpy_from_user(buff, buffer,
+                       count < sizeof(buff) ? count : sizeof(buff) - 1);
+       buff[count] = 0;
+
+       ret = strict_strtol(buff, 0, &val);
+       if (ret < 0)
+               return ret;
+       *value = val;
+       return count;
+}
+
+static int vsense_proc_mult_read(char *page, char **start, off_t off,
+               int count, int *eof, void *data)
+{
+       int *multiplier = data;
+       int val = *multiplier * 100 / 256;
+       return vsense_proc_int_read(page, start, off, count, eof, &val);
+}
+
+static int vsense_proc_mult_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       int *multiplier = data;
+       int ret, val, adj;
+
+       ret = vsense_proc_int_write(file, buffer, count, &val);
+       if (ret < 0)
+               return ret;
+       if (val == 0)
+               return -EINVAL;
+
+       /* round to higher absolute value */
+       adj = val < 0 ? -99 : 99;
+       *multiplier = (val * 256 + adj) / 100;
+
+       return ret;
+}
+
+static int vsense_proc_rate_read(char *page, char **start, off_t off,
+               int count, int *eof, void *data)
+{
+       int *steps = data;
+       int val = 1000 / VSENSE_INTERVAL / *steps;
+       return vsense_proc_int_read(page, start, off, count, eof, &val);
+}
+
+static int vsense_proc_rate_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       int *steps = data;
+       int ret, val;
+
+       ret = vsense_proc_int_write(file, buffer, count, &val);
+       if (ret < 0)
+               return ret;
+       if (val < 1)
+               return -EINVAL;
+
+       *steps = 1000 / VSENSE_INTERVAL / val;
+       if (*steps < 1)
+               *steps = 1;
+       return ret;
+}
+
+static int vsense_proc_treshold_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       int *value = data;
+       int ret, val;
+
+       ret = vsense_proc_int_write(file, buffer, count, &val);
+       if (ret < 0)
+               return ret;
+       if (val < 1 || val > 32)
+               return -EINVAL;
+
+       *value = val;
+       return ret;
+}
+
+static ssize_t
+vsense_show_reset(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", vsense_reset_state);
+}
+
+static ssize_t
+vsense_set_reset(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       unsigned long new_reset;
+       struct i2c_client *client;
+       struct vsense_drvdata *ddata;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &new_reset);
+       if (ret)
+               return -EINVAL;
+
+       client = to_i2c_client(dev);
+       ddata = i2c_get_clientdata(client);
+
+       vsense_reset(ddata, new_reset ? 1 : 0);
+
+       return count;
+}
+static DEVICE_ATTR(reset, S_IRUGO | S_IWUSR,
+       vsense_show_reset, vsense_set_reset);
+
+static void vsense_create_proc(struct vsense_drvdata *ddata,
+                              void *pdata, const char *name,
+                              read_proc_t *read_proc, write_proc_t *write_proc)
+{
+       struct proc_dir_entry *pret;
+
+       pret = create_proc_entry(name, S_IWUGO | S_IRUGO, ddata->proc_root);
+       if (pret == NULL) {
+               dev_err(&ddata->client->dev, "failed to create proc file %s\n", name);
+               return;
+       }
+
+       pret->data = pdata;
+       pret->read_proc = read_proc;
+       pret->write_proc = write_proc;
+}
+
+static int vsense_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct vsense_platform_data *pdata = client->dev.platform_data;
+       struct vsense_drvdata *ddata;
+       char buff[32];
+       int ret;
+
+       if (pdata == NULL) {
+               dev_err(&client->dev, "no platform data?\n");
+               return -EINVAL;
+       }
+
+       if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
+               dev_err(&client->dev, "can't talk I2C?\n");
+               return -EIO;
+       }
+
+       ddata = kzalloc(sizeof(struct vsense_drvdata), GFP_KERNEL);
+       if (ddata == NULL)
+               return -ENOMEM;
+
+       ret = idr_pre_get(&vsense_proc_id, GFP_KERNEL);
+       if (ret == 0) {
+               ret = -ENOMEM;
+               goto err_idr;
+       }
+
+       mutex_lock(&vsense_mutex);
+
+       ret = idr_get_new(&vsense_proc_id, client, &ddata->proc_id);
+       if (ret < 0) {
+               mutex_unlock(&vsense_mutex);
+               goto err_idr;
+       }
+
+       if (!vsense_reset_refcount) {
+               ret = gpio_request_one(pdata->gpio_reset, GPIOF_OUT_INIT_HIGH,
+                       "vsense reset");
+               if (ret < 0) {
+                       dev_err(&client->dev, "gpio_request error: %d, %d\n",
+                               pdata->gpio_reset, ret);
+                       mutex_unlock(&vsense_mutex);
+                       goto err_gpio_reset;
+               }
+       }
+       vsense_reset_refcount++;
+
+       mutex_unlock(&vsense_mutex);
+
+       ret = gpio_request_one(pdata->gpio_irq, GPIOF_IN, client->name);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to request GPIO %d,"
+                       " error %d\n", pdata->gpio_irq, ret);
+               goto err_gpio_irq;
+       }
+
+       ret = gpio_to_irq(pdata->gpio_irq);
+       if (ret < 0) {
+               dev_err(&client->dev, "unable to get irq number for GPIO %d, "
+                       "error %d\n", pdata->gpio_irq, ret);
+               goto err_gpio_to_irq;
+       }
+       client->irq = ret;
+
+       snprintf(ddata->dev_name, sizeof(ddata->dev_name),
+                "nub%d", ddata->proc_id);
+
+       INIT_DELAYED_WORK(&ddata->work, vsense_work);
+       ddata->mode = VSENSE_MODE_ABS;
+       ddata->client = client;
+       ddata->reset_gpio = pdata->gpio_reset;
+       ddata->irq_gpio = pdata->gpio_irq;
+       ddata->mouse_multiplier = 170 * 256 / 100;
+       ddata->scrollx_multiplier =
+       ddata->scrolly_multiplier = 8 * 256 / 100;
+       ddata->scroll_steps = 1000 / VSENSE_INTERVAL / 3;
+       ddata->mbutton.threshold_x = 20;
+       ddata->mbutton.threshold_y = 26;
+       ddata->mbutton.delay = 1;
+       i2c_set_clientdata(client, ddata);
+
+       ddata->reg = regulator_get(&client->dev, "vcc");
+       if (IS_ERR(ddata->reg)) {
+               ret = PTR_ERR(ddata->reg);
+               dev_err(&client->dev, "unable to get regulator: %d\n", ret);
+               goto err_regulator_get;
+       }
+
+       ret = regulator_enable(ddata->reg);
+       if (ret) {
+               dev_err(&client->dev, "unable to enable regulator: %d\n", ret);
+               goto err_regulator_enable;
+       }
+
+       /* HACK */
+       if (vsense_reset_refcount == 2)
+               /* resetting drains power, as well as disabling supply,
+                * so keep it powered and out of reset at all times */
+               vsense_reset(ddata, 0);
+
+       ret = vsense_input_register(ddata, ddata->mode);
+       if (ret) {
+               dev_err(&client->dev, "failed to register input device, "
+                       "error %d\n", ret);
+               goto err_input_register;
+       }
+
+       ret = request_irq(client->irq, vsense_isr,
+                       IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_FALLING,
+                       client->name, ddata);
+       if (ret) {
+               dev_err(&client->dev, "unable to claim irq %d, error %d\n",
+                       client->irq, ret);
+               goto err_request_irq;
+       }
+
+       dev_dbg(&client->dev, "probe %02x, gpio %i, irq %i, \"%s\"\n",
+               client->addr, pdata->gpio_irq, client->irq, client->name);
+
+       snprintf(buff, sizeof(buff), "pandora/nub%d", ddata->proc_id);
+       ddata->proc_root = proc_mkdir(buff, NULL);
+       if (ddata->proc_root != NULL) {
+               vsense_create_proc(ddata, ddata, "mode",
+                               vsense_proc_mode_read, vsense_proc_mode_write);
+               vsense_create_proc(ddata, &ddata->mouse_multiplier, "mouse_sensitivity",
+                               vsense_proc_mult_read, vsense_proc_mult_write);
+               vsense_create_proc(ddata, &ddata->scrollx_multiplier, "scrollx_sensitivity",
+                               vsense_proc_mult_read, vsense_proc_mult_write);
+               vsense_create_proc(ddata, &ddata->scrolly_multiplier, "scrolly_sensitivity",
+                               vsense_proc_mult_read, vsense_proc_mult_write);
+               vsense_create_proc(ddata, &ddata->scroll_steps, "scroll_rate",
+                               vsense_proc_rate_read, vsense_proc_rate_write);
+               vsense_create_proc(ddata, &ddata->mbutton.threshold_x, "mbutton_threshold",
+                               vsense_proc_int_read, vsense_proc_treshold_write);
+               vsense_create_proc(ddata, &ddata->mbutton.threshold_y, "mbutton_threshold_y",
+                               vsense_proc_int_read, vsense_proc_treshold_write);
+               vsense_create_proc(ddata, &ddata->mbutton.delay, "mbutton_delay",
+                               vsense_proc_int_read, vsense_proc_int_write);
+       } else
+               dev_err(&client->dev, "can't create proc dir");
+
+       ret = device_create_file(&client->dev, &dev_attr_reset);
+
+       return 0;
+
+err_request_irq:
+       vsense_input_unregister(ddata);
+err_input_register:
+err_regulator_enable:
+       regulator_put(ddata->reg);
+err_regulator_get:
+err_gpio_to_irq:
+       gpio_free(pdata->gpio_irq);
+err_gpio_irq:
+       gpio_free(pdata->gpio_reset);
+err_gpio_reset:
+       idr_remove(&vsense_proc_id, ddata->proc_id);
+err_idr:
+       kfree(ddata);
+       return ret;
+}
+
+static int __devexit vsense_remove(struct i2c_client *client)
+{
+       struct vsense_drvdata *ddata;
+       char buff[32];
+
+       dev_dbg(&client->dev, "remove\n");
+
+       ddata = i2c_get_clientdata(client);
+
+       mutex_lock(&vsense_mutex);
+
+       vsense_reset_refcount--;
+       if (!vsense_reset_refcount)
+               gpio_free(ddata->reset_gpio);
+
+       mutex_unlock(&vsense_mutex);
+
+       device_remove_file(&client->dev, &dev_attr_reset);
+
+       remove_proc_entry("mode", ddata->proc_root);
+       remove_proc_entry("mouse_sensitivity", ddata->proc_root);
+       remove_proc_entry("scrollx_sensitivity", ddata->proc_root);
+       remove_proc_entry("scrolly_sensitivity", ddata->proc_root);
+       remove_proc_entry("scroll_rate", ddata->proc_root);
+       remove_proc_entry("mbutton_threshold", ddata->proc_root);
+       remove_proc_entry("mbutton_threshold_y", ddata->proc_root);
+       remove_proc_entry("mbutton_delay", ddata->proc_root);
+       snprintf(buff, sizeof(buff), "pandora/nub%d", ddata->proc_id);
+       remove_proc_entry(buff, NULL);
+       idr_remove(&vsense_proc_id, ddata->proc_id);
+
+       free_irq(client->irq, ddata);
+       vsense_input_unregister(ddata);
+       gpio_free(ddata->irq_gpio);
+       regulator_put(ddata->reg);
+       kfree(ddata);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vsense_i2c_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct vsense_drvdata *ddata = i2c_get_clientdata(client);
+
+       /* we can't process irqs while i2c is suspended and we can't
+        * ask the device to not generate them, so just disable instead */
+       cancel_delayed_work_sync(&ddata->work);
+       disable_irq(client->irq);
+
+       return 0;
+}
+
+static int vsense_i2c_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       enable_irq(client->irq);
+
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(vsense_pm_ops, vsense_i2c_suspend, vsense_i2c_resume);
+
+static const struct i2c_device_id vsense_id[] = {
+       { "vsense", 0 },
+       { }
+};
+
+static struct i2c_driver vsense_driver = {
+       .driver = {
+               .name   = "vsense",
+               .owner  = THIS_MODULE,
+               .pm     = &vsense_pm_ops,
+       },
+       .probe          = vsense_probe,
+       .remove         = __devexit_p(vsense_remove),
+       .id_table       = vsense_id,
+};
+
+static int __init vsense_init(void)
+{
+       return i2c_add_driver(&vsense_driver);
+}
+
+static void __exit vsense_exit(void)
+{
+       i2c_del_driver(&vsense_driver);
+}
+
+
+MODULE_AUTHOR("Grazvydas Ignotas");
+MODULE_DESCRIPTION("VSense navigation device driver");
+MODULE_LICENSE("GPL");
+
+module_init(vsense_init);
+module_exit(vsense_exit);
index de31ec6..53aabc7 100644 (file)
@@ -876,6 +876,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
 {
        struct ads7846 *ts = handle;
 
+       disable_irq_nosync(ts->spi->irq);
+
        /* Start with a small delay before checking pendown state */
        msleep(TS_POLL_DELAY);
 
@@ -902,6 +904,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
                dev_vdbg(&ts->spi->dev, "UP\n");
        }
 
+       enable_irq(ts->spi->irq);
+
        return IRQ_HANDLED;
 }
 
@@ -1281,7 +1285,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
        ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync;
 
        snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev));
-       snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model);
+       snprintf(ts->name, sizeof(ts->name), "touchscreen");
 
        input_dev->name = ts->name;
        input_dev->phys = ts->phys;
index ff203a4..90d9765 100644 (file)
@@ -283,6 +283,12 @@ config LEDS_PWM
        help
          This option enables support for pwm driven LEDs
 
+config LEDS_TWL4030_PWM
+       tristate "LED driver for TWL4030 PWM connected LEDs"
+       depends on LEDS_CLASS && TWL4030_CORE
+       help
+         This option enables support for TWL4030 PWM driven LEDs
+
 config LEDS_REGULATOR
        tristate "REGULATOR driven LED support"
        depends on LEDS_CLASS
index e4f6bf5..a71f158 100644 (file)
@@ -33,6 +33,7 @@ obj-$(CONFIG_LEDS_DA903X)             += leds-da903x.o
 obj-$(CONFIG_LEDS_WM831X_STATUS)       += leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)              += leds-wm8350.o
 obj-$(CONFIG_LEDS_PWM)                 += leds-pwm.o
+obj-$(CONFIG_LEDS_TWL4030_PWM)         += leds-twl4030-pwm.o
 obj-$(CONFIG_LEDS_REGULATOR)           += leds-regulator.o
 obj-$(CONFIG_LEDS_INTEL_SS4200)                += leds-ss4200.o
 obj-$(CONFIG_LEDS_LT3593)              += leds-lt3593.o
index 6f1ff93..94aeeca 100644 (file)
@@ -122,6 +122,8 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
                led_cdev->trigger = trigger;
                if (trigger->activate)
                        trigger->activate(led_cdev);
+               else
+                       led_set_brightness(led_cdev, trigger->prev_brightness);
        }
 }
 EXPORT_SYMBOL_GPL(led_trigger_set);
@@ -227,6 +229,7 @@ void led_trigger_event(struct led_trigger *trigger,
                led_cdev = list_entry(entry, struct led_classdev, trig_list);
                led_set_brightness(led_cdev, brightness);
        }
+       trigger->prev_brightness = brightness;
        read_unlock(&trigger->leddev_list_lock);
 }
 EXPORT_SYMBOL_GPL(led_trigger_event);
diff --git a/drivers/leds/leds-twl4030-pwm.c b/drivers/leds/leds-twl4030-pwm.c
new file mode 100644 (file)
index 0000000..66f431d
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * TWL4030 PWM controlled LED driver (LEDA, LEDB, PWM0, PWM1)
+ *
+ * Author: Gražvydas Ignotas <notasas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/leds_pwm.h>
+#include <linux/i2c/twl.h>
+#include <linux/slab.h>
+
+#define TWL_INTBR_GPBR1                0x0c
+#define TWL_INTBR_PMBR1                0x0d
+
+#define TWL4030_PWMx_PWMxON    0x00
+#define TWL4030_PWMx_PWMxOFF   0x01
+#define TWL4030_LED_LEDEN      0x00
+
+#define GPBR1_PWM0_CLK_ENABLE  BIT(0)
+#define GPBR1_PWM1_CLK_ENABLE  BIT(1)
+#define GPBR1_PWM0_ENABLE      BIT(2)
+#define GPBR1_PWM1_ENABLE      BIT(3)
+
+/* LEDEN bits */
+#define LEDEN_LEDAON           BIT(0)
+#define LEDEN_LEDBON           BIT(1)
+#define LEDEN_LEDAPWM          BIT(4)
+#define LEDEN_LEDBPWM          BIT(5)
+
+#define LED_UNKNOWN            -1
+
+enum twl4030_led {
+       TWL4030_LEDA = 0,
+       TWL4030_LEDB,
+       TWL4030_PWM0,
+       TWL4030_PWM1,
+};
+
+struct twl4030_pwmled {
+       struct led_classdev cdev;
+       void (*enable)(enum twl4030_led led, bool enable);
+       enum led_brightness new_brightness;
+       enum led_brightness old_brightness;
+       enum twl4030_led id;
+       int module;
+       struct work_struct work;
+};
+
+static void twl4030_enable_ledab(enum twl4030_led led, bool enable)
+{
+       u8 bits;
+
+       if (led == TWL4030_LEDA)
+               bits = LEDEN_LEDAON | LEDEN_LEDAPWM;
+       else
+               bits = LEDEN_LEDBON | LEDEN_LEDBPWM;
+
+       if (enable)
+               twl_i2c_rmw_u8(TWL4030_MODULE_LED, 0, bits, TWL4030_LED_LEDEN);
+       else
+               twl_i2c_rmw_u8(TWL4030_MODULE_LED, bits, 0, TWL4030_LED_LEDEN);
+}
+
+static void twl4030_enable_pwm01(enum twl4030_led led, bool enable)
+{
+       u8 enbit, clkbit;
+
+       if (led == TWL4030_PWM0) {
+               enbit = GPBR1_PWM0_ENABLE;
+               clkbit = GPBR1_PWM0_CLK_ENABLE;
+       } else {
+               enbit = GPBR1_PWM1_ENABLE;
+               clkbit = GPBR1_PWM1_CLK_ENABLE;
+       }
+
+       if (enable) {
+               /* first enable clock, then PWM out */
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                       enbit, clkbit, TWL_INTBR_GPBR1);
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                       0, enbit, TWL_INTBR_GPBR1);
+       } else {
+               /* first disable PWM output, then clock */
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                       enbit, 0, TWL_INTBR_GPBR1);
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                       clkbit, 0, TWL_INTBR_GPBR1);
+       }
+}
+
+static void twl4030_pwmled_work(struct work_struct *work)
+{
+       enum led_brightness new_brightness;
+       struct twl4030_pwmled *led;
+       int val;
+
+       led = container_of(work, struct twl4030_pwmled, work);
+
+       new_brightness = ACCESS_ONCE(led->new_brightness);
+       if (new_brightness == LED_OFF) {
+               if (led->old_brightness != LED_OFF)
+                       led->enable(led->id, 0);
+               goto out;
+       }
+
+       val = new_brightness * 0x7f / LED_FULL;
+       /* avoid 0: on = off = 0 means full brightness */
+       if (val == 0)
+               val = 1;
+
+       twl_i2c_write_u8(led->module, val, TWL4030_PWMx_PWMxOFF);
+
+       if (led->old_brightness == LED_OFF || led->old_brightness == LED_UNKNOWN)
+               led->enable(led->id, 1);
+
+out:
+       led->old_brightness = new_brightness;
+}
+
+static void twl4030_pwmled_brightness(struct led_classdev *cdev,
+               enum led_brightness b)
+{
+       struct twl4030_pwmled *led;
+
+       led = container_of(cdev, struct twl4030_pwmled, cdev);
+       led->new_brightness = b;
+       schedule_work(&led->work);
+}
+
+static int __devinit twl4030_pwmled_probe(struct platform_device *pdev)
+{
+       const struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
+       struct twl4030_pwmled *led, *leds;
+       int ret;
+       int i;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata || pdata->num_leds < 1 || pdata->num_leds > 4)
+               return -ENODEV;
+
+       leds = kcalloc(pdata->num_leds, sizeof(*leds), GFP_KERNEL);
+       if (!leds)
+               return -ENOMEM;
+
+       for (i = 0; i < pdata->num_leds; i++) {
+               led = &leds[i];
+               led->cdev.name = pdata->leds[i].name;
+               led->cdev.brightness = LED_OFF;
+               led->cdev.brightness_set = twl4030_pwmled_brightness;
+               led->cdev.default_trigger = pdata->leds[i].default_trigger;
+               led->id = pdata->leds[i].pwm_id;
+               led->old_brightness = LED_UNKNOWN;
+
+               switch (pdata->leds[i].pwm_id) {
+               case TWL4030_LEDA:
+                       led->module = TWL4030_MODULE_PWMA;
+                       led->enable = twl4030_enable_ledab;
+                       break;
+               case TWL4030_LEDB:
+                       led->module = TWL4030_MODULE_PWMB;
+                       led->enable = twl4030_enable_ledab;
+                       break;
+               case TWL4030_PWM0:
+                       led->module = TWL4030_MODULE_PWM0;
+                       led->enable = twl4030_enable_pwm01;
+                       /* enable PWM0 in pin mux */
+                       twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                               0x0c, 0x04, TWL_INTBR_PMBR1);
+                       /* enable PWM clock for initial write */
+                       twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                               0, GPBR1_PWM0_CLK_ENABLE, TWL_INTBR_GPBR1);
+                       break;
+               case TWL4030_PWM1:
+                       led->module = TWL4030_MODULE_PWM1;
+                       led->enable = twl4030_enable_pwm01;
+                       twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                               0, 0x30, TWL_INTBR_PMBR1);
+                       twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                               0, GPBR1_PWM1_CLK_ENABLE, TWL_INTBR_GPBR1);
+                       break;
+               default:
+                       dev_err(&pdev->dev, "invalid pwm_id: %d\n",
+                               pdata->leds[i].pwm_id);
+                       ret = -ENODEV;
+                       goto err;
+               }
+               INIT_WORK(&led->work, twl4030_pwmled_work);
+
+               twl_i2c_write_u8(led->module, 0, TWL4030_PWMx_PWMxON);
+               led->new_brightness = LED_OFF;
+               twl4030_pwmled_work(&led->work);
+
+               /* Hand it over to the LED framework */
+               ret = led_classdev_register(&pdev->dev, &led->cdev);
+               if (ret < 0)
+                       goto err;
+       }
+
+       platform_set_drvdata(pdev, leds);
+       return 0;
+
+err:
+       for (--i; i >= 0; i--)
+               led_classdev_unregister(&leds[i].cdev);
+       kfree(leds);
+
+       return ret;
+}
+
+static int __devexit twl4030_pwmled_remove(struct platform_device *pdev)
+{
+       const struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
+       struct twl4030_pwmled *leds;
+       int i;
+
+       leds = platform_get_drvdata(pdev);
+
+       for (i = 0; i < pdata->num_leds; i++) {
+               led_classdev_unregister(&leds[i].cdev);
+               cancel_work_sync(&leds[i].work);
+       }
+
+       kfree(leds);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static struct platform_driver twl4030_pwmled_driver = {
+       .driver = {
+               .name   = "leds-twl4030-pwm",
+               .owner  = THIS_MODULE,
+       },
+       .probe  = twl4030_pwmled_probe,
+       .remove = __devexit_p(twl4030_pwmled_remove),
+};
+
+static int __init twl4030_pwm_init(void)
+{
+       return platform_driver_register(&twl4030_pwmled_driver);
+}
+module_init(twl4030_pwm_init);
+
+static void __exit twl4030_pwm_exit(void)
+{
+       platform_driver_unregister(&twl4030_pwmled_driver);
+}
+module_exit(twl4030_pwm_exit);
+
+MODULE_AUTHOR("Gražvydas Ignotas");
+MODULE_DESCRIPTION("Driver for TWL4030 PWM controlled LEDs");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-twl4030-pwm");
index d345215..36324d8 100644 (file)
@@ -336,8 +336,6 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
        ovl = ovid->overlays[0];
 
        switch (pix->pixelformat) {
-       case 0:
-               break;
        case V4L2_PIX_FMT_YUYV:
                mode = OMAP_DSS_COLOR_YUV2;
                break;
@@ -359,6 +357,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
                break;
        default:
                mode = -EINVAL;
+               break;
        }
        return mode;
 }
@@ -1043,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
        strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
        strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
        cap->bus_info[0] = '\0';
-       cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+       cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+               V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
 
        return 0;
 }
@@ -1825,7 +1825,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
        ovid = &vout->vid_info;
        ovl = ovid->overlays[0];
 
-       a->flags = 0x0;
+       /* The video overlay must stay within the framebuffer and can't be
+          positioned independently. */
+       a->flags = V4L2_FBUF_FLAG_OVERLAY;
        a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
                | V4L2_FBUF_CAP_SRC_CHROMAKEY;
 
@@ -2265,13 +2267,12 @@ static struct platform_driver omap_vout_driver = {
        .driver = {
                .name = VOUT_NAME,
        },
-       .probe = omap_vout_probe,
        .remove = omap_vout_remove,
 };
 
 static int __init omap_vout_init(void)
 {
-       if (platform_driver_register(&omap_vout_driver) != 0) {
+       if (platform_driver_probe(&omap_vout_driver, omap_vout_probe) != 0) {
                printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
                return -EINVAL;
        }
index 61e70cf..5182f6d 100644 (file)
@@ -458,6 +458,88 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
 }
 EXPORT_SYMBOL(twl_i2c_read);
 
+/**
+ * twl_i2c_rmw_u8 - Reads an 8 bit register, modifies it and writes back
+ * @mod_no: module number
+ * @bits_to_clear: mask of bits to clear in the register
+ * @bits_to_set: mask of bits to set in the register
+ * @reg: register address (just offset will do)
+ *
+ * Returns result of operation - 0 is success else failure.
+ */
+int twl_i2c_rmw_u8(u8 mod_no, u8 bits_to_clear, u8 bits_to_set, u8 reg)
+{
+       u8 value_w[2] = { 0 };
+       u8 value = 0;
+       int ret;
+       u8 val;
+       int sid;
+       struct twl_client *twl;
+       struct i2c_msg *msg;
+
+       if (unlikely(mod_no > TWL_MODULE_LAST)) {
+               pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+               return -EPERM;
+       }
+       if (unlikely(!inuse)) {
+               pr_err("%s: not initialized\n", DRIVER_NAME);
+               return -EPERM;
+       }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
+       mutex_lock(&twl->xfer_lock);
+       /* [MSG1] fill the register address data */
+       msg = &twl->xfer_msg[0];
+       msg->addr = twl->address;
+       msg->len = 1;
+       msg->flags = 0; /* write the register address first */
+       val = twl_map[mod_no].base + reg;
+       msg->buf = &val;
+       /* [MSG2] fill the data rx buffer */
+       msg = &twl->xfer_msg[1];
+       msg->addr = twl->address;
+       msg->flags = I2C_M_RD;  /* Read the register value */
+       msg->len = 1;
+       msg->buf = &value;
+       ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
+       /* i2c_transfer returns number of messages transferred */
+       if (ret != 2) {
+               pr_err("%s: i2c_read failed to transfer all messages\n",
+                       DRIVER_NAME);
+               if (ret >= 0)
+                       ret = -EIO;
+               goto out;
+       }
+
+       value &= ~bits_to_clear;
+       value |= bits_to_set;
+
+       value_w[0] = twl_map[mod_no].base + reg;
+       value_w[1] = value;
+
+       msg = &twl->xfer_msg[0];
+       msg->addr = twl->address;
+       msg->len = 2;
+       msg->flags = 0;
+       msg->buf = value_w;
+       ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
+       /* i2c_transfer returns number of messages transferred */
+       if (ret != 1) {
+               pr_err("%s: i2c_write failed to transfer all messages\n",
+                       DRIVER_NAME);
+               if (ret >= 0)
+                       ret = -EIO;
+               goto out;
+       }
+
+       ret = 0;
+out:
+       mutex_unlock(&twl->xfer_lock);
+       return ret;
+}
+EXPORT_SYMBOL(twl_i2c_rmw_u8);
+
 /**
  * twl_i2c_write_u8 - Writes a 8 bit register in TWL4030/TWL5030/TWL60X0
  * @mod_no: module number
@@ -561,7 +643,6 @@ add_numbered_child(unsigned chip, const char *name, int num,
                goto err;
        }
 
-       device_init_wakeup(&pdev->dev, can_wakeup);
        pdev->dev.parent = &twl->client->dev;
 
        if (pdata) {
@@ -586,6 +667,8 @@ add_numbered_child(unsigned chip, const char *name, int num,
        }
 
        status = platform_device_add(pdev);
+       if (status == 0)
+               device_init_wakeup(&pdev->dev, can_wakeup);
 
 err:
        if (status < 0) {
@@ -610,6 +693,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
                unsigned num_consumers, unsigned long features)
 {
        unsigned sub_chip_id;
+       struct twl_regulator_driver_data drv_data;
+
        /* regulator framework demands init_data ... */
        if (!pdata)
                return NULL;
@@ -619,7 +704,19 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
                pdata->num_consumer_supplies = num_consumers;
        }
 
-       pdata->driver_data = (void *)features;
+       if (pdata->driver_data) {
+               /* If we have existing drv_data, just add the flags */
+               struct twl_regulator_driver_data *tmp;
+               tmp = pdata->driver_data;
+               tmp->features |= features;
+       } else {
+               /* add new driver data struct, used only during init */
+               drv_data.features = features;
+               drv_data.set_voltage = NULL;
+               drv_data.get_voltage = NULL;
+               drv_data.data = NULL;
+               pdata->driver_data = &drv_data;
+       }
 
        /* NOTE:  we currently ignore regulator IRQs, e.g. for short circuits */
        sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
@@ -694,8 +791,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                static struct regulator_consumer_supply usb1v8 = {
                        .supply =       "usb1v8",
                };
-               static struct regulator_consumer_supply usb3v1 = {
-                       .supply =       "usb3v1",
+               static struct regulator_consumer_supply usb3v1[] = {
+                       { .supply =     "usb3v1" },
+                       { .supply =     "bci3v1" },
                };
 
        /* First add the regulators so that they can be used by transceiver */
@@ -723,7 +821,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                                return PTR_ERR(child);
 
                        child = add_regulator_linked(TWL4030_REG_VUSB3V1,
-                                                     &usb_fixed, &usb3v1, 1,
+                                                     &usb_fixed, usb3v1, 2,
                                                      features);
                        if (IS_ERR(child))
                                return PTR_ERR(child);
@@ -744,7 +842,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                if (twl_has_regulator() && child) {
                        usb1v5.dev = child;
                        usb1v8.dev = child;
-                       usb3v1.dev = child;
+                       usb3v1[0].dev = child;
                }
        }
        if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
@@ -926,6 +1024,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
        /* twl6030 regulators */
        if (twl_has_regulator() && twl_class_is_6030() &&
                        !(features & TWL6025_SUBCLASS)) {
+               child = add_regulator(TWL6030_REG_VDD1, pdata->vdd1,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VDD2, pdata->vdd2,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VDD3, pdata->vdd3,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
                child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc,
                                        features);
                if (IS_ERR(child))
index 29f11e0..b69bb51 100644 (file)
@@ -492,7 +492,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
                        u8      bytes[4];
                } imr;
 
-               /* byte[0] gets overwriten as we write ... */
+               /* byte[0] gets overwritten as we write ... */
                imr.word = cpu_to_le32(agent->imr << 8);
                agent->imr_change_pending = false;
 
@@ -667,6 +667,7 @@ int twl4030_sih_setup(int module)
                irq_set_chip_data(irq, agent);
                irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
                                         handle_edge_irq);
+               irq_set_nested_thread(irq, 1);
                activate_irq(irq);
        }
 
index a764676..280c360 100644 (file)
@@ -34,7 +34,8 @@
 static u8 twl4030_start_script_address = 0x2b;
 
 #define PWR_P1_SW_EVENTS       0x10
-#define PWR_DEVOFF     (1<<0)
+#define PWR_DEVOFF             (1 << 0)
+#define SEQ_OFFSYNC            (1 << 0)
 
 #define PHY_TO_OFF_PM_MASTER(p)                (p - 0x36)
 #define PHY_TO_OFF_PM_RECEIVER(p)      (p - 0x5b)
@@ -440,12 +441,12 @@ static int __init load_twl4030_script(struct twl4030_script *tscript,
                err = twl4030_config_wakeup12_sequence(address);
                if (err)
                        goto out;
-               order = 1;
        }
        if (tscript->flags & TWL4030_WAKEUP3_SCRIPT) {
                err = twl4030_config_wakeup3_sequence(address);
                if (err)
                        goto out;
+               order = 1;
        }
        if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
                if (!order)
@@ -511,12 +512,27 @@ int twl4030_remove_script(u8 flags)
        return err;
 }
 
+/*
+ * In master mode, start the power off sequence.
+ * After a successful execution, TWL shuts down the power to the SoC
+ * and all peripherals connected to it.
+ */
+void twl4030_power_off(void)
+{
+       int err;
+
+       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+                              TWL4030_PM_MASTER_P1_SW_EVENTS);
+       if (err)
+               pr_err("TWL4030 Unable to power off\n");
+}
+
 void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
 {
        int err = 0;
        int i;
        struct twl4030_resconfig *resconfig;
-       u8 address = twl4030_start_script_address;
+       u8 val, address = twl4030_start_script_address;
 
        err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
                        TWL4030_PM_MASTER_KEY_CFG1,
@@ -548,6 +564,28 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
                }
        }
 
+       /* Board has to be wired properly to use this feature */
+       if (twl4030_scripts->use_poweroff && !pm_power_off) {
+               /* Default for SEQ_OFFSYNC is set, lets ensure this */
+               err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+                                     TWL4030_PM_MASTER_CFG_P123_TRANSITION);
+               if (err) {
+                       pr_warning("TWL4030 Unable to read registers\n");
+
+               } else if (!(val & SEQ_OFFSYNC)) {
+                       val |= SEQ_OFFSYNC;
+                       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+                                       TWL4030_PM_MASTER_CFG_P123_TRANSITION);
+                       if (err) {
+                               pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
+                               goto relock;
+                       }
+               }
+
+               pm_power_off = twl4030_power_off;
+       }
+
+relock:
        err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
                        TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
index 5664696..a50d0df 100644 (file)
@@ -230,6 +230,15 @@ config ENCLOSURE_SERVICES
          driver (SCSI/ATA) which supports enclosures
          or a SCSI enclosure device (SES) to use these services.
 
+config OMAP_OVERCLOCKING
+       bool "OMAP overclocking support"
+       depends on ARCH_OMAP && PM_OPP=y && PROC_FS
+       help
+         Support CPU overclocking at risk of reduced kernel/system stability.
+         Exports controls through /proc/pandora/cpu_*_max.
+         Note: after changing controls, make cpufreq do a transition (usually
+         updating governor is enough) to do the actual scaling with updated limits.
+
 config SGI_XP
        tristate "Support communication between SGI SSIs"
        depends on NET
index b26495a..1865074 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_SENSORS_BH1770)  += bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+obj-$(CONFIG_OMAP_OVERCLOCKING) += omap_overclocking.o
 obj-$(CONFIG_KGDB_TESTS)       += kgdbts.o
 obj-$(CONFIG_SGI_XP)           += sgi-xp/
 obj-$(CONFIG_SGI_GRU)          += sgi-gru/
diff --git a/drivers/misc/omap_overclocking.c b/drivers/misc/omap_overclocking.c
new file mode 100644 (file)
index 0000000..96e08cd
--- /dev/null
@@ -0,0 +1,578 @@
+/*
+ * OMAP CPU overclocking hacks
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/opp.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+
+#include <plat/omap_device.h>
+
+#define PROC_DIR       "pandora"
+#define PROC_CPUMHZ    "pandora/cpu_mhz_max"
+#define PROC_DSPMHZ    "pandora/dsp_mhz_max"
+#define PROC_CPUOPP    "pandora/cpu_opp_max"
+#define PROC_SYSMHZ    "pandora/sys_mhz_max"
+
+static struct device *mpu_dev;
+
+static struct device *iva_dev;
+static struct regulator *iva_reg;
+static struct clk *iva_clk;
+static DEFINE_MUTEX(iva_lock);
+static struct delayed_work iva_work;
+static int iva_mhz_max;
+static int iva_opp_min;
+static int iva_active;
+
+/* XXX: could use opp3xxx_data.c, but that's initdata.. */
+static const unsigned long nominal_f_mpu_35xx[] = {
+       125000000, 250000000, 500000000, 550000000, 600000000,
+};
+
+static const unsigned long nominal_f_mpu_36xx[] = {
+       300000000, 600000000, 800000000, 1000000000,
+};
+
+static const unsigned long nominal_f_iva_35xx[] = {
+       90000000,  180000000, 360000000, 400000000, 430000000,
+};
+
+static const unsigned long nominal_f_iva_36xx[] = {
+       260000000, 520000000, 660000000, 800000000,
+};
+
+static const unsigned long *nominal_freqs_mpu;
+static const unsigned long *nominal_freqs_iva;
+
+/* IVA voltages (MPU ones are managed by cpufreq) */
+static unsigned long iva_voltages[5];
+
+static int opp_max_avail, opp_max_now, opp_max_ceil;
+
+static int set_mpu_opp_max(int new_opp_max)
+{
+       int i, ret;
+
+       if (new_opp_max == opp_max_now)
+               return 0;
+
+       for (i = 1; i < new_opp_max; i++) {
+               ret = opp_enable_i(mpu_dev, i);
+               if (ret != 0)
+                       dev_err(mpu_dev, "%s: mpu opp_enable returned %d\n",
+                               __func__, ret);
+       }
+
+       for (i = new_opp_max; i < opp_max_avail; i++) {
+               ret = opp_disable_i(mpu_dev, i);
+               if (ret != 0)
+                       dev_err(mpu_dev, "%s: mpu opp_disable returned %d\n",
+                               __func__, ret);
+       }
+
+       dev_info(mpu_dev, "max MPU OPP set to %d\n", new_opp_max);
+       opp_max_now = new_opp_max;
+
+       return 0;
+}
+
+static int set_opp_max_ceil(int new_opp_max)
+{
+       opp_max_ceil = new_opp_max;
+       return set_mpu_opp_max(new_opp_max);
+}
+
+static int set_mpu_mhz_max(unsigned long new_mhz_max)
+{
+       unsigned long cur_mhz_max = 0;
+       int index, ret;
+
+       new_mhz_max *= 1000000;
+
+       if (opp_max_ceil < 1 || opp_max_ceil > opp_max_avail) {
+               pr_err("%s: corrupt opp_max_ceil: %d\n",
+                       __func__, opp_max_ceil);
+               return -EINVAL;
+       }
+
+       /* determine minimum OPP needed for given MPU clock limit,
+        * and limit that opp as maximum OPP.
+        * This is for cpufreq governors only. */
+       index = opp_max_ceil - 1;
+       while (index > 0 && new_mhz_max <= nominal_freqs_mpu[index - 1])
+               index--;
+
+       set_mpu_opp_max(index + 1);
+
+       opp_hack_get_freq(mpu_dev, index, &cur_mhz_max);
+       if (cur_mhz_max == new_mhz_max)
+               return 0;
+
+       ret = opp_hack_set_freq(mpu_dev, index, new_mhz_max);
+       if (ret != 0) {
+               dev_err(mpu_dev, "%s: opp_hack_set_freq returned %d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int get_mpu_mhz_max(void)
+{
+       unsigned long cur_mhz_max = 0;
+
+       if (opp_max_now < 1 || opp_max_now > opp_max_avail) {
+               pr_err("%s: corrupt opp_max: %d\n", __func__, opp_max_now);
+               return -EINVAL;
+       }
+
+       opp_hack_get_freq(mpu_dev, opp_max_now - 1, &cur_mhz_max);
+
+       return cur_mhz_max / 1000000;
+}
+
+static void update_iva_opp_limit(int target_mhz)
+{
+       int volt_max;
+       int i, ret;
+
+       for (i = 0; i < opp_max_ceil - 1; i++) {
+               if (target_mhz * 1000000 <= nominal_freqs_iva[i])
+                       break;
+       }
+
+       if (iva_opp_min == i + 1)
+               return;
+
+       //dev_info(iva_dev, "new IVA OPP %d for clock %d\n",
+       //      i + 1, target_mhz);
+
+       volt_max = iva_voltages[opp_max_avail - 1];
+       volt_max += volt_max * 4 / 100;
+
+       ret = regulator_set_voltage(iva_reg, iva_voltages[i], volt_max);
+       if (ret < 0)
+               dev_warn(iva_dev, "unable to set IVA OPP limits: %d\n", ret);
+       else
+               iva_opp_min = i + 1;
+}
+
+static int set_dsp_mhz_max(unsigned long new_mhz_max)
+{
+       int ret;
+
+       mutex_lock(&iva_lock);
+
+       if (iva_active && new_mhz_max > iva_mhz_max)
+               /* going up.. */
+               update_iva_opp_limit(new_mhz_max);
+
+       ret = clk_set_rate(iva_clk, new_mhz_max * 1000000);
+       if (ret != 0) {
+               dev_warn(iva_dev, "unable to change IVA clock to %lu: %d\n",
+                       new_mhz_max, ret);
+               goto out;
+       }
+
+       if (iva_active && new_mhz_max < iva_mhz_max)
+               /* going down.. */
+               update_iva_opp_limit(new_mhz_max);
+
+       iva_mhz_max = new_mhz_max;
+out:
+       mutex_unlock(&iva_lock);
+
+       return ret;
+}
+
+static int get_dsp_mhz_max(void)
+{
+       return iva_mhz_max;
+}
+
+static void iva_unneeded_work(struct work_struct *work)
+{
+       mutex_lock(&iva_lock);
+
+       update_iva_opp_limit(0);
+       iva_active = 0;
+
+       mutex_unlock(&iva_lock);
+}
+
+/*
+ * called from c64_tools
+ * DSP power state notification: on power-up apply the IVA OPP/voltage
+ * constraint immediately; on power-down release it lazily via the
+ * 2-second delayed work so short off periods don't thrash the limit.
+ */
+void dsp_power_notify(int enable)
+{
+       if (enable) {
+               /* make sure a pending lazy release doesn't race us */
+               cancel_delayed_work_sync(&iva_work);
+
+               mutex_lock(&iva_lock);
+
+               if (iva_active) {
+                       mutex_unlock(&iva_lock);
+                       return;
+               }
+
+               /* apply the OPP limit */
+               update_iva_opp_limit(iva_mhz_max);
+               iva_active = 1;
+
+               mutex_unlock(&iva_lock);
+       }
+       else {
+               /* NOTE(review): iva_active is read without iva_lock here;
+                * presumably benign since only this path and the delayed
+                * work flip it - confirm */
+               if (!iva_active)
+                       return;
+
+               /* re-arm the 2s grace period before dropping the limit */
+               cancel_delayed_work_sync(&iva_work);
+               schedule_delayed_work(&iva_work, HZ * 2);
+       }
+}
+EXPORT_SYMBOL(dsp_power_notify);
+
+/*
+ * Select the per-SoC OPP tables, enable every OPP on both MPU and IVA,
+ * cache the IVA voltage of each OPP, then program a mid-table initial
+ * IVA clock.  Returns 0 on success or a negative errno.
+ */
+static int init_opp_hacks(void)
+{
+       int iva_init_freq;
+       struct opp *opp;
+       int i, ret;
+
+       if (cpu_is_omap3630()) {
+               nominal_freqs_mpu = nominal_f_mpu_36xx;
+               nominal_freqs_iva = nominal_f_iva_36xx;
+               opp_max_avail = sizeof(nominal_f_mpu_36xx) / sizeof(nominal_f_mpu_36xx[0]);
+               opp_max_ceil = 2;
+       } else if (cpu_is_omap34xx()) {
+               nominal_freqs_mpu = nominal_f_mpu_35xx;
+               nominal_freqs_iva = nominal_f_iva_35xx;
+               opp_max_avail = sizeof(nominal_f_mpu_35xx) / sizeof(nominal_f_mpu_35xx[0]);
+               opp_max_ceil = opp_max_avail;
+       } else {
+               dev_err(mpu_dev, "%s: unsupported CPU\n", __func__);
+               return -ENODEV;
+       }
+       opp_max_now = opp_max_ceil;
+
+       for (i = 0; i < opp_max_avail; i++) {
+               /* enable all OPPs for MPU so that cpufreq can find out
+                * maximum voltage to supply to regulator as max */
+               ret = opp_enable_i(mpu_dev, i);
+               if (ret != 0) {
+                       dev_err(mpu_dev, "opp_enable returned %d\n", ret);
+                       return ret;
+               }
+
+               ret = opp_enable_i(iva_dev, i);
+               if (ret != 0) {
+                       dev_err(iva_dev, "opp_enable returned %d\n", ret);
+                       return ret;
+               }
+
+               opp = opp_find_freq_exact(iva_dev, nominal_freqs_iva[i], true);
+               if (IS_ERR(opp)) {
+                       dev_err(iva_dev, "missing opp %d, %lu\n",
+                               i, nominal_freqs_iva[i]);
+                       return PTR_ERR(opp);
+               }
+               iva_voltages[i] = opp_get_voltage(opp);
+       }
+
+       /* i == opp_max_avail here; pick a middle-of-the-table frequency */
+       iva_init_freq = nominal_freqs_iva[(i + 1) / 2];
+       ret = clk_set_rate(iva_clk, iva_init_freq);
+       if (ret == 0) {
+               iva_mhz_max = iva_init_freq / 1000000;
+               dev_info(iva_dev, "IVA freq set to %dMHz\n", iva_mhz_max);
+       }
+       else
+               dev_err(iva_dev, "IVA freq set failed: %d\n", ret);
+
+       /* a failed initial IVA clk_set_rate is not fatal */
+       return 0;
+}
+
+/*
+ * Reprogram the CORE clock (dpll3_m2) to 'rate' MHz.
+ * Returns 0 on success or a negative errno.
+ */
+static int set_sys_mhz_max(unsigned long rate)
+{
+       struct clk *dpll3_m2_ck;
+       int ret;
+
+       rate *= 1000000;        /* MHz -> Hz */
+
+       dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
+       if (IS_ERR(dpll3_m2_ck)) {
+               pr_err("%s: dpll3_m2_clk not available: %ld\n",
+                       __func__, PTR_ERR(dpll3_m2_ck));
+               return -ENODEV;
+       }
+
+       pr_info("Reprogramming CORE clock to %luHz\n", rate);
+       ret = clk_set_rate(dpll3_m2_ck, rate);
+       if (ret)
+               pr_err("dpll3_m2_clk rate change failed: %d\n", ret);
+
+       clk_put(dpll3_m2_ck);
+
+       return ret;
+}
+
+/*
+ * Read back the current CORE clock (dpll3_m2) rate.
+ * Returns the rate in MHz, or -ENODEV if the clock cannot be looked up.
+ */
+static int get_sys_mhz_max(void)
+{
+       struct clk *dpll3_m2_ck;
+       int ret;
+
+       dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
+       if (IS_ERR(dpll3_m2_ck)) {
+               pr_err("%s: dpll3_m2_clk not available: %ld\n",
+                       __func__, PTR_ERR(dpll3_m2_ck));
+               return -ENODEV;
+       }
+
+       ret = clk_get_rate(dpll3_m2_ck);
+       clk_put(dpll3_m2_ck);
+
+       return ret / 1000000;   /* Hz -> MHz */
+}
+
+/*
+ * Legacy procfs read helper: print 'val' as a decimal line into 'page'
+ * and fill in *start/*eof per the read_proc_t contract.
+ */
+static int proc_read_val(char *page, char **start, off_t off, int count,
+               int *eof, int val)
+{
+       char *p = page;
+       int len;
+
+       p += sprintf(p, "%d\n", val);
+
+       /* amount of data remaining past the caller's offset */
+       len = (p - page) - off;
+       if (len < 0)
+               len = 0;
+
+       /* EOF once the rest fits in the caller's buffer */
+       *eof = (len <= count) ? 1 : 0;
+       *start = page + off;
+
+       return len;
+}
+
+/*
+ * Legacy procfs write helper: copy up to sizeof(buff)-1 bytes from
+ * userspace and parse them as an unsigned long into *val.
+ * Returns the number of bytes consumed, or a negative errno on a
+ * userspace fault or parse failure.
+ */
+static int proc_write_val(struct file *file, const char __user *buffer,
+               unsigned long count, unsigned long *val)
+{
+       char buff[32];
+       long len;
+       int ret;
+
+       len = strncpy_from_user(buff, buffer,
+                       count < sizeof(buff) ? count : sizeof(buff) - 1);
+       /* strncpy_from_user returns -EFAULT on a bad user pointer;
+        * the old code used that as an array index */
+       if (len < 0)
+               return len;
+       buff[len] = 0;
+
+       ret = strict_strtoul(buff, 0, val);
+       if (ret < 0) {
+               pr_err("error %i parsing %s\n", ret, buff);
+               return ret;
+       }
+
+       return len;
+}
+
+/* /proc read handler: report the current MPU clock limit in MHz */
+static int cpu_clk_read(char *page, char **start, off_t off, int count,
+               int *eof, void *data)
+{
+       return proc_read_val(page, start, off, count, eof, get_mpu_mhz_max());
+}
+
+/* /proc write handler: parse a MHz value and apply it as the MPU
+ * clock limit */
+static int cpu_clk_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       unsigned long val;
+       int ret, retval;
+
+       retval = proc_write_val(file, buffer, count, &val);
+       if (retval < 0)
+               return retval;
+
+       ret = set_mpu_mhz_max(val);
+       if (ret < 0)
+               return ret;
+
+       /* report bytes consumed on success */
+       return retval;
+}
+
+/* /proc read handler: report the current IVA (DSP) clock limit in MHz */
+static int dsp_clk_read(char *page, char **start, off_t off, int count,
+               int *eof, void *data)
+{
+       return proc_read_val(page, start, off, count, eof, get_dsp_mhz_max());
+}
+
+/* /proc write handler: parse a MHz value and apply it as the IVA
+ * (DSP) clock limit */
+static int dsp_clk_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       unsigned long val;
+       int ret, retval;
+
+       retval = proc_write_val(file, buffer, count, &val);
+       if (retval < 0)
+               return retval;
+
+       ret = set_dsp_mhz_max(val);
+       if (ret < 0)
+               return ret;
+
+       /* report bytes consumed on success */
+       return retval;
+}
+
+/* /proc read handler: report the current maximum allowed MPU OPP */
+static int cpu_maxopp_read(char *page, char **start, off_t off, int count,
+               int *eof, void *data)
+{
+       return proc_read_val(page, start, off, count, eof, opp_max_ceil);
+}
+
+/* /proc write handler: set the maximum allowed MPU OPP; values above
+ * the number of available OPPs are clamped, values below 1 rejected */
+static int cpu_maxopp_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       unsigned long val;
+       int ret, retval;
+
+       retval = proc_write_val(file, buffer, count, &val);
+       if (retval < 0)
+               return retval;
+
+       /* silently clamp to the highest available OPP */
+       if (val > opp_max_avail)
+               val = opp_max_avail;
+
+       if (val < 1)
+               return -EINVAL;
+
+       ret = set_opp_max_ceil(val);
+       if (ret != 0)
+               return ret;
+
+       /* report bytes consumed on success */
+       return retval;
+}
+
+/* /proc read handler: report the current CORE (dpll3_m2) clock in MHz */
+static int sys_clk_read(char *page, char **start, off_t off, int count,
+               int *eof, void *data)
+{
+       return proc_read_val(page, start, off, count, eof, get_sys_mhz_max());
+}
+
+/* /proc write handler: parse a MHz value and reprogram the CORE clock */
+static int sys_clk_write(struct file *file, const char __user *buffer,
+               unsigned long count, void *data)
+{
+       unsigned long val;
+       int ret, retval;
+
+       retval = proc_write_val(file, buffer, count, &val);
+       if (retval < 0)
+               return retval;
+
+       ret = set_sys_mhz_max(val);
+       if (ret < 0)
+               return ret;
+
+       /* report bytes consumed on success */
+       return retval;
+}
+
+/*
+ * Create a read/write /proc entry.  If the first create attempt fails,
+ * assume the PROC_DIR directory does not exist yet, make it and retry.
+ * NOTE(review): presumably 'name' carries the PROC_DIR prefix, so the
+ * retry succeeds once the directory exists - confirm against the
+ * PROC_* definitions.
+ */
+static void proc_create_rw(const char *name, void *pdata,
+                          read_proc_t *read_proc, write_proc_t *write_proc)
+{
+       struct proc_dir_entry *pret;
+
+       pret = create_proc_entry(name, S_IWUSR | S_IRUGO, NULL);
+       if (pret == NULL) {
+               proc_mkdir(PROC_DIR, NULL);
+               pret = create_proc_entry(name, S_IWUSR | S_IRUGO, NULL);
+               if (pret == NULL) {
+                       pr_err("%s: failed to create proc file %s\n",
+                               __func__, name);
+                       return;
+               }
+       }
+
+       pret->data = pdata;
+       pret->read_proc = read_proc;
+       pret->write_proc = write_proc;
+}
+
+/*
+ * Module init: look up the MPU/IVA devices, grab the IVA supply
+ * regulator and the DPLL2 (IVA) clock, set up the OPP tables and
+ * create the /proc control files.  Returns 0 or a negative errno.
+ */
+static int pndctrl_init(void)
+{
+       int ret;
+
+       INIT_DELAYED_WORK(&iva_work, iva_unneeded_work);
+
+       mpu_dev = omap_device_get_by_hwmod_name("mpu");
+       if (IS_ERR(mpu_dev)) {
+               pr_err("%s: mpu device not available (%ld)\n",
+                       __func__, PTR_ERR(mpu_dev));
+               return -ENODEV;
+       }
+
+       iva_dev = omap_device_get_by_hwmod_name("iva");
+       if (IS_ERR(iva_dev)) {
+               pr_err("%s: iva device not available (%ld)\n",
+                       __func__, PTR_ERR(iva_dev));
+               return -ENODEV;
+       }
+
+       /* regulator to constrain OPPs while DSP is running */
+       iva_reg = regulator_get(iva_dev, "vcc");
+       if (IS_ERR(iva_reg)) {
+               /* old message wrongly said "MPU regulator" */
+               dev_err(iva_dev, "unable to get IVA regulator\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Ensure physical regulator is present.
+        * (e.g. could be dummy regulator.)
+        */
+       if (regulator_get_voltage(iva_reg) < 0) {
+               dev_err(iva_dev, "IVA regulator is not physical?\n");
+               ret = -ENODEV;
+               goto fail_reg;
+       }
+
+       iva_clk = clk_get(NULL, "dpll2_ck");
+       if (IS_ERR(iva_clk)) {
+               dev_err(iva_dev, "IVA clock not available.\n");
+               ret = PTR_ERR(iva_clk);
+               goto fail_reg;
+       }
+
+       ret = init_opp_hacks();
+       if (ret != 0) {
+               pr_err("init_opp_hacks failed: %d\n", ret);
+               goto fail_opp;
+       }
+
+       proc_create_rw(PROC_CPUMHZ, NULL, cpu_clk_read, cpu_clk_write);
+       proc_create_rw(PROC_DSPMHZ, NULL, dsp_clk_read, dsp_clk_write);
+       proc_create_rw(PROC_CPUOPP, NULL, cpu_maxopp_read, cpu_maxopp_write);
+       proc_create_rw(PROC_SYSMHZ, NULL, sys_clk_read, sys_clk_write);
+
+       pr_info("OMAP overclocker loaded.\n");
+       return 0;
+
+fail_opp:
+       clk_put(iva_clk);
+fail_reg:
+       regulator_put(iva_reg);
+       return ret;
+}
+
+
+/* module exit: remove /proc entries and release the clock/regulator */
+static void pndctrl_cleanup(void)
+{
+       remove_proc_entry(PROC_SYSMHZ, NULL);
+       remove_proc_entry(PROC_CPUOPP, NULL);
+       remove_proc_entry(PROC_DSPMHZ, NULL);
+       remove_proc_entry(PROC_CPUMHZ, NULL);
+
+       /* make sure the delayed OPP-release work is not left running */
+       cancel_delayed_work_sync(&iva_work);
+       regulator_put(iva_reg);
+       clk_put(iva_clk);
+}
+
+module_init(pndctrl_init);
+module_exit(pndctrl_cleanup);
+
+MODULE_AUTHOR("Gražvydas Ignotas");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OMAP overclocking support");
index f53d5c8..ef08f7b 100644 (file)
@@ -119,6 +119,7 @@ enum mmc_blk_status {
        MMC_BLK_ABORT,
        MMC_BLK_DATA_ERR,
        MMC_BLK_ECC_ERR,
+       MMC_BLK_NOMEDIUM,
 };
 
 module_param(perdev_minors, int, 0444);
@@ -318,7 +319,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
-               goto cmd_done;
+               goto cmd_err;
        }
 
        card = md->queue.card;
@@ -417,6 +418,7 @@ cmd_rel_host:
 
 cmd_done:
        mmc_blk_put(md);
+cmd_err:
        kfree(idata->buf);
        kfree(idata);
        return err;
@@ -487,7 +489,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
-       unsigned int timeout_us;
 
        struct scatterlist sg;
 
@@ -507,23 +508,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-       data.timeout_ns = card->csd.tacc_ns * 100;
-       data.timeout_clks = card->csd.tacc_clks * 100;
-
-       timeout_us = data.timeout_ns / 1000;
-       timeout_us += data.timeout_clks * 1000 /
-               (card->host->ios.clock / 1000);
-
-       if (timeout_us > 100000) {
-               data.timeout_ns = 100000000;
-               data.timeout_clks = 0;
-       }
-
        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
+       mmc_set_data_timeout(&data, card);
 
        mrq.cmd = &cmd;
        mrq.data = &data;
@@ -573,6 +563,7 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
        return err;
 }
 
+#define ERR_NOMEDIUM   3
 #define ERR_RETRY      2
 #define ERR_ABORT      1
 #define ERR_CONTINUE   0
@@ -640,6 +631,9 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        u32 status, stop_status = 0;
        int err, retry;
 
+       if (mmc_card_removed(card))
+               return ERR_NOMEDIUM;
+
        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
@@ -656,8 +650,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        }
 
        /* We couldn't get a response from the card.  Give up. */
-       if (err)
+       if (err) {
+               /* Check if the card is removed */
+               if (mmc_detect_card_removed(card->host))
+                       return ERR_NOMEDIUM;
                return ERR_ABORT;
+       }
 
        /* Flag ECC errors */
        if ((status & R1_CARD_ECC_FAILED) ||
@@ -804,9 +802,7 @@ out:
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
-       spin_lock_irq(&md->lock);
-       __blk_end_request(req, err, blk_rq_bytes(req));
-       spin_unlock_irq(&md->lock);
+       blk_end_request(req, err, blk_rq_bytes(req));
 
        return err ? 0 : 1;
 }
@@ -888,9 +884,7 @@ out_retry:
        if (!err)
                mmc_blk_reset_success(md, type);
 out:
-       spin_lock_irq(&md->lock);
-       __blk_end_request(req, err, blk_rq_bytes(req));
-       spin_unlock_irq(&md->lock);
+       blk_end_request(req, err, blk_rq_bytes(req));
 
        return err ? 0 : 1;
 }
@@ -905,9 +899,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
        if (ret)
                ret = -EIO;
 
-       spin_lock_irq(&md->lock);
-       __blk_end_request_all(req, ret);
-       spin_unlock_irq(&md->lock);
+       blk_end_request_all(req, ret);
 
        return ret ? 0 : 1;
 }
@@ -969,6 +961,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                        return MMC_BLK_ABORT;
+               case ERR_NOMEDIUM:
+                       return MMC_BLK_NOMEDIUM;
                case ERR_CONTINUE:
                        break;
                }
@@ -1217,14 +1211,10 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 
                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
-                       spin_lock_irq(&md->lock);
-                       ret = __blk_end_request(req, 0, blocks << 9);
-                       spin_unlock_irq(&md->lock);
+                       ret = blk_end_request(req, 0, blocks << 9);
                }
        } else {
-               spin_lock_irq(&md->lock);
-               ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-               spin_unlock_irq(&md->lock);
+               ret = blk_end_request(req, 0, brq->data.bytes_xfered);
        }
        return ret;
 }
@@ -1266,10 +1256,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                         * A block was successfully transferred.
                         */
                        mmc_blk_reset_success(md, type);
-                       spin_lock_irq(&md->lock);
-                       ret = __blk_end_request(req, 0,
+                       ret = blk_end_request(req, 0,
                                                brq->data.bytes_xfered);
-                       spin_unlock_irq(&md->lock);
                        /*
                         * If the blk_end_request function returns non-zero even
                         * though all data has been transferred and no errors
@@ -1321,13 +1309,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
-                       spin_lock_irq(&md->lock);
-                       ret = __blk_end_request(req, -EIO,
+                       ret = blk_end_request(req, -EIO,
                                                brq->data.blksz);
-                       spin_unlock_irq(&md->lock);
                        if (!ret)
                                goto start_new_req;
                        break;
+               case MMC_BLK_NOMEDIUM:
+                       goto cmd_abort;
                }
 
                if (ret) {
@@ -1343,15 +1331,21 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
        return 1;
 
  cmd_abort:
-       spin_lock_irq(&md->lock);
+       if (mmc_card_removed(card))
+               req->cmd_flags |= REQ_QUIET;
        while (ret)
-               ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
-       spin_unlock_irq(&md->lock);
+               ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 
  start_new_req:
        if (rqc) {
-               mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-               mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+               if (mmc_card_removed(card)) {
+                       rqc->cmd_flags |= REQ_QUIET;
+                       blk_end_request_all(rqc, -EIO);
+               } else {
+                       mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+                       mmc_start_req(card->host,
+                                     &mq->mqrq_cur->mmc_active, NULL);
+               }
        }
 
        return 0;
@@ -1370,9 +1364,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                if (req) {
-                       spin_lock_irq(&md->lock);
-                       __blk_end_request_all(req, -EIO);
-                       spin_unlock_irq(&md->lock);
+                       blk_end_request_all(req, -EIO);
                }
                ret = 0;
                goto out;
@@ -1775,7 +1767,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 }
 
 #ifdef CONFIG_PM
-static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
+static int mmc_blk_suspend(struct mmc_card *card)
 {
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);
index b038c4a..e99bdc1 100644 (file)
@@ -2949,7 +2949,7 @@ static void mmc_test_free_dbgfs_file(struct mmc_card *card)
 }
 
 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
-       const char *name, mode_t mode, const struct file_operations *fops)
+       const char *name, umode_t mode, const struct file_operations *fops)
 {
        struct dentry *file = NULL;
        struct mmc_test_dbgfs_file *df;
index 78690f2..996f8e3 100644 (file)
@@ -29,6 +29,8 @@
  */
 static int mmc_prep_request(struct request_queue *q, struct request *req)
 {
+       struct mmc_queue *mq = q->queuedata;
+
        /*
         * We only like normal block requests and discards.
         */
@@ -37,6 +39,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
                return BLKPREP_KILL;
        }
 
+       if (mq && mmc_card_removed(mq->card))
+               return BLKPREP_KILL;
+
        req->cmd_flags |= REQ_DONTPREP;
 
        return BLKPREP_OK;
index 6be4924..bba990b 100644 (file)
@@ -122,14 +122,14 @@ static int mmc_bus_remove(struct device *dev)
        return 0;
 }
 
-static int mmc_bus_suspend(struct device *dev, pm_message_t state)
+static int mmc_bus_suspend(struct device *dev)
 {
        struct mmc_driver *drv = to_mmc_driver(dev->driver);
        struct mmc_card *card = mmc_dev_to_card(dev);
        int ret = 0;
 
        if (dev->driver && drv->suspend)
-               ret = drv->suspend(card, state);
+               ret = drv->suspend(card);
        return ret;
 }
 
@@ -165,20 +165,14 @@ static int mmc_runtime_idle(struct device *dev)
        return pm_runtime_suspend(dev);
 }
 
+#endif /* !CONFIG_PM_RUNTIME */
+
 static const struct dev_pm_ops mmc_bus_pm_ops = {
-       .runtime_suspend        = mmc_runtime_suspend,
-       .runtime_resume         = mmc_runtime_resume,
-       .runtime_idle           = mmc_runtime_idle,
+       SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume,
+                       mmc_runtime_idle)
+       SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
 };
 
-#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define MMC_PM_OPS_PTR NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
-
 static struct bus_type mmc_bus_type = {
        .name           = "mmc",
        .dev_attrs      = mmc_dev_attrs,
@@ -186,9 +180,7 @@ static struct bus_type mmc_bus_type = {
        .uevent         = mmc_bus_uevent,
        .probe          = mmc_bus_probe,
        .remove         = mmc_bus_remove,
-       .suspend        = mmc_bus_suspend,
-       .resume         = mmc_bus_resume,
-       .pm             = MMC_PM_OPS_PTR,
+       .pm             = &mmc_bus_pm_ops,
 };
 
 int mmc_register_bus(void)
index fc7386e..4dd99b2 100644 (file)
@@ -140,7 +140,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                        cmd->retries = 0;
        }
 
-       if (err && cmd->retries) {
+       if (err && cmd->retries && !mmc_card_removed(host->card)) {
                /*
                 * Request starter must handle retries - see
                 * mmc_wait_for_req_done().
@@ -247,6 +247,11 @@ static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 {
        init_completion(&mrq->completion);
        mrq->done = mmc_wait_done;
+       if (mmc_card_removed(host->card)) {
+               mrq->cmd->error = -ENOMEDIUM;
+               complete(&mrq->completion);
+               return;
+       }
        mmc_start_request(host, mrq);
 }
 
@@ -259,7 +264,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
                wait_for_completion(&mrq->completion);
 
                cmd = mrq->cmd;
-               if (!cmd->error || !cmd->retries)
+               if (!cmd->error || !cmd->retries ||
+                   mmc_card_removed(host->card))
                        break;
 
                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
@@ -1456,7 +1462,7 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
        WARN_ON(host->removed);
        spin_unlock_irqrestore(&host->lock, flags);
 #endif
-
+       host->detect_change = 1;
        mmc_schedule_delayed_work(&host->detect, delay);
 }
 
@@ -2052,6 +2058,43 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
        return -EIO;
 }
 
+/*
+ * Probe the bus to see whether the card has gone away.
+ * Returns nonzero if the card is (now) considered removed.
+ */
+int _mmc_detect_card_removed(struct mmc_host *host)
+{
+       int ret;
+
+       /* nonremovable cards, or buses without an .alive() probe,
+        * can never be detected as removed */
+       if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
+               return 0;
+
+       if (!host->card || mmc_card_removed(host->card))
+               return 1;
+
+       /* a nonzero .alive() result means the card stopped responding */
+       ret = host->bus_ops->alive(host);
+       if (ret) {
+               mmc_card_set_removed(host->card);
+               pr_debug("%s: card remove detected\n", mmc_hostname(host));
+       }
+
+       return ret;
+}
+
+/*
+ * Cached wrapper around _mmc_detect_card_removed(): only hits the bus
+ * when a change was signalled (host->detect_change) or the host needs
+ * polling.  Caller must hold the host claim.
+ */
+int mmc_detect_card_removed(struct mmc_host *host)
+{
+       struct mmc_card *card = host->card;
+
+       WARN_ON(!host->claimed);
+       /*
+        * The card will be considered unchanged unless we have been asked to
+        * detect a change or host requires polling to provide card detection.
+        */
+       if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
+               return mmc_card_removed(card);
+
+       /* consume the pending detect request before the bus probe */
+       host->detect_change = 0;
+
+       return _mmc_detect_card_removed(host);
+}
+EXPORT_SYMBOL(mmc_detect_card_removed);
+
 void mmc_rescan(struct work_struct *work)
 {
        static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
@@ -2072,6 +2115,8 @@ void mmc_rescan(struct work_struct *work)
            && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);
 
+       host->detect_change = 0;
+
        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
index 14664f1..3400924 100644 (file)
@@ -24,6 +24,7 @@ struct mmc_bus_ops {
        int (*resume)(struct mmc_host *);
        int (*power_save)(struct mmc_host *);
        int (*power_restore)(struct mmc_host *);
+       int (*alive)(struct mmc_host *);
 };
 
 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,6 +60,8 @@ void mmc_rescan(struct work_struct *work);
 void mmc_start_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
+int _mmc_detect_card_removed(struct mmc_host *host);
+
 int mmc_attach_mmc(struct mmc_host *host);
 int mmc_attach_sd(struct mmc_host *host);
 int mmc_attach_sdio(struct mmc_host *host);
index c1aec06..9485825 100644 (file)
@@ -1104,6 +1104,14 @@ static void mmc_remove(struct mmc_host *host)
        host->card = NULL;
 }
 
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_alive(struct mmc_host *host)
+{
+       /* nonzero (error) means the card didn't answer the status
+        * query - treated as "removed" by _mmc_detect_card_removed() */
+       return mmc_send_status(host->card, NULL);
+}
+
 /*
  * Card detection callback from host.
  */
@@ -1119,7 +1127,7 @@ static void mmc_detect(struct mmc_host *host)
        /*
         * Just check if our card has been removed.
         */
-       err = mmc_send_status(host->card, NULL);
+       err = _mmc_detect_card_removed(host);
 
        mmc_release_host(host);
 
@@ -1224,6 +1232,7 @@ static const struct mmc_bus_ops mmc_ops = {
        .suspend = NULL,
        .resume = NULL,
        .power_restore = mmc_power_restore,
+       .alive = mmc_alive,
 };
 
 static const struct mmc_bus_ops mmc_ops_unsafe = {
@@ -1234,6 +1243,7 @@ static const struct mmc_bus_ops mmc_ops_unsafe = {
        .suspend = mmc_suspend,
        .resume = mmc_resume,
        .power_restore = mmc_power_restore,
+       .alive = mmc_alive,
 };
 
 static void mmc_attach_bus_ops(struct mmc_host *host)
index f2a05ea..2256b21 100644 (file)
@@ -1018,6 +1018,14 @@ static void mmc_sd_remove(struct mmc_host *host)
        host->card = NULL;
 }
 
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_sd_alive(struct mmc_host *host)
+{
+       /* nonzero (error) means the card didn't answer the status
+        * query - treated as "removed" by _mmc_detect_card_removed() */
+       return mmc_send_status(host->card, NULL);
+}
+
 /*
  * Card detection callback from host.
  */
@@ -1033,7 +1041,7 @@ static void mmc_sd_detect(struct mmc_host *host)
        /*
         * Just check if our card has been removed.
         */
-       err = mmc_send_status(host->card, NULL);
+       err = _mmc_detect_card_removed(host);
 
        mmc_release_host(host);
 
@@ -1102,6 +1110,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
        .suspend = NULL,
        .resume = NULL,
        .power_restore = mmc_sd_power_restore,
+       .alive = mmc_sd_alive,
 };
 
 static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
@@ -1110,6 +1119,7 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
        .suspend = mmc_sd_suspend,
        .resume = mmc_sd_resume,
        .power_restore = mmc_sd_power_restore,
+       .alive = mmc_sd_alive,
 };
 
 static void mmc_sd_attach_bus_ops(struct mmc_host *host)
index 558a495..a3acaef 100644 (file)
@@ -549,6 +549,14 @@ static void mmc_sdio_remove(struct mmc_host *host)
        host->card = NULL;
 }
 
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_sdio_alive(struct mmc_host *host)
+{
+       /* nonzero (error) means the card could not be (re)selected -
+        * treated as "removed" by _mmc_detect_card_removed() */
+       return mmc_select_card(host->card);
+}
+
 /*
  * Card detection callback from host.
  */
@@ -571,7 +579,7 @@ static void mmc_sdio_detect(struct mmc_host *host)
        /*
         * Just check if our card has been removed.
         */
-       err = mmc_select_card(host->card);
+       err = _mmc_detect_card_removed(host);
 
        mmc_release_host(host);
 
@@ -749,6 +757,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
        .suspend = mmc_sdio_suspend,
        .resume = mmc_sdio_resume,
        .power_restore = mmc_sdio_power_restore,
+       .alive = mmc_sdio_alive,
 };
 
 
index b1f3168..8f6f5ac 100644 (file)
@@ -196,6 +196,9 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
        else
                mval = min(mval, func->max_blksize);
 
+       if (mmc_card_broken_byte_mode_512(func->card))
+               return min(mval, 511u);
+
        return min(mval, 512u); /* maximum size for byte mode */
 }
 
@@ -314,7 +317,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
                        func->card->host->max_seg_size / func->cur_blksize);
                max_blocks = min(max_blocks, 511u);
 
-               while (remainder > func->cur_blksize) {
+               while (remainder >= func->cur_blksize) {
                        unsigned blocks;
 
                        blocks = remainder / func->cur_blksize;
@@ -339,8 +342,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
        while (remainder > 0) {
                size = min(remainder, sdio_max_byte_size(func));
 
+               /* Indicate byte mode by setting "blocks" = 0 */
                ret = mmc_io_rw_extended(func->card, write, func->num, addr,
-                        incr_addr, buf, 1, size);
+                        incr_addr, buf, 0, size);
                if (ret)
                        return ret;
 
index b0517cc..d29e206 100644 (file)
@@ -128,8 +128,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 
        BUG_ON(!card);
        BUG_ON(fn > 7);
-       BUG_ON(blocks == 1 && blksz > 512);
-       WARN_ON(blocks == 0);
        WARN_ON(blksz == 0);
 
        /* sanity check */
@@ -144,22 +142,20 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
        cmd.arg |= fn << 28;
        cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
        cmd.arg |= addr << 9;
-       if (blocks == 1 && blksz < 512)
-               cmd.arg |= blksz;                       /* byte mode */
-       else if (blocks == 1 && blksz == 512 &&
-                !(mmc_card_broken_byte_mode_512(card)))
-               cmd.arg |= 0;                           /* byte mode, 0==512 */
+       if (blocks == 0)
+               cmd.arg |= (blksz == 512) ? 0 : blksz;  /* byte mode */
        else
                cmd.arg |= 0x08000000 | blocks;         /* block mode */
        cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
 
        data.blksz = blksz;
-       data.blocks = blocks;
+       /* Code in host drivers/fwk assumes that "blocks" always is >=1 */
+       data.blocks = blocks ? blocks : 1;
        data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
 
-       sg_init_one(&sg, buf, blksz * blocks);
+       sg_init_one(&sg, buf, data.blksz * data.blocks);
 
        mmc_set_data_timeout(&data, card);
 
index bc27065..6a960c1 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
-#include <linux/workqueue.h>
 #include <linux/timer.h>
 #include <linux/clk.h>
 #include <linux/mmc/host.h>
@@ -163,7 +162,6 @@ struct omap_hsmmc_host {
         */
        struct  regulator       *vcc;
        struct  regulator       *vcc_aux;
-       struct  work_struct     mmc_carddetect_work;
        void    __iomem         *base;
        resource_size_t         mapbase;
        spinlock_t              irq_lock; /* Prevent races with irq handler */
@@ -177,6 +175,7 @@ struct omap_hsmmc_host {
        int                     suspended;
        int                     irq;
        int                     use_dma, dma_ch;
+       int                     dma_ch_tx, dma_ch_rx;
        int                     dma_line_tx, dma_line_rx;
        int                     slot_id;
        int                     got_dbclk;
@@ -586,7 +585,7 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
                irq_mask &= ~DTO_ENABLE;
 
        OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
-       OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+       OMAP_HSMMC_WRITE(host->base, ISE, host->use_dma ? irq_mask : 0);
        OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
 }
 
@@ -854,6 +853,55 @@ omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
 
+/* for hosts with 35xx erratum 2.1.1.128 */
+static ssize_t
+omap_hsmmc_show_unsafe_read(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+       int val = 0;
+
+       if (!(mmc->caps2 & MMC_CAP2_NO_MULTI_READ)) {
+               val = 1;
+               if (mmc->f_max == OMAP_MMC_MAX_CLOCK)
+                       val = 2;
+       }
+
+       return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t
+omap_hsmmc_set_unsafe_read(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret)
+               return -EINVAL;
+
+       switch (val) {
+       case 0:
+               mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
+               mmc->f_max = OMAP_MMC_MAX_CLOCK;
+               break;
+       case 1:
+               mmc->caps2 &= ~MMC_CAP2_NO_MULTI_READ;
+               mmc->f_max = 32000000;
+               break;
+       case 2:
+               mmc->caps2 &= ~MMC_CAP2_NO_MULTI_READ;
+               mmc->f_max = OMAP_MMC_MAX_CLOCK;
+               break;
+       }
+
+       return count;
+}
+static DEVICE_ATTR(unsafe_read, S_IWUSR | S_IRUGO,
+       omap_hsmmc_show_unsafe_read, omap_hsmmc_set_unsafe_read);
+
 /*
  * Configure the response type and send the cmd.
  */
@@ -991,6 +1039,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
                omap_hsmmc_request_done(host, cmd->mrq);
 }
 
+static void omap_hsmmc_free_dma(struct omap_hsmmc_host *host)
+{
+       int dma_ch;
+
+       dma_ch = xchg(&host->dma_ch_tx, -1);
+       if (dma_ch != -1)
+               omap_free_dma(dma_ch);
+
+       dma_ch = xchg(&host->dma_ch_rx, -1);
+       if (dma_ch != -1)
+               omap_free_dma(dma_ch);
+}
+
 /*
  * DMA clean up for command errors
  */
@@ -1009,9 +1070,9 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
                dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
                        host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
-               omap_free_dma(dma_ch);
                host->data->host_cookie = 0;
        }
+       omap_hsmmc_free_dma(host);
        host->data = NULL;
 }
 
@@ -1093,19 +1154,15 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
        struct mmc_data *data;
        int end_cmd = 0, end_trans = 0;
 
-       if (!host->req_in_progress) {
-               do {
-                       OMAP_HSMMC_WRITE(host->base, STAT, status);
-                       /* Flush posted write */
-                       status = OMAP_HSMMC_READ(host->base, STAT);
-               } while (status & INT_EN_MASK);
+       if (unlikely(!host->req_in_progress)) {
+               OMAP_HSMMC_WRITE(host->base, STAT, status);
                return;
        }
 
        data = host->data;
        dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
 
-       if (status & ERR) {
+       if (unlikely(status & ERR)) {
                omap_hsmmc_dbg_report_irq(host, status);
                if ((status & CMD_TIMEOUT) ||
                        (status & CMD_CRC)) {
@@ -1280,17 +1337,16 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
 }
 
 /*
- * Work Item to notify the core about card insertion/removal
+ * irq handler to notify the core about card insertion/removal
  */
-static void omap_hsmmc_detect(struct work_struct *work)
+static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
 {
-       struct omap_hsmmc_host *host =
-               container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
+       struct omap_hsmmc_host *host = dev_id;
        struct omap_mmc_slot_data *slot = &mmc_slot(host);
        int carddetect;
 
        if (host->suspended)
-               return;
+               return IRQ_HANDLED;
 
        sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
 
@@ -1305,19 +1361,6 @@ static void omap_hsmmc_detect(struct work_struct *work)
                mmc_detect_change(host->mmc, (HZ * 200) / 1000);
        else
                mmc_detect_change(host->mmc, (HZ * 50) / 1000);
-}
-
-/*
- * ISR for handling card insertion and removal
- */
-static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
-{
-       struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
-
-       if (host->suspended)
-               return IRQ_HANDLED;
-       schedule_work(&host->mmc_carddetect_work);
-
        return IRQ_HANDLED;
 }
 
@@ -1333,11 +1376,28 @@ static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
        return sync_dev;
 }
 
+static void omap_hsmmc_config_dma_params_once(struct omap_hsmmc_host *host,
+                                             struct mmc_data *data,
+                                             int dma_ch)
+{
+       /* pandora hack: only benefits wifi, so only set there, just in case */
+       if (host->id != OMAP_MMC3_DEVID)
+               return;
+
+       if (data->flags & MMC_DATA_WRITE) {
+               omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_16);
+               omap_set_dma_src_data_pack(dma_ch, 1);
+       } else {
+               omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_16);
+               omap_set_dma_dest_data_pack(dma_ch, 1);
+       }
+}
+
 static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
                                       struct mmc_data *data,
                                       struct scatterlist *sgl)
 {
-       int blksz, nblk, dma_ch;
+       int blksz, nblk, dma_ch, sync;
 
        dma_ch = host->dma_ch;
        if (data->flags & MMC_DATA_WRITE) {
@@ -1345,11 +1405,13 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
                        (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
                omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
                        sg_dma_address(sgl), 0, 0);
+               sync = OMAP_DMA_DST_SYNC;
        } else {
                omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
                        (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
                omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
                        sg_dma_address(sgl), 0, 0);
+               sync = OMAP_DMA_SRC_SYNC;
        }
 
        blksz = host->data->blksz;
@@ -1357,8 +1419,7 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
 
        omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
                        blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
-                       omap_hsmmc_get_dma_sync_dev(host, data),
-                       !(data->flags & MMC_DATA_WRITE));
+                       omap_hsmmc_get_dma_sync_dev(host, data), sync);
 
        omap_start_dma(dma_ch);
 }
@@ -1370,7 +1431,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
 {
        struct omap_hsmmc_host *host = cb_data;
        struct mmc_data *data;
-       int dma_ch, req_in_progress;
+       int req_in_progress;
 
        if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
                dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1399,12 +1460,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
                             omap_hsmmc_get_dma_dir(host, data));
 
        req_in_progress = host->req_in_progress;
-       dma_ch = host->dma_ch;
        host->dma_ch = -1;
        spin_unlock(&host->irq_lock);
 
-       omap_free_dma(dma_ch);
-
        /* If DMA has finished after TC, complete the request */
        if (!req_in_progress) {
                struct mmc_request *mrq = host->mrq;
@@ -1420,8 +1478,8 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 {
        int dma_len;
 
-       if (!next && data->host_cookie &&
-           data->host_cookie != host->next_data.cookie) {
+       if (unlikely(!next && data->host_cookie &&
+           data->host_cookie != host->next_data.cookie)) {
                pr_warning("[%s] invalid cookie: data->host_cookie %d"
                       " host->next_data.cookie %d\n",
                       __func__, data->host_cookie, host->next_data.cookie);
@@ -1441,7 +1499,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
        }
 
 
-       if (dma_len == 0)
+       if (unlikely(dma_len == 0))
                return -EINVAL;
 
        if (next) {
@@ -1467,10 +1525,10 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
                struct scatterlist *sgl;
 
                sgl = data->sg + i;
-               if (sgl->length % data->blksz)
+               if (unlikely(sgl->length % data->blksz))
                        return -EINVAL;
        }
-       if ((data->blksz % 4) != 0)
+       if (unlikely((data->blksz % 4) != 0))
                /* REVISIT: The MMC buffer increments only when MSB is written.
                 * Return error for blksz which is non multiple of four.
                 */
@@ -1478,16 +1536,31 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 
        BUG_ON(host->dma_ch != -1);
 
-       ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-                              "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-       if (ret != 0) {
-               dev_err(mmc_dev(host->mmc),
-                       "%s: omap_request_dma() failed with %d\n",
-                       mmc_hostname(host->mmc), ret);
-               return ret;
+       if (data->flags & MMC_DATA_WRITE)
+               dma_ch = host->dma_ch_tx;
+       else
+               dma_ch = host->dma_ch_rx;
+
+       if (dma_ch == -1) {
+               ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+                                      "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
+               if (unlikely(ret != 0)) {
+                       dev_err(mmc_dev(host->mmc),
+                               "%s: omap_request_dma() failed with %d\n",
+                               mmc_hostname(host->mmc), ret);
+                       return ret;
+               }
+
+               omap_hsmmc_config_dma_params_once(host, data, dma_ch);
+
+               if (data->flags & MMC_DATA_WRITE)
+                       host->dma_ch_tx = dma_ch;
+               else
+                       host->dma_ch_rx = dma_ch;
        }
+
        ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
-       if (ret)
+       if (unlikely(ret))
                return ret;
 
        host->dma_ch = dma_ch;
@@ -1498,41 +1571,15 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
        return 0;
 }
 
-static void set_data_timeout(struct omap_hsmmc_host *host,
-                            unsigned int timeout_ns,
-                            unsigned int timeout_clks)
+/* pandora wifi small transfer hack */
+static int check_mmc3_dma_hack(struct omap_hsmmc_host *host,
+                              struct mmc_request *req)
 {
-       unsigned int timeout, cycle_ns;
-       uint32_t reg, clkd, dto = 0;
-
-       reg = OMAP_HSMMC_READ(host->base, SYSCTL);
-       clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
-       if (clkd == 0)
-               clkd = 1;
-
-       cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
-       timeout = timeout_ns / cycle_ns;
-       timeout += timeout_clks;
-       if (timeout) {
-               while ((timeout & 0x80000000) == 0) {
-                       dto += 1;
-                       timeout <<= 1;
-               }
-               dto = 31 - dto;
-               timeout <<= 1;
-               if (timeout && dto)
-                       dto += 1;
-               if (dto >= 13)
-                       dto -= 13;
-               else
-                       dto = 0;
-               if (dto > 14)
-                       dto = 14;
-       }
-
-       reg &= ~DTO_MASK;
-       reg |= dto << DTO_SHIFT;
-       OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
+       if (req->data != NULL && req->data->sg_len == 1
+           && req->data->sg->length <= 16)
+               return 0;
+       else
+               return 1;
 }
 
 /*
@@ -1546,18 +1593,11 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
 
        if (req->data == NULL) {
                OMAP_HSMMC_WRITE(host->base, BLK, 0);
-               /*
-                * Set an arbitrary 100ms data timeout for commands with
-                * busy signal.
-                */
-               if (req->cmd->flags & MMC_RSP_BUSY)
-                       set_data_timeout(host, 100000000U, 0);
                return 0;
        }
 
        OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
                                        | (req->data->blocks << 16));
-       set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
 
        if (host->use_dma) {
                ret = omap_hsmmc_start_dma_transfer(host, req);
@@ -1588,18 +1628,77 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
                               bool is_first_req)
 {
        struct omap_hsmmc_host *host = mmc_priv(mmc);
+       int use_dma = host->use_dma;
 
        if (mrq->data->host_cookie) {
                mrq->data->host_cookie = 0;
                return ;
        }
 
-       if (host->use_dma)
+       if (host->id == OMAP_MMC3_DEVID)
+               use_dma = check_mmc3_dma_hack(host, mrq);
+       if (use_dma)
                if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
                                                &host->next_data))
                        mrq->data->host_cookie = 0;
 }
 
+#define BWR (1 << 4)
+#define BRR (1 << 5)
+
+static noinline void omap_hsmmc_request_do_pio(struct mmc_host *mmc,
+       struct mmc_request *req)
+{
+       struct omap_hsmmc_host *host = mmc_priv(mmc);
+       u32 *data = sg_virt(req->data->sg);
+       u32 len = req->data->sg->length;
+       int stat;
+       int i;
+
+       for (i = 0; i < 10000000; i++) {
+               stat = OMAP_HSMMC_READ(host->base, STAT);
+               if (stat == 0)
+                       continue;
+
+               //dev_err(mmc_dev(host->mmc), "stat %x, l %d\n", stat, i);
+
+               if (stat & (DATA_TIMEOUT | DATA_CRC))
+                       omap_hsmmc_reset_controller_fsm(host, SRD);
+
+               if (stat & ERR) {
+                       req->cmd->error =
+                       req->data->error = -EINVAL; // ?
+                       omap_hsmmc_xfer_done(host, host->data);
+                       return;
+               }
+       
+               if (req->data->flags & MMC_DATA_WRITE) {
+                       while (len > 0 && (stat & BWR)) {
+                               OMAP_HSMMC_WRITE(host->base, DATA, *data++);
+                               len -= 4;
+                       }
+               } else {
+                       while (len > 0 && (stat & BRR)) {
+                               *data++ = OMAP_HSMMC_READ(host->base, DATA);
+                               len -= 4;
+                       }
+               }
+
+               if ((stat & CC) && host->cmd)
+                       omap_hsmmc_cmd_done(host, host->cmd);
+               if ((stat & TC) && host->mrq) {
+                       omap_hsmmc_xfer_done(host, host->data);
+                       break;
+               }
+       }
+
+       if (len > 0) {
+               req->cmd->error =
+               req->data->error = -ETIMEDOUT;
+               omap_hsmmc_xfer_done(host, req->data);
+       }
+}
+
 /*
  * Request function. for read/write operation
  */
@@ -1610,7 +1709,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
 
        BUG_ON(host->req_in_progress);
        BUG_ON(host->dma_ch != -1);
-       if (host->protect_card) {
+       if (unlikely(host->protect_card)) {
                if (host->reqs_blocked < 3) {
                        /*
                         * Ensure the controller is left in a consistent
@@ -1629,10 +1728,15 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
                return;
        } else if (host->reqs_blocked)
                host->reqs_blocked = 0;
+
+       /* pandora wifi hack... */
+       if (host->id == OMAP_MMC3_DEVID)
+               host->use_dma = check_mmc3_dma_hack(host, req);
+
        WARN_ON(host->mrq != NULL);
        host->mrq = req;
        err = omap_hsmmc_prepare_data(host, req);
-       if (err) {
+       if (unlikely(err)) {
                req->cmd->error = err;
                if (req->data)
                        req->data->error = err;
@@ -1642,6 +1746,9 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
        }
 
        omap_hsmmc_start_command(host, req->cmd, req->data);
+
+       if (host->use_dma == 0)
+               omap_hsmmc_request_do_pio(mmc, req);
 }
 
 /* Routine to configure clock values. Exposed API to core */
@@ -1910,6 +2017,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
        host->use_dma   = 1;
        host->dev->dma_mask = &pdata->dma_mask;
        host->dma_ch    = -1;
+       host->dma_ch_tx = -1;
+       host->dma_ch_rx = -1;
        host->irq       = irq;
        host->id        = pdev->id;
        host->slot_id   = 0;
@@ -1919,7 +2028,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
        host->next_data.cookie = 1;
 
        platform_set_drvdata(pdev, host);
-       INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
 
        mmc->ops        = &omap_hsmmc_ops;
 
@@ -1945,6 +2053,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
        omap_hsmmc_context_save(host);
 
        mmc->caps |= MMC_CAP_DISABLE;
+
        if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
                dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
                mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
@@ -2047,10 +2156,11 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
 
        /* Request IRQ for card detect */
        if ((mmc_slot(host).card_detect_irq)) {
-               ret = request_irq(mmc_slot(host).card_detect_irq,
-                                 omap_hsmmc_cd_handler,
-                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                 mmc_hostname(mmc), host);
+               ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
+                                          NULL,
+                                          omap_hsmmc_detect,
+                                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                          mmc_hostname(mmc), host);
                if (ret) {
                        dev_dbg(mmc_dev(host->mmc),
                                "Unable to grab MMC CD IRQ\n");
@@ -2078,6 +2188,15 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
                        goto err_slot_name;
        }
 
+       if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
+               ret = device_create_file(&mmc->class_dev, &dev_attr_unsafe_read);
+
+               /* MMC_CAP2_NO_MULTI_READ makes it crawl, try a different workaround */
+               mmc->caps2 &= ~MMC_CAP2_NO_MULTI_READ;
+               mmc->max_segs = 1;
+               mmc->f_max = 32000000;
+       }
+
        omap_hsmmc_debugfs(mmc);
        pm_runtime_mark_last_busy(host->dev);
        pm_runtime_put_autosuspend(host->dev);
@@ -2129,7 +2248,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
                free_irq(host->irq, host);
                if (mmc_slot(host).card_detect_irq)
                        free_irq(mmc_slot(host).card_detect_irq, host);
-               flush_work_sync(&host->mmc_carddetect_work);
 
                pm_runtime_put_sync(host->dev);
                pm_runtime_disable(host->dev);
@@ -2176,7 +2294,6 @@ static int omap_hsmmc_suspend(struct device *dev)
                                return ret;
                        }
                }
-               cancel_work_sync(&host->mmc_carddetect_work);
                ret = mmc_suspend_host(host->mmc);
 
                if (ret == 0) {
@@ -2249,6 +2366,9 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
 
        host = platform_get_drvdata(to_platform_device(dev));
        omap_hsmmc_context_save(host);
+
+       omap_hsmmc_free_dma(host);
+
        dev_dbg(mmc_dev(host->mmc), "disabled\n");
 
        return 0;
index 297c965..fc86521 100644 (file)
@@ -24,6 +24,7 @@
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
+#include <asm/system.h>
 
 #define        DRIVER_NAME     "omap2-nand"
 #define        OMAP_NAND_TIMEOUT_MS    5000
@@ -387,7 +388,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                                                        dma_addr, 0, 0);
            omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
                                        0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-                                       OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
+                                       OMAP24XX_DMA_GPMC,
+                                       OMAP_DMA_DST_SYNC_PREFETCH);
        } else {
            omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
                                                info->phys_base, 0, 0);
@@ -402,14 +404,18 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                        PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
        if (ret)
                /* PFPW engine is busy, use cpu copy method */
-               goto out_copy;
+               goto out_copy_unmap;
 
+       /* this will be short, avoid CPU wakeup latency */
+       disable_hlt();
        init_completion(&info->comp);
 
        omap_start_dma(info->dma_ch);
 
        /* setup and start DMA using dma_addr */
        wait_for_completion(&info->comp);
+       enable_hlt();
+
        tim = 0;
        limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
        while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
@@ -421,6 +427,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
        return 0;
 
+out_copy_unmap:
+       dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
 out_copy:
        if (info->nand.options & NAND_BUSWIDTH_16)
                is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -861,68 +869,13 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
        gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
 }
 
-/**
- * omap_wait - wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND Chip structure
- *
- * Wait function is called during Program and erase operations and
- * the way it is called from MTD layer, we should wait till the NAND
- * chip is ready after the programming/erase operation has completed.
- *
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
- */
-static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-       struct nand_chip *this = mtd->priv;
-       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
-                                                       mtd);
-       unsigned long timeo = jiffies;
-       int status = NAND_STATUS_FAIL, state = this->state;
-
-       if (state == FL_ERASING)
-               timeo += (HZ * 400) / 1000;
-       else
-               timeo += (HZ * 20) / 1000;
-
-       gpmc_nand_write(info->gpmc_cs,
-                       GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
-       while (time_before(jiffies, timeo)) {
-               status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
-               if (status & NAND_STATUS_READY)
-                       break;
-               cond_resched();
-       }
-       return status;
-}
-
 /**
  * omap_dev_ready - calls the platform specific dev_ready function
  * @mtd: MTD device structure
  */
 static int omap_dev_ready(struct mtd_info *mtd)
 {
-       unsigned int val = 0;
-       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
-                                                       mtd);
-
-       val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-       if ((val & 0x100) == 0x100) {
-               /* Clear IRQ Interrupt */
-               val |= 0x100;
-               val &= ~(0x0);
-               gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
-       } else {
-               unsigned int cnt = 0;
-               while (cnt++ < 0x1FF) {
-                       if  ((val & 0x100) == 0x100)
-                               return 0;
-                       val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-               }
-       }
-
-       return 1;
+       return !!gpmc_read_status(GPMC_STATUS_WAIT);
 }
 
 static int __devinit omap_nand_probe(struct platform_device *pdev)
@@ -990,7 +943,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                info->nand.dev_ready = omap_dev_ready;
                info->nand.chip_delay = 0;
        } else {
-               info->nand.waitfunc = omap_wait;
                info->nand.chip_delay = 50;
        }
 
@@ -1020,8 +972,10 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                } else {
                        omap_set_dma_dest_burst_mode(info->dma_ch,
                                        OMAP_DMA_DATA_BURST_16);
+                       omap_set_dma_dest_data_pack(info->dma_ch, 1);
                        omap_set_dma_src_burst_mode(info->dma_ch,
                                        OMAP_DMA_DATA_BURST_16);
+                       omap_set_dma_src_data_pack(info->dma_ch, 1);
 
                        info->nand.read_buf   = omap_read_buf_dma_pref;
                        info->nand.write_buf  = omap_write_buf_dma_pref;
index 4dcc752..ea4b95b 100644 (file)
@@ -29,7 +29,7 @@ config MTD_UBI_WL_THRESHOLD
 
 config MTD_UBI_BEB_RESERVE
        int "Percentage of reserved eraseblocks for bad eraseblocks handling"
-       default 1
+       default 2
        range 0 25
        help
          If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
@@ -52,12 +52,4 @@ config MTD_UBI_GLUEBI
           work on top of UBI. Do not enable this unless you use legacy
           software.
 
-config MTD_UBI_DEBUG
-       bool "UBI debugging"
-       depends on SYSFS
-       select DEBUG_FS
-       select KALLSYMS
-       help
-         This option enables UBI debugging.
-
 endif # MTD_UBI
index c9302a5..a0803ac 100644 (file)
@@ -1,7 +1,6 @@
 obj-$(CONFIG_MTD_UBI) += ubi.o
 
-ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
-ubi-y += misc.o
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
 
-ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
 obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
similarity index 56%
rename from drivers/mtd/ubi/scan.c
rename to drivers/mtd/ubi/attach.c
index c5b2357..1cb642b 100644 (file)
  */
 
 /*
- * UBI scanning sub-system.
+ * UBI attaching sub-system.
  *
- * This sub-system is responsible for scanning the flash media, checking UBI
- * headers and providing complete information about the UBI flash image.
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
  *
- * The scanning information is represented by a &struct ubi_scan_info' object.
- * Information about found volumes is represented by &struct ubi_scan_volume
+ * The attaching information is represented by a &struct ubi_attach_info'
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
  * objects which are kept in volume RB-tree with root at the @volumes field.
  * The RB-tree is indexed by the volume ID.
  *
- * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects.
- * These objects are kept in per-volume RB-trees with the root at the
- * corresponding &struct ubi_scan_volume object. To put it differently, we keep
- * an RB-tree of per-volume objects and each of these objects is the root of
- * RB-tree of per-eraseblock objects.
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
  *
  * Corrupted physical eraseblocks are put to the @corr list, free physical
  * eraseblocks are put to the @free list and the physical eraseblock to be
  *
  * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
  * tries to handle them gracefully, without printing too many warnings and
- * error messages. The idea is that we do not lose important data in these case
- * - we may lose only the data which was being written to the media just before
- * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
- * handle such data losses (e.g., by using the FS journal).
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
  *
  * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
  * the reason is a power cut, UBI puts this PEB to the @erase list, and all
  * PEBs in the @erase list are scheduled for erasure later.
  *
  * 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
  * Obviously, this lessens the amount of available PEBs, and if at some  point
  * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
  * about such PEBs every time the MTD device is attached.
  *
  * However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
- * if the VID header is corrupted and the data area does not contain all 0xFFs,
- * and there were no bit-flips or integrity errors while reading the data area.
- * Otherwise UBI assumes corruption type 1. So the decision criteria are as
- * follows.
- *   o If the data area contains only 0xFFs, there is no data, and it is safe
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area.  Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ *   o If the data area contains only 0xFFs, there are no data, and it is safe
  *     to just erase this PEB - this is corruption type 1.
  *   o If the data area has bit-flips or data integrity errors (ECC errors on
  *     NAND), it is probably a PEB which was being erased when power cut
 #include <linux/random.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
-#else
-#define paranoid_check_si(ubi, si) 0
-#endif
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
 
 /* Temporary variables used during scanning */
 static struct ubi_ec_hdr *ech;
@@ -100,13 +97,18 @@ static struct ubi_vid_hdr *vidh;
 
 /**
  * add_to_list - add physical eraseblock to a list.
- * @si: scanning information
+ * @ai: attaching information
  * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
  * @ec: erase counter of the physical eraseblock
  * @to_head: if not zero, add to the head of the list
  * @list: the list to add to
  *
- * This function adds physical eraseblock @pnum to free, erase, or alien lists.
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
  * If @to_head is not zero, PEB will be added to the head of the list, which
  * basically means it will be processed first later. E.g., we add corrupted
  * PEBs (corrupted due to power cuts) to the head of the erase list to make
@@ -114,65 +116,68 @@ static struct ubi_vid_hdr *vidh;
  * returns zero in case of success and a negative error code in case of
  * failure.
  */
-static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
-                      struct list_head *list)
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+                      int lnum, int ec, int to_head, struct list_head *list)
 {
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_peb *aeb;
 
-       if (list == &si->free) {
+       if (list == &ai->free) {
                dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
-       } else if (list == &si->erase) {
+       } else if (list == &ai->erase) {
                dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
-       } else if (list == &si->alien) {
+       } else if (list == &ai->alien) {
                dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
-               si->alien_peb_count += 1;
+               ai->alien_peb_count += 1;
        } else
                BUG();
 
-       seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
-       if (!seb)
+       aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+       if (!aeb)
                return -ENOMEM;
 
-       seb->pnum = pnum;
-       seb->ec = ec;
+       aeb->pnum = pnum;
+       aeb->vol_id = vol_id;
+       aeb->lnum = lnum;
+       aeb->ec = ec;
        if (to_head)
-               list_add(&seb->u.list, list);
+               list_add(&aeb->u.list, list);
        else
-               list_add_tail(&seb->u.list, list);
+               list_add_tail(&aeb->u.list, list);
        return 0;
 }
 
 /**
  * add_corrupted - add a corrupted physical eraseblock.
- * @si: scanning information
+ * @ai: attaching information
  * @pnum: physical eraseblock number to add
  * @ec: erase counter of the physical eraseblock
  *
- * This function adds corrupted physical eraseblock @pnum to the 'corr' list.
- * The corruption was presumably not caused by a power cut. Returns zero in
- * case of success and a negative error code in case of failure.
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list.  The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
  */
-static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
 {
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_peb *aeb;
 
        dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
 
-       seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
-       if (!seb)
+       aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+       if (!aeb)
                return -ENOMEM;
 
-       si->corr_peb_count += 1;
-       seb->pnum = pnum;
-       seb->ec = ec;
-       list_add(&seb->u.list, &si->corr);
+       ai->corr_peb_count += 1;
+       aeb->pnum = pnum;
+       aeb->ec = ec;
+       list_add(&aeb->u.list, &ai->corr);
        return 0;
 }
 
 /**
  * validate_vid_hdr - check volume identifier header.
  * @vid_hdr: the volume identifier header to check
- * @sv: information about the volume this logical eraseblock belongs to
+ * @av: information about the volume this logical eraseblock belongs to
  * @pnum: physical eraseblock number the VID header came from
  *
  * This function checks that data stored in @vid_hdr is consistent. Returns
@@ -184,15 +189,15 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
  * headers of the same volume.
  */
 static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
-                           const struct ubi_scan_volume *sv, int pnum)
+                           const struct ubi_ainf_volume *av, int pnum)
 {
        int vol_type = vid_hdr->vol_type;
        int vol_id = be32_to_cpu(vid_hdr->vol_id);
        int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
        int data_pad = be32_to_cpu(vid_hdr->data_pad);
 
-       if (sv->leb_count != 0) {
-               int sv_vol_type;
+       if (av->leb_count != 0) {
+               int av_vol_type;
 
                /*
                 * This is not the first logical eraseblock belonging to this
@@ -200,28 +205,28 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
                 * to the data in previous logical eraseblock headers.
                 */
 
-               if (vol_id != sv->vol_id) {
-                       dbg_err("inconsistent vol_id");
+               if (vol_id != av->vol_id) {
+                       ubi_err("inconsistent vol_id");
                        goto bad;
                }
 
-               if (sv->vol_type == UBI_STATIC_VOLUME)
-                       sv_vol_type = UBI_VID_STATIC;
+               if (av->vol_type == UBI_STATIC_VOLUME)
+                       av_vol_type = UBI_VID_STATIC;
                else
-                       sv_vol_type = UBI_VID_DYNAMIC;
+                       av_vol_type = UBI_VID_DYNAMIC;
 
-               if (vol_type != sv_vol_type) {
-                       dbg_err("inconsistent vol_type");
+               if (vol_type != av_vol_type) {
+                       ubi_err("inconsistent vol_type");
                        goto bad;
                }
 
-               if (used_ebs != sv->used_ebs) {
-                       dbg_err("inconsistent used_ebs");
+               if (used_ebs != av->used_ebs) {
+                       ubi_err("inconsistent used_ebs");
                        goto bad;
                }
 
-               if (data_pad != sv->data_pad) {
-                       dbg_err("inconsistent data_pad");
+               if (data_pad != av->data_pad) {
+                       ubi_err("inconsistent data_pad");
                        goto bad;
                }
        }
@@ -230,74 +235,74 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
 
 bad:
        ubi_err("inconsistent VID header at PEB %d", pnum);
-       ubi_dbg_dump_vid_hdr(vid_hdr);
-       ubi_dbg_dump_sv(sv);
+       ubi_dump_vid_hdr(vid_hdr);
+       ubi_dump_av(av);
        return -EINVAL;
 }
 
 /**
- * add_volume - add volume to the scanning information.
- * @si: scanning information
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
  * @vol_id: ID of the volume to add
  * @pnum: physical eraseblock number
  * @vid_hdr: volume identifier header
  *
  * If the volume corresponding to the @vid_hdr logical eraseblock is already
- * present in the scanning information, this function does nothing. Otherwise
- * it adds corresponding volume to the scanning information. Returns a pointer
- * to the scanning volume object in case of success and a negative error code
- * in case of failure.
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
  */
-static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
-                                         int pnum,
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+                                         int vol_id, int pnum,
                                          const struct ubi_vid_hdr *vid_hdr)
 {
-       struct ubi_scan_volume *sv;
-       struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
+       struct ubi_ainf_volume *av;
+       struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
 
        ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
 
        /* Walk the volume RB-tree to look if this volume is already present */
        while (*p) {
                parent = *p;
-               sv = rb_entry(parent, struct ubi_scan_volume, rb);
+               av = rb_entry(parent, struct ubi_ainf_volume, rb);
 
-               if (vol_id == sv->vol_id)
-                       return sv;
+               if (vol_id == av->vol_id)
+                       return av;
 
-               if (vol_id > sv->vol_id)
+               if (vol_id > av->vol_id)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
 
        /* The volume is absent - add it */
-       sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
-       if (!sv)
+       av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+       if (!av)
                return ERR_PTR(-ENOMEM);
 
-       sv->highest_lnum = sv->leb_count = 0;
-       sv->vol_id = vol_id;
-       sv->root = RB_ROOT;
-       sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
-       sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
-       sv->compat = vid_hdr->compat;
-       sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+       av->highest_lnum = av->leb_count = 0;
+       av->vol_id = vol_id;
+       av->root = RB_ROOT;
+       av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+       av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+       av->compat = vid_hdr->compat;
+       av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
                                                            : UBI_STATIC_VOLUME;
-       if (vol_id > si->highest_vol_id)
-               si->highest_vol_id = vol_id;
+       if (vol_id > ai->highest_vol_id)
+               ai->highest_vol_id = vol_id;
 
-       rb_link_node(&sv->rb, parent, p);
-       rb_insert_color(&sv->rb, &si->volumes);
-       si->vols_found += 1;
+       rb_link_node(&av->rb, parent, p);
+       rb_insert_color(&av->rb, &ai->volumes);
+       ai->vols_found += 1;
        dbg_bld("added volume %d", vol_id);
-       return sv;
+       return av;
 }
 
 /**
  * compare_lebs - find out which logical eraseblock is newer.
  * @ubi: UBI device description object
- * @seb: first logical eraseblock to compare
+ * @aeb: first logical eraseblock to compare
  * @pnum: physical eraseblock number of the second logical eraseblock to
  * compare
  * @vid_hdr: volume identifier header of the second logical eraseblock
@@ -306,7 +311,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
  * case of success this function returns a positive value, in case of failure, a
  * negative error code is returned. The success return codes use the following
  * bits:
- *     o bit 0 is cleared: the first PEB (described by @seb) is newer than the
+ *     o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
  *       second PEB (described by @pnum and @vid_hdr);
  *     o bit 0 is set: the second PEB is newer;
  *     o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -314,7 +319,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
  *     o bit 2 is cleared: the older LEB is not corrupted;
  *     o bit 2 is set: the older LEB is corrupted.
  */
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
+static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
                        int pnum, const struct ubi_vid_hdr *vid_hdr)
 {
        void *buf;
@@ -323,7 +328,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
        struct ubi_vid_hdr *vh = NULL;
        unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
 
-       if (sqnum2 == seb->sqnum) {
+       if (sqnum2 == aeb->sqnum) {
                /*
                 * This must be a really ancient UBI image which has been
                 * created before sequence numbers support has been added. At
@@ -337,7 +342,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
        }
 
        /* Obviously the LEB with lower sequence counter is older */
-       second_is_newer = !!(sqnum2 > seb->sqnum);
+       second_is_newer = (sqnum2 > aeb->sqnum);
 
        /*
         * Now we know which copy is newer. If the copy flag of the PEB with
@@ -356,7 +361,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
                        return 1;
                }
        } else {
-               if (!seb->copy_flag) {
+               if (!aeb->copy_flag) {
                        /* It is not a copy, so it is newer */
                        dbg_bld("first PEB %d is newer, copy_flag is unset",
                                pnum);
@@ -367,14 +372,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
                if (!vh)
                        return -ENOMEM;
 
-               pnum = seb->pnum;
+               pnum = aeb->pnum;
                err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
                if (err) {
                        if (err == UBI_IO_BITFLIPS)
                                bitflips = 1;
                        else {
-                               dbg_err("VID of PEB %d header is bad, but it "
-                                       "was OK earlier, err %d", pnum, err);
+                               ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+                                       pnum, err);
                                if (err > 0)
                                        err = -EIO;
 
@@ -429,9 +434,9 @@ out_free_vidh:
 }
 
 /**
- * ubi_scan_add_used - add physical eraseblock to the scanning information.
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @pnum: the physical eraseblock number
  * @ec: erase counter
  * @vid_hdr: the volume identifier header
@@ -444,14 +449,13 @@ out_free_vidh:
  * to be picked, while the older one has to be dropped. This function returns
  * zero in case of success and a negative error code in case of failure.
  */
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
-                     int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
-                     int bitflips)
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+                 int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
 {
        int err, vol_id, lnum;
        unsigned long long sqnum;
-       struct ubi_scan_volume *sv;
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_volume *av;
+       struct ubi_ainf_peb *aeb;
        struct rb_node **p, *parent = NULL;
 
        vol_id = be32_to_cpu(vid_hdr->vol_id);
@@ -461,25 +465,25 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
        dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
                pnum, vol_id, lnum, ec, sqnum, bitflips);
 
-       sv = add_volume(si, vol_id, pnum, vid_hdr);
-       if (IS_ERR(sv))
-               return PTR_ERR(sv);
+       av = add_volume(ai, vol_id, pnum, vid_hdr);
+       if (IS_ERR(av))
+               return PTR_ERR(av);
 
-       if (si->max_sqnum < sqnum)
-               si->max_sqnum = sqnum;
+       if (ai->max_sqnum < sqnum)
+               ai->max_sqnum = sqnum;
 
        /*
         * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
         * if this is the first instance of this logical eraseblock or not.
         */
-       p = &sv->root.rb_node;
+       p = &av->root.rb_node;
        while (*p) {
                int cmp_res;
 
                parent = *p;
-               seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
-               if (lnum != seb->lnum) {
-                       if (lnum < seb->lnum)
+               aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+               if (lnum != aeb->lnum) {
+                       if (lnum < aeb->lnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
@@ -491,8 +495,8 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
                 * logical eraseblock present.
                 */
 
-               dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
-                       "EC %d", seb->pnum, seb->sqnum, seb->ec);
+               dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+                       aeb->pnum, aeb->sqnum, aeb->ec);
 
                /*
                 * Make sure that the logical eraseblocks have different
@@ -507,11 +511,11 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
                 * images, but refuse attaching old images with duplicated
                 * logical eraseblocks because there was an unclean reboot.
                 */
-               if (seb->sqnum == sqnum && sqnum != 0) {
+               if (aeb->sqnum == sqnum && sqnum != 0) {
                        ubi_err("two LEBs with same sequence number %llu",
                                sqnum);
-                       ubi_dbg_dump_seb(seb, 0);
-                       ubi_dbg_dump_vid_hdr(vid_hdr);
+                       ubi_dump_aeb(aeb, 0);
+                       ubi_dump_vid_hdr(vid_hdr);
                        return -EINVAL;
                }
 
@@ -519,7 +523,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
                 * Now we have to drop the older one and preserve the newer
                 * one.
                 */
-               cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
+               cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
                if (cmp_res < 0)
                        return cmp_res;
 
@@ -528,23 +532,26 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
                         * This logical eraseblock is newer than the one
                         * found earlier.
                         */
-                       err = validate_vid_hdr(vid_hdr, sv, pnum);
+                       err = validate_vid_hdr(vid_hdr, av, pnum);
                        if (err)
                                return err;
 
-                       err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4,
-                                         &si->erase);
+                       err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+                                         aeb->lnum, aeb->ec, cmp_res & 4,
+                                         &ai->erase);
                        if (err)
                                return err;
 
-                       seb->ec = ec;
-                       seb->pnum = pnum;
-                       seb->scrub = ((cmp_res & 2) || bitflips);
-                       seb->copy_flag = vid_hdr->copy_flag;
-                       seb->sqnum = sqnum;
+                       aeb->ec = ec;
+                       aeb->pnum = pnum;
+                       aeb->vol_id = vol_id;
+                       aeb->lnum = lnum;
+                       aeb->scrub = ((cmp_res & 2) || bitflips);
+                       aeb->copy_flag = vid_hdr->copy_flag;
+                       aeb->sqnum = sqnum;
 
-                       if (sv->highest_lnum == lnum)
-                               sv->last_data_size =
+                       if (av->highest_lnum == lnum)
+                               av->last_data_size =
                                        be32_to_cpu(vid_hdr->data_size);
 
                        return 0;
@@ -553,63 +560,64 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
                         * This logical eraseblock is older than the one found
                         * previously.
                         */
-                       return add_to_list(si, pnum, ec, cmp_res & 4,
-                                          &si->erase);
+                       return add_to_list(ai, pnum, vol_id, lnum, ec,
+                                          cmp_res & 4, &ai->erase);
                }
        }
 
        /*
         * We've met this logical eraseblock for the first time, add it to the
-        * scanning information.
+        * attaching information.
         */
 
-       err = validate_vid_hdr(vid_hdr, sv, pnum);
+       err = validate_vid_hdr(vid_hdr, av, pnum);
        if (err)
                return err;
 
-       seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
-       if (!seb)
+       aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+       if (!aeb)
                return -ENOMEM;
 
-       seb->ec = ec;
-       seb->pnum = pnum;
-       seb->lnum = lnum;
-       seb->scrub = bitflips;
-       seb->copy_flag = vid_hdr->copy_flag;
-       seb->sqnum = sqnum;
-
-       if (sv->highest_lnum <= lnum) {
-               sv->highest_lnum = lnum;
-               sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
+       aeb->ec = ec;
+       aeb->pnum = pnum;
+       aeb->vol_id = vol_id;
+       aeb->lnum = lnum;
+       aeb->scrub = bitflips;
+       aeb->copy_flag = vid_hdr->copy_flag;
+       aeb->sqnum = sqnum;
+
+       if (av->highest_lnum <= lnum) {
+               av->highest_lnum = lnum;
+               av->last_data_size = be32_to_cpu(vid_hdr->data_size);
        }
 
-       sv->leb_count += 1;
-       rb_link_node(&seb->u.rb, parent, p);
-       rb_insert_color(&seb->u.rb, &sv->root);
+       av->leb_count += 1;
+       rb_link_node(&aeb->u.rb, parent, p);
+       rb_insert_color(&aeb->u.rb, &av->root);
        return 0;
 }
 
 /**
- * ubi_scan_find_sv - find volume in the scanning information.
- * @si: scanning information
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
  * @vol_id: the requested volume ID
  *
  * This function returns a pointer to the volume description or %NULL if there
- * are no data about this volume in the scanning information.
+ * are no data about this volume in the attaching information.
  */
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
-                                        int vol_id)
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+                                   int vol_id)
 {
-       struct ubi_scan_volume *sv;
-       struct rb_node *p = si->volumes.rb_node;
+       struct ubi_ainf_volume *av;
+       struct rb_node *p = ai->volumes.rb_node;
 
        while (p) {
-               sv = rb_entry(p, struct ubi_scan_volume, rb);
+               av = rb_entry(p, struct ubi_ainf_volume, rb);
 
-               if (vol_id == sv->vol_id)
-                       return sv;
+               if (vol_id == av->vol_id)
+                       return av;
 
-               if (vol_id > sv->vol_id)
+               if (vol_id > av->vol_id)
                        p = p->rb_left;
                else
                        p = p->rb_right;
@@ -619,63 +627,34 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
 }
 
 /**
- * ubi_scan_find_seb - find LEB in the volume scanning information.
- * @sv: a pointer to the volume scanning information
- * @lnum: the requested logical eraseblock
- *
- * This function returns a pointer to the scanning logical eraseblock or %NULL
- * if there are no data about it in the scanning volume information.
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
  */
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
-                                      int lnum)
-{
-       struct ubi_scan_leb *seb;
-       struct rb_node *p = sv->root.rb_node;
-
-       while (p) {
-               seb = rb_entry(p, struct ubi_scan_leb, u.rb);
-
-               if (lnum == seb->lnum)
-                       return seb;
-
-               if (lnum > seb->lnum)
-                       p = p->rb_left;
-               else
-                       p = p->rb_right;
-       }
-
-       return NULL;
-}
-
-/**
- * ubi_scan_rm_volume - delete scanning information about a volume.
- * @si: scanning information
- * @sv: the volume scanning information to delete
- */
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
 {
        struct rb_node *rb;
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_peb *aeb;
 
-       dbg_bld("remove scanning information about volume %d", sv->vol_id);
+       dbg_bld("remove attaching information about volume %d", av->vol_id);
 
-       while ((rb = rb_first(&sv->root))) {
-               seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
-               rb_erase(&seb->u.rb, &sv->root);
-               list_add_tail(&seb->u.list, &si->erase);
+       while ((rb = rb_first(&av->root))) {
+               aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+               rb_erase(&aeb->u.rb, &av->root);
+               list_add_tail(&aeb->u.list, &ai->erase);
        }
 
-       rb_erase(&sv->rb, &si->volumes);
-       kfree(sv);
-       si->vols_found -= 1;
+       rb_erase(&av->rb, &ai->volumes);
+       kfree(av);
+       ai->vols_found -= 1;
 }
 
 /**
- * ubi_scan_erase_peb - erase a physical eraseblock.
+ * early_erase_peb - erase a physical eraseblock.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @pnum: physical eraseblock number to erase;
- * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
  *
  * This function erases physical eraseblock 'pnum', and writes the erase
  * counter header to it. This function should only be used on UBI device
@@ -683,8 +662,8 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
-                      int pnum, int ec)
+static int early_erase_peb(struct ubi_device *ubi,
+                          const struct ubi_attach_info *ai, int pnum, int ec)
 {
        int err;
        struct ubi_ec_hdr *ec_hdr;
@@ -716,9 +695,9 @@ out_free:
 }
 
 /**
- * ubi_scan_get_free_peb - get a free physical eraseblock.
+ * ubi_early_get_peb - get a free physical eraseblock.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function returns a free physical eraseblock. It is supposed to be
  * called on the UBI initialization stages when the wear-leveling sub-system is
@@ -726,20 +705,20 @@ out_free:
  * the lists, writes the EC header if it is needed, and removes it from the
  * list.
  *
- * This function returns scanning physical eraseblock information in case of
- * success and an error code in case of failure.
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
  */
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
-                                          struct ubi_scan_info *si)
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+                                      struct ubi_attach_info *ai)
 {
        int err = 0;
-       struct ubi_scan_leb *seb, *tmp_seb;
+       struct ubi_ainf_peb *aeb, *tmp_aeb;
 
-       if (!list_empty(&si->free)) {
-               seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
-               list_del(&seb->u.list);
-               dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
-               return seb;
+       if (!list_empty(&ai->free)) {
+               aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+               list_del(&aeb->u.list);
+               dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+               return aeb;
        }
 
        /*
@@ -748,18 +727,18 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
         * so forth. We don't want to take care about bad eraseblocks here -
         * they'll be handled later.
         */
-       list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) {
-               if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-                       seb->ec = si->mean_ec;
+       list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+               if (aeb->ec == UBI_UNKNOWN)
+                       aeb->ec = ai->mean_ec;
 
-               err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
+               err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
                if (err)
                        continue;
 
-               seb->ec += 1;
-               list_del(&seb->u.list);
-               dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
-               return seb;
+               aeb->ec += 1;
+               list_del(&aeb->u.list);
+               dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+               return aeb;
        }
 
        ubi_err("no free eraseblocks");
@@ -789,9 +768,9 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
        int err;
 
        mutex_lock(&ubi->buf_mutex);
-       memset(ubi->peb_buf1, 0x00, ubi->leb_size);
+       memset(ubi->peb_buf, 0x00, ubi->leb_size);
 
-       err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
+       err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
                          ubi->leb_size);
        if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
                /*
@@ -808,17 +787,17 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
        if (err)
                goto out_unlock;
 
-       if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size))
+       if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
                goto out_unlock;
 
-       ubi_err("PEB %d contains corrupted VID header, and the data does not "
-               "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
-               "header corruption which requires manual inspection", pnum);
-       ubi_dbg_dump_vid_hdr(vid_hdr);
-       dbg_msg("hexdump of PEB %d offset %d, length %d",
-               pnum, ubi->leb_start, ubi->leb_size);
+       ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+               pnum);
+       ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+       ubi_dump_vid_hdr(vid_hdr);
+       pr_err("hexdump of PEB %d offset %d, length %d",
+              pnum, ubi->leb_start, ubi->leb_size);
        ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
-                              ubi->peb_buf1, ubi->leb_size, 1);
+                              ubi->peb_buf, ubi->leb_size, 1);
        err = 1;
 
 out_unlock:
@@ -827,16 +806,18 @@ out_unlock:
 }
 
 /**
- * process_eb - read, check UBI headers, and add them to scanning information.
+ * scan_peb - scan and process UBI headers of a PEB.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @pnum: the physical eraseblock number
  *
- * This function returns a zero if the physical eraseblock was successfully
- * handled and a negative error code in case of failure.
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
  */
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
-                     int pnum)
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+                   int pnum)
 {
        long long uninitialized_var(ec);
        int err, bitflips = 0, vol_id, ec_err = 0;
@@ -848,12 +829,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
        if (err < 0)
                return err;
        else if (err) {
-               /*
-                * FIXME: this is actually duty of the I/O sub-system to
-                * initialize this, but MTD does not provide enough
-                * information.
-                */
-               si->bad_peb_count += 1;
+               ai->bad_peb_count += 1;
                return 0;
        }
 
@@ -867,13 +843,13 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                bitflips = 1;
                break;
        case UBI_IO_FF:
-               si->empty_peb_count += 1;
-               return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0,
-                                  &si->erase);
+               ai->empty_peb_count += 1;
+               return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+                                  UBI_UNKNOWN, 0, &ai->erase);
        case UBI_IO_FF_BITFLIPS:
-               si->empty_peb_count += 1;
-               return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1,
-                                  &si->erase);
+               ai->empty_peb_count += 1;
+               return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+                                  UBI_UNKNOWN, 1, &ai->erase);
        case UBI_IO_BAD_HDR_EBADMSG:
        case UBI_IO_BAD_HDR:
                /*
@@ -882,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                 * moved and EC be re-created.
                 */
                ec_err = err;
-               ec = UBI_SCAN_UNKNOWN_EC;
+               ec = UBI_UNKNOWN;
                bitflips = 1;
                break;
        default:
@@ -911,7 +887,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                         */
                        ubi_err("erase counter overflow, max is %d",
                                UBI_MAX_ERASECOUNTER);
-                       ubi_dbg_dump_ec_hdr(ech);
+                       ubi_dump_ec_hdr(ech);
                        return -EINVAL;
                }
 
@@ -931,9 +907,9 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                        ubi->image_seq = image_seq;
                if (ubi->image_seq && image_seq &&
                    ubi->image_seq != image_seq) {
-                       ubi_err("bad image sequence number %d in PEB %d, "
-                               "expected %d", image_seq, pnum, ubi->image_seq);
-                       ubi_dbg_dump_ec_hdr(ech);
+                       ubi_err("bad image sequence number %d in PEB %d, expected %d",
+                               image_seq, pnum, ubi->image_seq);
+                       ubi_dump_ec_hdr(ech);
                        return -EINVAL;
                }
        }
@@ -957,7 +933,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                         * PEB, bit it is not marked as bad yet. This may also
                         * be a result of power cut during erasure.
                         */
-                       si->maybe_bad_peb_count += 1;
+                       ai->maybe_bad_peb_count += 1;
        case UBI_IO_BAD_HDR:
                if (ec_err)
                        /*
@@ -984,23 +960,27 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                        return err;
                else if (!err)
                        /* This corruption is caused by a power cut */
-                       err = add_to_list(si, pnum, ec, 1, &si->erase);
+                       err = add_to_list(ai, pnum, UBI_UNKNOWN,
+                                         UBI_UNKNOWN, ec, 1, &ai->erase);
                else
                        /* This is an unexpected corruption */
-                       err = add_corrupted(si, pnum, ec);
+                       err = add_corrupted(ai, pnum, ec);
                if (err)
                        return err;
                goto adjust_mean_ec;
        case UBI_IO_FF_BITFLIPS:
-               err = add_to_list(si, pnum, ec, 1, &si->erase);
+               err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+                                 ec, 1, &ai->erase);
                if (err)
                        return err;
                goto adjust_mean_ec;
        case UBI_IO_FF:
                if (ec_err || bitflips)
-                       err = add_to_list(si, pnum, ec, 1, &si->erase);
+                       err = add_to_list(ai, pnum, UBI_UNKNOWN,
+                                         UBI_UNKNOWN, ec, 1, &ai->erase);
                else
-                       err = add_to_list(si, pnum, ec, 0, &si->free);
+                       err = add_to_list(ai, pnum, UBI_UNKNOWN,
+                                         UBI_UNKNOWN, ec, 0, &ai->free);
                if (err)
                        return err;
                goto adjust_mean_ec;
@@ -1017,24 +997,25 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                /* Unsupported internal volume */
                switch (vidh->compat) {
                case UBI_COMPAT_DELETE:
-                       ubi_msg("\"delete\" compatible internal volume %d:%d"
-                               " found, will remove it", vol_id, lnum);
-                       err = add_to_list(si, pnum, ec, 1, &si->erase);
+                       ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+                               vol_id, lnum);
+                       err = add_to_list(ai, pnum, vol_id, lnum,
+                                         ec, 1, &ai->erase);
                        if (err)
                                return err;
                        return 0;
 
                case UBI_COMPAT_RO:
-                       ubi_msg("read-only compatible internal volume %d:%d"
-                               " found, switch to read-only mode",
+                       ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
                                vol_id, lnum);
                        ubi->ro_mode = 1;
                        break;
 
                case UBI_COMPAT_PRESERVE:
-                       ubi_msg("\"preserve\" compatible internal volume %d:%d"
-                               " found", vol_id, lnum);
-                       err = add_to_list(si, pnum, ec, 0, &si->alien);
+                       ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+                               vol_id, lnum);
+                       err = add_to_list(ai, pnum, vol_id, lnum,
+                                         ec, 0, &ai->alien);
                        if (err)
                                return err;
                        return 0;
@@ -1049,40 +1030,40 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
        if (ec_err)
                ubi_warn("valid VID header but corrupted EC header at PEB %d",
                         pnum);
-       err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
+       err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
        if (err)
                return err;
 
 adjust_mean_ec:
        if (!ec_err) {
-               si->ec_sum += ec;
-               si->ec_count += 1;
-               if (ec > si->max_ec)
-                       si->max_ec = ec;
-               if (ec < si->min_ec)
-                       si->min_ec = ec;
+               ai->ec_sum += ec;
+               ai->ec_count += 1;
+               if (ec > ai->max_ec)
+                       ai->max_ec = ec;
+               if (ec < ai->min_ec)
+                       ai->min_ec = ec;
        }
 
        return 0;
 }
 
 /**
- * check_what_we_have - check what PEB were found by scanning.
+ * late_analysis - analyze the overall situation with PEB.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
- * This is a helper function which takes a look what PEBs were found by
- * scanning, and decides whether the flash is empty and should be formatted and
- * whether there are too many corrupted PEBs and we should not attach this
- * MTD device. Returns zero if we should proceed with attaching the MTD device,
- * and %-EINVAL if we should not.
+ * This is a helper function which takes a look what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
  */
-static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_peb *aeb;
        int max_corr, peb_count;
 
-       peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count;
+       peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
        max_corr = peb_count / 20 ?: 8;
 
        /*
@@ -1090,25 +1071,25 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
         * unclean reboots. However, many of them may indicate some problems
         * with the flash HW or driver.
         */
-       if (si->corr_peb_count) {
+       if (ai->corr_peb_count) {
                ubi_err("%d PEBs are corrupted and preserved",
-                       si->corr_peb_count);
-               printk(KERN_ERR "Corrupted PEBs are:");
-               list_for_each_entry(seb, &si->corr, u.list)
-                       printk(KERN_CONT " %d", seb->pnum);
-               printk(KERN_CONT "\n");
+                       ai->corr_peb_count);
+               pr_err("Corrupted PEBs are:");
+               list_for_each_entry(aeb, &ai->corr, u.list)
+                       pr_cont(" %d", aeb->pnum);
+               pr_cont("\n");
 
                /*
                 * If too many PEBs are corrupted, we refuse attaching,
                 * otherwise, only print a warning.
                 */
-               if (si->corr_peb_count >= max_corr) {
+               if (ai->corr_peb_count >= max_corr) {
                        ubi_err("too many corrupted PEBs, refusing");
                        return -EINVAL;
                }
        }
 
-       if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) {
+       if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
                /*
                 * All PEBs are empty, or almost all - a couple PEBs look like
                 * they may be bad PEBs which were not marked as bad yet.
@@ -1124,14 +1105,13 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
                 * 2. Flash contains non-UBI data and we do not want to format
                 *    it and destroy possibly important information.
                 */
-               if (si->maybe_bad_peb_count <= 2) {
-                       si->is_empty = 1;
+               if (ai->maybe_bad_peb_count <= 2) {
+                       ai->is_empty = 1;
                        ubi_msg("empty MTD device detected");
                        get_random_bytes(&ubi->image_seq,
                                         sizeof(ubi->image_seq));
                } else {
-                       ubi_err("MTD device is not UBI-formatted and possibly "
-                               "contains non-UBI data - refusing it");
+                       ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
                        return -EINVAL;
                }
 
@@ -1141,40 +1121,41 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
 }
 
 /**
- * ubi_scan - scan an MTD device.
+ * scan_all - scan entire MTD device.
  * @ubi: UBI device description object
  *
  * This function does full scanning of an MTD device and returns complete
- * information about it. In case of failure, an error code is returned.
+ * information about it in form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
  */
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
+static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
 {
        int err, pnum;
        struct rb_node *rb1, *rb2;
-       struct ubi_scan_volume *sv;
-       struct ubi_scan_leb *seb;
-       struct ubi_scan_info *si;
+       struct ubi_ainf_volume *av;
+       struct ubi_ainf_peb *aeb;
+       struct ubi_attach_info *ai;
 
-       si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
-       if (!si)
+       ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+       if (!ai)
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&si->corr);
-       INIT_LIST_HEAD(&si->free);
-       INIT_LIST_HEAD(&si->erase);
-       INIT_LIST_HEAD(&si->alien);
-       si->volumes = RB_ROOT;
+       INIT_LIST_HEAD(&ai->corr);
+       INIT_LIST_HEAD(&ai->free);
+       INIT_LIST_HEAD(&ai->erase);
+       INIT_LIST_HEAD(&ai->alien);
+       ai->volumes = RB_ROOT;
 
        err = -ENOMEM;
-       si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
-                                             sizeof(struct ubi_scan_leb),
-                                             0, 0, NULL);
-       if (!si->scan_leb_slab)
-               goto out_si;
+       ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+                                              sizeof(struct ubi_ainf_peb),
+                                              0, 0, NULL);
+       if (!ai->aeb_slab_cache)
+               goto out_ai;
 
        ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
        if (!ech)
-               goto out_si;
+               goto out_ai;
 
        vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!vidh)
@@ -1184,18 +1165,18 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
                cond_resched();
 
                dbg_gen("process PEB %d", pnum);
-               err = process_eb(ubi, si, pnum);
+               err = scan_peb(ubi, ai, pnum);
                if (err < 0)
                        goto out_vidh;
        }
 
-       dbg_msg("scanning is finished");
+       ubi_msg("scanning is finished");
 
        /* Calculate mean erase counter */
-       if (si->ec_count)
-               si->mean_ec = div_u64(si->ec_sum, si->ec_count);
+       if (ai->ec_count)
+               ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
 
-       err = check_what_we_have(ubi, si);
+       err = late_analysis(ubi, ai);
        if (err)
                goto out_vidh;
 
@@ -1203,55 +1184,102 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
         * In case of unknown erase counter we use the mean erase counter
         * value.
         */
-       ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-               ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
-                       if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-                               seb->ec = si->mean_ec;
+       ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+               ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+                       if (aeb->ec == UBI_UNKNOWN)
+                               aeb->ec = ai->mean_ec;
        }
 
-       list_for_each_entry(seb, &si->free, u.list) {
-               if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-                       seb->ec = si->mean_ec;
+       list_for_each_entry(aeb, &ai->free, u.list) {
+               if (aeb->ec == UBI_UNKNOWN)
+                       aeb->ec = ai->mean_ec;
        }
 
-       list_for_each_entry(seb, &si->corr, u.list)
-               if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-                       seb->ec = si->mean_ec;
+       list_for_each_entry(aeb, &ai->corr, u.list)
+               if (aeb->ec == UBI_UNKNOWN)
+                       aeb->ec = ai->mean_ec;
 
-       list_for_each_entry(seb, &si->erase, u.list)
-               if (seb->ec == UBI_SCAN_UNKNOWN_EC)
-                       seb->ec = si->mean_ec;
+       list_for_each_entry(aeb, &ai->erase, u.list)
+               if (aeb->ec == UBI_UNKNOWN)
+                       aeb->ec = ai->mean_ec;
 
-       err = paranoid_check_si(ubi, si);
+       err = self_check_ai(ubi, ai);
        if (err)
                goto out_vidh;
 
        ubi_free_vid_hdr(ubi, vidh);
        kfree(ech);
 
-       return si;
+       return ai;
 
 out_vidh:
        ubi_free_vid_hdr(ubi, vidh);
 out_ech:
        kfree(ech);
-out_si:
-       ubi_scan_destroy_si(si);
+out_ai:
+       ubi_destroy_ai(ai);
        return ERR_PTR(err);
 }
 
 /**
- * destroy_sv - free the scanning volume information
- * @sv: scanning volume information
- * @si: scanning information
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
  *
- * This function destroys the volume RB-tree (@sv->root) and the scanning
- * volume information.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi)
+{
+       int err;
+       struct ubi_attach_info *ai;
+
+       ai = scan_all(ubi);
+       if (IS_ERR(ai))
+               return PTR_ERR(ai);
+
+       ubi->bad_peb_count = ai->bad_peb_count;
+       ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+       ubi->corr_peb_count = ai->corr_peb_count;
+       ubi->max_ec = ai->max_ec;
+       ubi->mean_ec = ai->mean_ec;
+       dbg_gen("max. sequence number:       %llu", ai->max_sqnum);
+
+       err = ubi_read_volume_table(ubi, ai);
+       if (err)
+               goto out_ai;
+
+       err = ubi_wl_init(ubi, ai);
+       if (err)
+               goto out_vtbl;
+
+       err = ubi_eba_init(ubi, ai);
+       if (err)
+               goto out_wl;
+
+       ubi_destroy_ai(ai);
+       return 0;
+
+out_wl:
+       ubi_wl_close(ubi);
+out_vtbl:
+       ubi_free_internal_volumes(ubi);
+       vfree(ubi->vtbl);
+out_ai:
+       ubi_destroy_ai(ai);
+       return err;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
  */
-static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
 {
-       struct ubi_scan_leb *seb;
-       struct rb_node *this = sv->root.rb_node;
+       struct ubi_ainf_peb *aeb;
+       struct rb_node *this = av->root.rb_node;
 
        while (this) {
                if (this->rb_left)
@@ -1259,224 +1287,222 @@ static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
                else if (this->rb_right)
                        this = this->rb_right;
                else {
-                       seb = rb_entry(this, struct ubi_scan_leb, u.rb);
+                       aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
                        this = rb_parent(this);
                        if (this) {
-                               if (this->rb_left == &seb->u.rb)
+                               if (this->rb_left == &aeb->u.rb)
                                        this->rb_left = NULL;
                                else
                                        this->rb_right = NULL;
                        }
 
-                       kmem_cache_free(si->scan_leb_slab, seb);
+                       kmem_cache_free(ai->aeb_slab_cache, aeb);
                }
        }
-       kfree(sv);
+       kfree(av);
 }
 
 /**
- * ubi_scan_destroy_si - destroy scanning information.
- * @si: scanning information
+ * ubi_destroy_ai - destroy attaching information.
+ * @ai: attaching information
  */
-void ubi_scan_destroy_si(struct ubi_scan_info *si)
+void ubi_destroy_ai(struct ubi_attach_info *ai)
 {
-       struct ubi_scan_leb *seb, *seb_tmp;
-       struct ubi_scan_volume *sv;
+       struct ubi_ainf_peb *aeb, *aeb_tmp;
+       struct ubi_ainf_volume *av;
        struct rb_node *rb;
 
-       list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
-               list_del(&seb->u.list);
-               kmem_cache_free(si->scan_leb_slab, seb);
+       list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+               list_del(&aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, aeb);
        }
-       list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
-               list_del(&seb->u.list);
-               kmem_cache_free(si->scan_leb_slab, seb);
+       list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+               list_del(&aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, aeb);
        }
-       list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
-               list_del(&seb->u.list);
-               kmem_cache_free(si->scan_leb_slab, seb);
+       list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+               list_del(&aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, aeb);
        }
-       list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
-               list_del(&seb->u.list);
-               kmem_cache_free(si->scan_leb_slab, seb);
+       list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+               list_del(&aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, aeb);
        }
 
        /* Destroy the volume RB-tree */
-       rb = si->volumes.rb_node;
+       rb = ai->volumes.rb_node;
        while (rb) {
                if (rb->rb_left)
                        rb = rb->rb_left;
                else if (rb->rb_right)
                        rb = rb->rb_right;
                else {
-                       sv = rb_entry(rb, struct ubi_scan_volume, rb);
+                       av = rb_entry(rb, struct ubi_ainf_volume, rb);
 
                        rb = rb_parent(rb);
                        if (rb) {
-                               if (rb->rb_left == &sv->rb)
+                               if (rb->rb_left == &av->rb)
                                        rb->rb_left = NULL;
                                else
                                        rb->rb_right = NULL;
                        }
 
-                       destroy_sv(si, sv);
+                       destroy_av(ai, av);
                }
        }
 
-       if (si->scan_leb_slab)
-               kmem_cache_destroy(si->scan_leb_slab);
+       if (ai->aeb_slab_cache)
+               kmem_cache_destroy(ai->aeb_slab_cache);
 
-       kfree(si);
+       kfree(ai);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-
 /**
- * paranoid_check_si - check the scanning information.
+ * self_check_ai - check the attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
- * This function returns zero if the scanning information is all right, and a
+ * This function returns zero if the attaching information is all right, and a
  * negative error code if not or if an error occurred.
  */
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
        int pnum, err, vols_found = 0;
        struct rb_node *rb1, *rb2;
-       struct ubi_scan_volume *sv;
-       struct ubi_scan_leb *seb, *last_seb;
+       struct ubi_ainf_volume *av;
+       struct ubi_ainf_peb *aeb, *last_aeb;
        uint8_t *buf;
 
        if (!ubi->dbg->chk_gen)
                return 0;
 
        /*
-        * At first, check that scanning information is OK.
+        * At first, check that attaching information is OK.
         */
-       ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+       ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
                int leb_count = 0;
 
                cond_resched();
 
                vols_found += 1;
 
-               if (si->is_empty) {
+               if (ai->is_empty) {
                        ubi_err("bad is_empty flag");
-                       goto bad_sv;
+                       goto bad_av;
                }
 
-               if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
-                   sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
-                   sv->data_pad < 0 || sv->last_data_size < 0) {
+               if (av->vol_id < 0 || av->highest_lnum < 0 ||
+                   av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+                   av->data_pad < 0 || av->last_data_size < 0) {
                        ubi_err("negative values");
-                       goto bad_sv;
+                       goto bad_av;
                }
 
-               if (sv->vol_id >= UBI_MAX_VOLUMES &&
-                   sv->vol_id < UBI_INTERNAL_VOL_START) {
+               if (av->vol_id >= UBI_MAX_VOLUMES &&
+                   av->vol_id < UBI_INTERNAL_VOL_START) {
                        ubi_err("bad vol_id");
-                       goto bad_sv;
+                       goto bad_av;
                }
 
-               if (sv->vol_id > si->highest_vol_id) {
+               if (av->vol_id > ai->highest_vol_id) {
                        ubi_err("highest_vol_id is %d, but vol_id %d is there",
-                               si->highest_vol_id, sv->vol_id);
+                               ai->highest_vol_id, av->vol_id);
                        goto out;
                }
 
-               if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
-                   sv->vol_type != UBI_STATIC_VOLUME) {
+               if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+                   av->vol_type != UBI_STATIC_VOLUME) {
                        ubi_err("bad vol_type");
-                       goto bad_sv;
+                       goto bad_av;
                }
 
-               if (sv->data_pad > ubi->leb_size / 2) {
+               if (av->data_pad > ubi->leb_size / 2) {
                        ubi_err("bad data_pad");
-                       goto bad_sv;
+                       goto bad_av;
                }
 
-               last_seb = NULL;
-               ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+               last_aeb = NULL;
+               ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
                        cond_resched();
 
-                       last_seb = seb;
+                       last_aeb = aeb;
                        leb_count += 1;
 
-                       if (seb->pnum < 0 || seb->ec < 0) {
+                       if (aeb->pnum < 0 || aeb->ec < 0) {
                                ubi_err("negative values");
-                               goto bad_seb;
+                               goto bad_aeb;
                        }
 
-                       if (seb->ec < si->min_ec) {
-                               ubi_err("bad si->min_ec (%d), %d found",
-                                       si->min_ec, seb->ec);
-                               goto bad_seb;
+                       if (aeb->ec < ai->min_ec) {
+                               ubi_err("bad ai->min_ec (%d), %d found",
+                                       ai->min_ec, aeb->ec);
+                               goto bad_aeb;
                        }
 
-                       if (seb->ec > si->max_ec) {
-                               ubi_err("bad si->max_ec (%d), %d found",
-                                       si->max_ec, seb->ec);
-                               goto bad_seb;
+                       if (aeb->ec > ai->max_ec) {
+                               ubi_err("bad ai->max_ec (%d), %d found",
+                                       ai->max_ec, aeb->ec);
+                               goto bad_aeb;
                        }
 
-                       if (seb->pnum >= ubi->peb_count) {
+                       if (aeb->pnum >= ubi->peb_count) {
                                ubi_err("too high PEB number %d, total PEBs %d",
-                                       seb->pnum, ubi->peb_count);
-                               goto bad_seb;
+                                       aeb->pnum, ubi->peb_count);
+                               goto bad_aeb;
                        }
 
-                       if (sv->vol_type == UBI_STATIC_VOLUME) {
-                               if (seb->lnum >= sv->used_ebs) {
+                       if (av->vol_type == UBI_STATIC_VOLUME) {
+                               if (aeb->lnum >= av->used_ebs) {
                                        ubi_err("bad lnum or used_ebs");
-                                       goto bad_seb;
+                                       goto bad_aeb;
                                }
                        } else {
-                               if (sv->used_ebs != 0) {
+                               if (av->used_ebs != 0) {
                                        ubi_err("non-zero used_ebs");
-                                       goto bad_seb;
+                                       goto bad_aeb;
                                }
                        }
 
-                       if (seb->lnum > sv->highest_lnum) {
+                       if (aeb->lnum > av->highest_lnum) {
                                ubi_err("incorrect highest_lnum or lnum");
-                               goto bad_seb;
+                               goto bad_aeb;
                        }
                }
 
-               if (sv->leb_count != leb_count) {
+               if (av->leb_count != leb_count) {
                        ubi_err("bad leb_count, %d objects in the tree",
                                leb_count);
-                       goto bad_sv;
+                       goto bad_av;
                }
 
-               if (!last_seb)
+               if (!last_aeb)
                        continue;
 
-               seb = last_seb;
+               aeb = last_aeb;
 
-               if (seb->lnum != sv->highest_lnum) {
+               if (aeb->lnum != av->highest_lnum) {
                        ubi_err("bad highest_lnum");
-                       goto bad_seb;
+                       goto bad_aeb;
                }
        }
 
-       if (vols_found != si->vols_found) {
-               ubi_err("bad si->vols_found %d, should be %d",
-                       si->vols_found, vols_found);
+       if (vols_found != ai->vols_found) {
+               ubi_err("bad ai->vols_found %d, should be %d",
+                       ai->vols_found, vols_found);
                goto out;
        }
 
-       /* Check that scanning information is correct */
-       ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-               last_seb = NULL;
-               ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+       /* Check that attaching information is correct */
+       ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+               last_aeb = NULL;
+               ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
                        int vol_type;
 
                        cond_resched();
 
-                       last_seb = seb;
+                       last_aeb = aeb;
 
-                       err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
+                       err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
                        if (err && err != UBI_IO_BITFLIPS) {
                                ubi_err("VID header is not OK (%d)", err);
                                if (err > 0)
@@ -1486,52 +1512,52 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
 
                        vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
                                   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
-                       if (sv->vol_type != vol_type) {
+                       if (av->vol_type != vol_type) {
                                ubi_err("bad vol_type");
                                goto bad_vid_hdr;
                        }
 
-                       if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
-                               ubi_err("bad sqnum %llu", seb->sqnum);
+                       if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+                               ubi_err("bad sqnum %llu", aeb->sqnum);
                                goto bad_vid_hdr;
                        }
 
-                       if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
-                               ubi_err("bad vol_id %d", sv->vol_id);
+                       if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+                               ubi_err("bad vol_id %d", av->vol_id);
                                goto bad_vid_hdr;
                        }
 
-                       if (sv->compat != vidh->compat) {
+                       if (av->compat != vidh->compat) {
                                ubi_err("bad compat %d", vidh->compat);
                                goto bad_vid_hdr;
                        }
 
-                       if (seb->lnum != be32_to_cpu(vidh->lnum)) {
-                               ubi_err("bad lnum %d", seb->lnum);
+                       if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+                               ubi_err("bad lnum %d", aeb->lnum);
                                goto bad_vid_hdr;
                        }
 
-                       if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
-                               ubi_err("bad used_ebs %d", sv->used_ebs);
+                       if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+                               ubi_err("bad used_ebs %d", av->used_ebs);
                                goto bad_vid_hdr;
                        }
 
-                       if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
-                               ubi_err("bad data_pad %d", sv->data_pad);
+                       if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+                               ubi_err("bad data_pad %d", av->data_pad);
                                goto bad_vid_hdr;
                        }
                }
 
-               if (!last_seb)
+               if (!last_aeb)
                        continue;
 
-               if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
-                       ubi_err("bad highest_lnum %d", sv->highest_lnum);
+               if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+                       ubi_err("bad highest_lnum %d", av->highest_lnum);
                        goto bad_vid_hdr;
                }
 
-               if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
-                       ubi_err("bad last_data_size %d", sv->last_data_size);
+               if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+                       ubi_err("bad last_data_size %d", av->last_data_size);
                        goto bad_vid_hdr;
                }
        }
@@ -1553,21 +1579,21 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
                        buf[pnum] = 1;
        }
 
-       ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
-               ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
-                       buf[seb->pnum] = 1;
+       ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+               ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+                       buf[aeb->pnum] = 1;
 
-       list_for_each_entry(seb, &si->free, u.list)
-               buf[seb->pnum] = 1;
+       list_for_each_entry(aeb, &ai->free, u.list)
+               buf[aeb->pnum] = 1;
 
-       list_for_each_entry(seb, &si->corr, u.list)
-               buf[seb->pnum] = 1;
+       list_for_each_entry(aeb, &ai->corr, u.list)
+               buf[aeb->pnum] = 1;
 
-       list_for_each_entry(seb, &si->erase, u.list)
-               buf[seb->pnum] = 1;
+       list_for_each_entry(aeb, &ai->erase, u.list)
+               buf[aeb->pnum] = 1;
 
-       list_for_each_entry(seb, &si->alien, u.list)
-               buf[seb->pnum] = 1;
+       list_for_each_entry(aeb, &ai->alien, u.list)
+               buf[aeb->pnum] = 1;
 
        err = 0;
        for (pnum = 0; pnum < ubi->peb_count; pnum++)
@@ -1581,25 +1607,23 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
                goto out;
        return 0;
 
-bad_seb:
-       ubi_err("bad scanning information about LEB %d", seb->lnum);
-       ubi_dbg_dump_seb(seb, 0);
-       ubi_dbg_dump_sv(sv);
+bad_aeb:
+       ubi_err("bad attaching information about LEB %d", aeb->lnum);
+       ubi_dump_aeb(aeb, 0);
+       ubi_dump_av(av);
        goto out;
 
-bad_sv:
-       ubi_err("bad scanning information about volume %d", sv->vol_id);
-       ubi_dbg_dump_sv(sv);
+bad_av:
+       ubi_err("bad attaching information about volume %d", av->vol_id);
+       ubi_dump_av(av);
        goto out;
 
 bad_vid_hdr:
-       ubi_err("bad scanning information about volume %d", sv->vol_id);
-       ubi_dbg_dump_sv(sv);
-       ubi_dbg_dump_vid_hdr(vidh);
+       ubi_err("bad attaching information about volume %d", av->vol_id);
+       ubi_dump_av(av);
+       ubi_dump_vid_hdr(vidh);
 
 out:
-       ubi_dbg_dump_stack();
+       dump_stack();
        return -EINVAL;
 }
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
index 1f9c363..8be9edb 100644 (file)
  * module load parameters or the kernel boot parameters. If MTD devices were
  * specified, UBI does not attach any MTD device, but it is possible to do
  * later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
  */
 
 #include <linux/err.h>
@@ -554,10 +550,10 @@ static void uif_close(struct ubi_device *ubi)
 }
 
 /**
- * free_internal_volumes - free internal volumes.
+ * ubi_free_internal_volumes - free internal volumes.
  * @ubi: UBI device description object
  */
-static void free_internal_volumes(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
 {
        int i;
 
@@ -568,59 +564,6 @@ static void free_internal_volumes(struct ubi_device *ubi)
        }
 }
 
-/**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
- */
-static int attach_by_scanning(struct ubi_device *ubi)
-{
-       int err;
-       struct ubi_scan_info *si;
-
-       si = ubi_scan(ubi);
-       if (IS_ERR(si))
-               return PTR_ERR(si);
-
-       ubi->bad_peb_count = si->bad_peb_count;
-       ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
-       ubi->corr_peb_count = si->corr_peb_count;
-       ubi->max_ec = si->max_ec;
-       ubi->mean_ec = si->mean_ec;
-       ubi_msg("max. sequence number:       %llu", si->max_sqnum);
-
-       err = ubi_read_volume_table(ubi, si);
-       if (err)
-               goto out_si;
-
-       err = ubi_wl_init_scan(ubi, si);
-       if (err)
-               goto out_vtbl;
-
-       err = ubi_eba_init_scan(ubi, si);
-       if (err)
-               goto out_wl;
-
-       ubi_scan_destroy_si(si);
-       return 0;
-
-out_wl:
-       ubi_wl_close(ubi);
-out_vtbl:
-       free_internal_volumes(ubi);
-       vfree(ubi->vtbl);
-out_si:
-       ubi_scan_destroy_si(si);
-       return err;
-}
-
 /**
  * io_init - initialize I/O sub-system for a given UBI device.
  * @ubi: UBI device description object
@@ -638,6 +581,9 @@ out_si:
  */
 static int io_init(struct ubi_device *ubi)
 {
+       dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+       dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
        if (ubi->mtd->numeraseregions != 0) {
                /*
                 * Some flashes have several erase regions. Different regions
@@ -707,11 +653,11 @@ static int io_init(struct ubi_device *ubi)
        ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
        ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
-       dbg_msg("min_io_size      %d", ubi->min_io_size);
-       dbg_msg("max_write_size   %d", ubi->max_write_size);
-       dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
-       dbg_msg("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
-       dbg_msg("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
+       dbg_gen("min_io_size      %d", ubi->min_io_size);
+       dbg_gen("max_write_size   %d", ubi->max_write_size);
+       dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+       dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
+       dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
 
        if (ubi->vid_hdr_offset == 0)
                /* Default offset */
@@ -728,10 +674,10 @@ static int io_init(struct ubi_device *ubi)
        ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
        ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
 
-       dbg_msg("vid_hdr_offset   %d", ubi->vid_hdr_offset);
-       dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
-       dbg_msg("vid_hdr_shift    %d", ubi->vid_hdr_shift);
-       dbg_msg("leb_start        %d", ubi->leb_start);
+       dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
+       dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+       dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
+       dbg_gen("leb_start        %d", ubi->leb_start);
 
        /* The shift must be aligned to 32-bit boundary */
        if (ubi->vid_hdr_shift % 4) {
@@ -757,7 +703,7 @@ static int io_init(struct ubi_device *ubi)
        ubi->max_erroneous = ubi->peb_count / 10;
        if (ubi->max_erroneous < 16)
                ubi->max_erroneous = 16;
-       dbg_msg("max_erroneous    %d", ubi->max_erroneous);
+       dbg_gen("max_erroneous    %d", ubi->max_erroneous);
 
        /*
         * It may happen that EC and VID headers are situated in one minimal
@@ -765,36 +711,24 @@ static int io_init(struct ubi_device *ubi)
         * read-only mode.
         */
        if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
-               ubi_warn("EC and VID headers are in the same minimal I/O unit, "
-                        "switch to read-only mode");
+               ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
                ubi->ro_mode = 1;
        }
 
        ubi->leb_size = ubi->peb_size - ubi->leb_start;
 
        if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
-               ubi_msg("MTD device %d is write-protected, attach in "
-                       "read-only mode", ubi->mtd->index);
+               ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+                       ubi->mtd->index);
                ubi->ro_mode = 1;
        }
 
-       ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
-               ubi->peb_size, ubi->peb_size >> 10);
-       ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
-       ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
-       if (ubi->hdrs_min_io_size != ubi->min_io_size)
-               ubi_msg("sub-page size:              %d",
-                       ubi->hdrs_min_io_size);
-       ubi_msg("VID header offset:          %d (aligned %d)",
-               ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
-       ubi_msg("data offset:                %d", ubi->leb_start);
-
        /*
-        * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+        * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
         * unfortunately, MTD does not provide this information. We should loop
         * over all physical eraseblocks and invoke mtd->block_is_bad() for
-        * each physical eraseblock. So, we skip ubi->bad_peb_count
-        * uninitialized and initialize it after scanning.
+        * each physical eraseblock. So, we leave @ubi->bad_peb_count
+        * uninitialized so far.
         */
 
        return 0;
@@ -805,7 +739,7 @@ static int io_init(struct ubi_device *ubi)
  * @ubi: UBI device description object
  * @vol_id: ID of the volume to re-size
  *
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
  * the volume table to the largest possible size. See comments in ubi-header.h
  * for more description of the flag. Returns zero in case of success and a
  * negative error code in case of failure.
@@ -886,7 +820,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
        for (i = 0; i < UBI_MAX_DEVICES; i++) {
                ubi = ubi_devices[i];
                if (ubi && mtd->index == ubi->mtd->index) {
-                       dbg_err("mtd%d is already attached to ubi%d",
+                       ubi_err("mtd%d is already attached to ubi%d",
                                mtd->index, i);
                        return -EEXIST;
                }
@@ -901,8 +835,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
         * no sense to attach emulated MTD devices, so we prohibit this.
         */
        if (mtd->type == MTD_UBIVOLUME) {
-               ubi_err("refuse attaching mtd%d - it is already emulated on "
-                       "top of UBI", mtd->index);
+               ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+                       mtd->index);
                return -EINVAL;
        }
 
@@ -912,7 +846,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
                        if (!ubi_devices[ubi_num])
                                break;
                if (ubi_num == UBI_MAX_DEVICES) {
-                       dbg_err("only %d UBI devices may be created",
+                       ubi_err("only %d UBI devices may be created",
                                UBI_MAX_DEVICES);
                        return -ENFILE;
                }
@@ -922,7 +856,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
 
                /* Make sure ubi_num is not busy */
                if (ubi_devices[ubi_num]) {
-                       dbg_err("ubi%d already exists", ubi_num);
+                       ubi_err("ubi%d already exists", ubi_num);
                        return -EEXIST;
                }
        }
@@ -942,29 +876,23 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
        spin_lock_init(&ubi->volumes_lock);
 
        ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
-       dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
-       dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
 
        err = io_init(ubi);
        if (err)
                goto out_free;
 
        err = -ENOMEM;
-       ubi->peb_buf1 = vmalloc(ubi->peb_size);
-       if (!ubi->peb_buf1)
-               goto out_free;
-
-       ubi->peb_buf2 = vmalloc(ubi->peb_size);
-       if (!ubi->peb_buf2)
+       ubi->peb_buf = vmalloc(ubi->peb_size);
+       if (!ubi->peb_buf)
                goto out_free;
 
        err = ubi_debugging_init_dev(ubi);
        if (err)
                goto out_free;
 
-       err = attach_by_scanning(ubi);
+       err = ubi_attach(ubi);
        if (err) {
-               dbg_err("failed to attach by scanning, error %d", err);
+               ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
                goto out_debugging;
        }
 
@@ -990,23 +918,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
                goto out_debugfs;
        }
 
-       ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
-       ubi_msg("MTD device name:            \"%s\"", mtd->name);
-       ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
-       ubi_msg("number of good PEBs:        %d", ubi->good_peb_count);
-       ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count);
-       ubi_msg("number of corrupted PEBs:   %d", ubi->corr_peb_count);
-       ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots);
-       ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD);
-       ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
-       ubi_msg("number of user volumes:     %d",
-               ubi->vol_count - UBI_INT_VOL_COUNT);
-       ubi_msg("available PEBs:             %d", ubi->avail_pebs);
-       ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
-       ubi_msg("number of PEBs reserved for bad PEB handling: %d",
-               ubi->beb_rsvd_pebs);
-       ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
-       ubi_msg("image sequence number:  %d", ubi->image_seq);
+       ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+               mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+       ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+               ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+       ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+               ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+       ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+               ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+       ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+               ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+       ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+               ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+               ubi->vtbl_slots);
+       ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+               ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+               ubi->image_seq);
+       ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+               ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
 
        /*
         * The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1029,13 +958,12 @@ out_uif:
        uif_close(ubi);
 out_detach:
        ubi_wl_close(ubi);
-       free_internal_volumes(ubi);
+       ubi_free_internal_volumes(ubi);
        vfree(ubi->vtbl);
 out_debugging:
        ubi_debugging_exit_dev(ubi);
 out_free:
-       vfree(ubi->peb_buf1);
-       vfree(ubi->peb_buf2);
+       vfree(ubi->peb_buf);
        if (ref)
                put_device(&ubi->dev);
        else
@@ -1084,7 +1012,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 
        ubi_assert(ubi_num == ubi->ubi_num);
        ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
-       dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+       ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
 
        /*
         * Before freeing anything, we have to stop the background thread to
@@ -1102,12 +1030,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
        ubi_debugfs_exit_dev(ubi);
        uif_close(ubi);
        ubi_wl_close(ubi);
-       free_internal_volumes(ubi);
+       ubi_free_internal_volumes(ubi);
        vfree(ubi->vtbl);
        put_mtd_device(ubi->mtd);
        ubi_debugging_exit_dev(ubi);
-       vfree(ubi->peb_buf1);
-       vfree(ubi->peb_buf2);
+       vfree(ubi->peb_buf);
        ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
        put_device(&ubi->dev);
        return 0;
@@ -1320,8 +1247,7 @@ static int __init bytes_str_to_int(const char *str)
 
        result = simple_strtoul(str, &endp, 0);
        if (str == endp || result >= INT_MAX) {
-               printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
-                      str);
+               ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
                return -EINVAL;
        }
 
@@ -1337,8 +1263,7 @@ static int __init bytes_str_to_int(const char *str)
        case '\0':
                break;
        default:
-               printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
-                      str);
+               ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
                return -EINVAL;
        }
 
@@ -1365,21 +1290,20 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
                return -EINVAL;
 
        if (mtd_devs == UBI_MAX_DEVICES) {
-               printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
-                      UBI_MAX_DEVICES);
+               ubi_err("UBI error: too many parameters, max. is %d\n",
+                       UBI_MAX_DEVICES);
                return -EINVAL;
        }
 
        len = strnlen(val, MTD_PARAM_LEN_MAX);
        if (len == MTD_PARAM_LEN_MAX) {
-               printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
-                      "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+               ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+                       val, MTD_PARAM_LEN_MAX);
                return -EINVAL;
        }
 
        if (len == 0) {
-               printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
-                      "ignored\n");
+               pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
                return 0;
        }
 
@@ -1393,8 +1317,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
                tokens[i] = strsep(&pbuf, ",");
 
        if (pbuf) {
-               printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
-                      val);
+               ubi_err("UBI error: too many arguments at \"%s\"\n", val);
                return -EINVAL;
        }
 
index 7ac2c05..1408b67 100644 (file)
@@ -63,7 +63,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
        users = vol->readers + vol->writers + vol->exclusive;
        ubi_assert(users > 0);
        if (users > 1) {
-               dbg_err("%d users for volume %d", users, vol->vol_id);
+               ubi_err("%d users for volume %d", users, vol->vol_id);
                err = -EBUSY;
        } else {
                vol->readers = vol->writers = 0;
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
                vol->updating = 0;
                vfree(vol->upd_buf);
        } else if (vol->changing_leb) {
-               dbg_gen("only %lld of %lld bytes received for atomic LEB change"
-                       " for volume %d:%d, cancel", vol->upd_received,
-                       vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+               dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+                       vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+                       vol->vol_id);
                vol->changing_leb = 0;
                vfree(vol->upd_buf);
        }
@@ -159,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
 
        if (vol->updating) {
                /* Update is in progress, seeking is prohibited */
-               dbg_err("updating");
+               ubi_err("updating");
                return -EBUSY;
        }
 
@@ -178,7 +178,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
        }
 
        if (new_offset < 0 || new_offset > vol->used_bytes) {
-               dbg_err("bad seek %lld", new_offset);
+               ubi_err("bad seek %lld", new_offset);
                return -EINVAL;
        }
 
@@ -189,7 +189,8 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
        return new_offset;
 }
 
-static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+                         int datasync)
 {
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_device *ubi = desc->vol->ubi;
@@ -216,11 +217,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
                count, *offp, vol->vol_id);
 
        if (vol->updating) {
-               dbg_err("updating");
+               ubi_err("updating");
                return -EBUSY;
        }
        if (vol->upd_marker) {
-               dbg_err("damaged volume, update marker is set");
+               ubi_err("damaged volume, update marker is set");
                return -EBADF;
        }
        if (*offp == vol->used_bytes || count == 0)
@@ -300,7 +301,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
 
        lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
        if (off & (ubi->min_io_size - 1)) {
-               dbg_err("unaligned position");
+               ubi_err("unaligned position");
                return -EINVAL;
        }
 
@@ -309,7 +310,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
 
        /* We can write only in fractions of the minimum I/O unit */
        if (count & (ubi->min_io_size - 1)) {
-               dbg_err("unaligned write length");
+               ubi_err("unaligned write length");
                return -EINVAL;
        }
 
@@ -334,8 +335,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
                        break;
                }
 
-               err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
-                                       UBI_UNKNOWN);
+               err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
                if (err)
                        break;
 
@@ -477,9 +477,6 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
                if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
                    req.bytes < 0 || req.bytes > vol->usable_leb_size)
                        break;
-               if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
-                   req.dtype != UBI_UNKNOWN)
-                       break;
 
                err = get_exclusive(desc);
                if (err < 0)
@@ -518,7 +515,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
                if (err)
                        break;
 
-               err = ubi_wl_flush(ubi);
+               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
                break;
        }
 
@@ -532,7 +529,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
                        err = -EFAULT;
                        break;
                }
-               err = ubi_leb_map(desc, req.lnum, req.dtype);
+               err = ubi_leb_map(desc, req.lnum);
                break;
        }
 
@@ -647,8 +644,8 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
        return 0;
 
 bad:
-       dbg_err("bad volume creation request");
-       ubi_dbg_dump_mkvol_req(req);
+       ubi_err("bad volume creation request");
+       ubi_dump_mkvol_req(req);
        return err;
 }
 
@@ -713,12 +710,12 @@ static int rename_volumes(struct ubi_device *ubi,
        for (i = 0; i < req->count - 1; i++) {
                for (n = i + 1; n < req->count; n++) {
                        if (req->ents[i].vol_id == req->ents[n].vol_id) {
-                               dbg_err("duplicated volume id %d",
+                               ubi_err("duplicated volume id %d",
                                        req->ents[i].vol_id);
                                return -EINVAL;
                        }
                        if (!strcmp(req->ents[i].name, req->ents[n].name)) {
-                               dbg_err("duplicated volume name \"%s\"",
+                               ubi_err("duplicated volume name \"%s\"",
                                        req->ents[i].name);
                                return -EINVAL;
                        }
@@ -741,7 +738,7 @@ static int rename_volumes(struct ubi_device *ubi,
                re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
                if (IS_ERR(re->desc)) {
                        err = PTR_ERR(re->desc);
-                       dbg_err("cannot open volume %d, error %d", vol_id, err);
+                       ubi_err("cannot open volume %d, error %d", vol_id, err);
                        kfree(re);
                        goto out_free;
                }
@@ -757,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
                re->new_name_len = name_len;
                memcpy(re->new_name, name, name_len);
                list_add_tail(&re->list, &rename_list);
-               dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
+               dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
                        vol_id, re->desc->vol->name, name);
        }
 
@@ -800,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi,
                                continue;
 
                        /* The volume exists but busy, or an error occurred */
-                       dbg_err("cannot open volume \"%s\", error %d",
+                       ubi_err("cannot open volume \"%s\", error %d",
                                re->new_name, err);
                        goto out_free;
                }
@@ -815,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
                re1->remove = 1;
                re1->desc = desc;
                list_add(&re1->list, &rename_list);
-               dbg_msg("will remove volume %d, name \"%s\"",
+               dbg_gen("will remove volume %d, name \"%s\"",
                        re1->desc->vol->vol_id, re1->desc->vol->name);
        }
 
@@ -946,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
        {
                struct ubi_rnvol_req *req;
 
-               dbg_msg("re-name volumes");
+               dbg_gen("re-name volumes");
                req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
                if (!req) {
                        err = -ENOMEM;
index ab80c0d..33a60a5 100644 (file)
  * Author: Artem Bityutskiy (Битюцкий Артём)
  */
 
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
- */
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-
 #include "ubi.h"
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 
+
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+       int err;
+       size_t read;
+       void *buf;
+       loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+       buf = vmalloc(len);
+       if (!buf)
+               return;
+       err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+       if (err && err != -EUCLEAN) {
+               ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+                       err, len, pnum, offset, read);
+               goto out;
+       }
+
+       ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+               len, pnum, offset);
+       print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+       vfree(buf);
+       return;
+}
+
 /**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_ec_hdr - dump an erase counter header.
  * @ec_hdr: the erase counter header to dump
  */
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
 {
-       printk(KERN_DEBUG "Erase counter header dump:\n");
-       printk(KERN_DEBUG "\tmagic          %#08x\n",
-              be32_to_cpu(ec_hdr->magic));
-       printk(KERN_DEBUG "\tversion        %d\n", (int)ec_hdr->version);
-       printk(KERN_DEBUG "\tec             %llu\n",
-              (long long)be64_to_cpu(ec_hdr->ec));
-       printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
-              be32_to_cpu(ec_hdr->vid_hdr_offset));
-       printk(KERN_DEBUG "\tdata_offset    %d\n",
-              be32_to_cpu(ec_hdr->data_offset));
-       printk(KERN_DEBUG "\timage_seq      %d\n",
-              be32_to_cpu(ec_hdr->image_seq));
-       printk(KERN_DEBUG "\thdr_crc        %#08x\n",
-              be32_to_cpu(ec_hdr->hdr_crc));
-       printk(KERN_DEBUG "erase counter header hexdump:\n");
+       pr_err("Erase counter header dump:\n");
+       pr_err("\tmagic          %#08x\n", be32_to_cpu(ec_hdr->magic));
+       pr_err("\tversion        %d\n", (int)ec_hdr->version);
+       pr_err("\tec             %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+       pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+       pr_err("\tdata_offset    %d\n", be32_to_cpu(ec_hdr->data_offset));
+       pr_err("\timage_seq      %d\n", be32_to_cpu(ec_hdr->image_seq));
+       pr_err("\thdr_crc        %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+       pr_err("erase counter header hexdump:\n");
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                       ec_hdr, UBI_EC_HDR_SIZE, 1);
 }
 
 /**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
  * @vid_hdr: the volume identifier header to dump
  */
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
 {
-       printk(KERN_DEBUG "Volume identifier header dump:\n");
-       printk(KERN_DEBUG "\tmagic     %08x\n", be32_to_cpu(vid_hdr->magic));
-       printk(KERN_DEBUG "\tversion   %d\n",  (int)vid_hdr->version);
-       printk(KERN_DEBUG "\tvol_type  %d\n",  (int)vid_hdr->vol_type);
-       printk(KERN_DEBUG "\tcopy_flag %d\n",  (int)vid_hdr->copy_flag);
-       printk(KERN_DEBUG "\tcompat    %d\n",  (int)vid_hdr->compat);
-       printk(KERN_DEBUG "\tvol_id    %d\n",  be32_to_cpu(vid_hdr->vol_id));
-       printk(KERN_DEBUG "\tlnum      %d\n",  be32_to_cpu(vid_hdr->lnum));
-       printk(KERN_DEBUG "\tdata_size %d\n",  be32_to_cpu(vid_hdr->data_size));
-       printk(KERN_DEBUG "\tused_ebs  %d\n",  be32_to_cpu(vid_hdr->used_ebs));
-       printk(KERN_DEBUG "\tdata_pad  %d\n",  be32_to_cpu(vid_hdr->data_pad));
-       printk(KERN_DEBUG "\tsqnum     %llu\n",
+       pr_err("Volume identifier header dump:\n");
+       pr_err("\tmagic     %08x\n", be32_to_cpu(vid_hdr->magic));
+       pr_err("\tversion   %d\n",  (int)vid_hdr->version);
+       pr_err("\tvol_type  %d\n",  (int)vid_hdr->vol_type);
+       pr_err("\tcopy_flag %d\n",  (int)vid_hdr->copy_flag);
+       pr_err("\tcompat    %d\n",  (int)vid_hdr->compat);
+       pr_err("\tvol_id    %d\n",  be32_to_cpu(vid_hdr->vol_id));
+       pr_err("\tlnum      %d\n",  be32_to_cpu(vid_hdr->lnum));
+       pr_err("\tdata_size %d\n",  be32_to_cpu(vid_hdr->data_size));
+       pr_err("\tused_ebs  %d\n",  be32_to_cpu(vid_hdr->used_ebs));
+       pr_err("\tdata_pad  %d\n",  be32_to_cpu(vid_hdr->data_pad));
+       pr_err("\tsqnum     %llu\n",
                (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
-       printk(KERN_DEBUG "\thdr_crc   %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
-       printk(KERN_DEBUG "Volume identifier header hexdump:\n");
+       pr_err("\thdr_crc   %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+       pr_err("Volume identifier header hexdump:\n");
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                       vid_hdr, UBI_VID_HDR_SIZE, 1);
 }
 
 /**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
  * @vol: UBI volume description object
  */
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
 {
-       printk(KERN_DEBUG "Volume information dump:\n");
-       printk(KERN_DEBUG "\tvol_id          %d\n", vol->vol_id);
-       printk(KERN_DEBUG "\treserved_pebs   %d\n", vol->reserved_pebs);
-       printk(KERN_DEBUG "\talignment       %d\n", vol->alignment);
-       printk(KERN_DEBUG "\tdata_pad        %d\n", vol->data_pad);
-       printk(KERN_DEBUG "\tvol_type        %d\n", vol->vol_type);
-       printk(KERN_DEBUG "\tname_len        %d\n", vol->name_len);
-       printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
-       printk(KERN_DEBUG "\tused_ebs        %d\n", vol->used_ebs);
-       printk(KERN_DEBUG "\tused_bytes      %lld\n", vol->used_bytes);
-       printk(KERN_DEBUG "\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
-       printk(KERN_DEBUG "\tcorrupted       %d\n", vol->corrupted);
-       printk(KERN_DEBUG "\tupd_marker      %d\n", vol->upd_marker);
+       pr_err("Volume information dump:\n");
+       pr_err("\tvol_id          %d\n", vol->vol_id);
+       pr_err("\treserved_pebs   %d\n", vol->reserved_pebs);
+       pr_err("\talignment       %d\n", vol->alignment);
+       pr_err("\tdata_pad        %d\n", vol->data_pad);
+       pr_err("\tvol_type        %d\n", vol->vol_type);
+       pr_err("\tname_len        %d\n", vol->name_len);
+       pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+       pr_err("\tused_ebs        %d\n", vol->used_ebs);
+       pr_err("\tused_bytes      %lld\n", vol->used_bytes);
+       pr_err("\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
+       pr_err("\tcorrupted       %d\n", vol->corrupted);
+       pr_err("\tupd_marker      %d\n", vol->upd_marker);
 
        if (vol->name_len <= UBI_VOL_NAME_MAX &&
            strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
-               printk(KERN_DEBUG "\tname            %s\n", vol->name);
+               pr_err("\tname            %s\n", vol->name);
        } else {
-               printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+               pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
                       vol->name[0], vol->name[1], vol->name[2],
                       vol->name[3], vol->name[4]);
        }
 }
 
 /**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
  * @r: the object to dump
  * @idx: volume table index
  */
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
 {
        int name_len = be16_to_cpu(r->name_len);
 
-       printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
-       printk(KERN_DEBUG "\treserved_pebs   %d\n",
-              be32_to_cpu(r->reserved_pebs));
-       printk(KERN_DEBUG "\talignment       %d\n", be32_to_cpu(r->alignment));
-       printk(KERN_DEBUG "\tdata_pad        %d\n", be32_to_cpu(r->data_pad));
-       printk(KERN_DEBUG "\tvol_type        %d\n", (int)r->vol_type);
-       printk(KERN_DEBUG "\tupd_marker      %d\n", (int)r->upd_marker);
-       printk(KERN_DEBUG "\tname_len        %d\n", name_len);
+       pr_err("Volume table record %d dump:\n", idx);
+       pr_err("\treserved_pebs   %d\n", be32_to_cpu(r->reserved_pebs));
+       pr_err("\talignment       %d\n", be32_to_cpu(r->alignment));
+       pr_err("\tdata_pad        %d\n", be32_to_cpu(r->data_pad));
+       pr_err("\tvol_type        %d\n", (int)r->vol_type);
+       pr_err("\tupd_marker      %d\n", (int)r->upd_marker);
+       pr_err("\tname_len        %d\n", name_len);
 
        if (r->name[0] == '\0') {
-               printk(KERN_DEBUG "\tname            NULL\n");
+               pr_err("\tname            NULL\n");
                return;
        }
 
        if (name_len <= UBI_VOL_NAME_MAX &&
            strnlen(&r->name[0], name_len + 1) == name_len) {
-               printk(KERN_DEBUG "\tname            %s\n", &r->name[0]);
+               pr_err("\tname            %s\n", &r->name[0]);
        } else {
-               printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+               pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
                        r->name[0], r->name[1], r->name[2], r->name[3],
                        r->name[4]);
        }
-       printk(KERN_DEBUG "\tcrc             %#08x\n", be32_to_cpu(r->crc));
+       pr_err("\tcrc             %#08x\n", be32_to_cpu(r->crc));
 }
 
 /**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
  */
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
 {
-       printk(KERN_DEBUG "Volume scanning information dump:\n");
-       printk(KERN_DEBUG "\tvol_id         %d\n", sv->vol_id);
-       printk(KERN_DEBUG "\thighest_lnum   %d\n", sv->highest_lnum);
-       printk(KERN_DEBUG "\tleb_count      %d\n", sv->leb_count);
-       printk(KERN_DEBUG "\tcompat         %d\n", sv->compat);
-       printk(KERN_DEBUG "\tvol_type       %d\n", sv->vol_type);
-       printk(KERN_DEBUG "\tused_ebs       %d\n", sv->used_ebs);
-       printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
-       printk(KERN_DEBUG "\tdata_pad       %d\n", sv->data_pad);
+       pr_err("Volume attaching information dump:\n");
+       pr_err("\tvol_id         %d\n", av->vol_id);
+       pr_err("\thighest_lnum   %d\n", av->highest_lnum);
+       pr_err("\tleb_count      %d\n", av->leb_count);
+       pr_err("\tcompat         %d\n", av->compat);
+       pr_err("\tvol_type       %d\n", av->vol_type);
+       pr_err("\tused_ebs       %d\n", av->used_ebs);
+       pr_err("\tlast_data_size %d\n", av->last_data_size);
+       pr_err("\tdata_pad       %d\n", av->data_pad);
 }
 
 /**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
  * @type: object type: 0 - not corrupted, 1 - corrupted
  */
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
 {
-       printk(KERN_DEBUG "eraseblock scanning information dump:\n");
-       printk(KERN_DEBUG "\tec       %d\n", seb->ec);
-       printk(KERN_DEBUG "\tpnum     %d\n", seb->pnum);
+       pr_err("eraseblock attaching information dump:\n");
+       pr_err("\tec       %d\n", aeb->ec);
+       pr_err("\tpnum     %d\n", aeb->pnum);
        if (type == 0) {
-               printk(KERN_DEBUG "\tlnum     %d\n", seb->lnum);
-               printk(KERN_DEBUG "\tscrub    %d\n", seb->scrub);
-               printk(KERN_DEBUG "\tsqnum    %llu\n", seb->sqnum);
+               pr_err("\tlnum     %d\n", aeb->lnum);
+               pr_err("\tscrub    %d\n", aeb->scrub);
+               pr_err("\tsqnum    %llu\n", aeb->sqnum);
        }
 }
 
 /**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
  * @req: the object to dump
  */
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
 {
        char nm[17];
 
-       printk(KERN_DEBUG "Volume creation request dump:\n");
-       printk(KERN_DEBUG "\tvol_id    %d\n",   req->vol_id);
-       printk(KERN_DEBUG "\talignment %d\n",   req->alignment);
-       printk(KERN_DEBUG "\tbytes     %lld\n", (long long)req->bytes);
-       printk(KERN_DEBUG "\tvol_type  %d\n",   req->vol_type);
-       printk(KERN_DEBUG "\tname_len  %d\n",   req->name_len);
+       pr_err("Volume creation request dump:\n");
+       pr_err("\tvol_id    %d\n",   req->vol_id);
+       pr_err("\talignment %d\n",   req->alignment);
+       pr_err("\tbytes     %lld\n", (long long)req->bytes);
+       pr_err("\tvol_type  %d\n",   req->vol_type);
+       pr_err("\tname_len  %d\n",   req->name_len);
 
        memcpy(nm, req->name, 16);
        nm[16] = 0;
-       printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
-}
-
-/**
- * ubi_dbg_dump_flash - dump a region of flash.
- * @ubi: UBI device description object
- * @pnum: the physical eraseblock number to dump
- * @offset: the starting offset within the physical eraseblock to dump
- * @len: the length of the region to dump
- */
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
-{
-       int err;
-       size_t read;
-       void *buf;
-       loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
-
-       buf = vmalloc(len);
-       if (!buf)
-               return;
-       err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
-       if (err && err != -EUCLEAN) {
-               ubi_err("error %d while reading %d bytes from PEB %d:%d, "
-                       "read %zd bytes", err, len, pnum, offset, read);
-               goto out;
-       }
-
-       dbg_msg("dumping %d bytes of data from PEB %d, offset %d",
-               len, pnum, offset);
-       print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
-out:
-       vfree(buf);
-       return;
+       pr_err("\t1st 16 characters of name: %s\n", nm);
 }
 
 /**
@@ -271,6 +257,9 @@ static struct dentry *dfs_rootdir;
  */
 int ubi_debugfs_init(void)
 {
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        dfs_rootdir = debugfs_create_dir("ubi", NULL);
        if (IS_ERR_OR_NULL(dfs_rootdir)) {
                int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -288,7 +277,8 @@ int ubi_debugfs_init(void)
  */
 void ubi_debugfs_exit(void)
 {
-       debugfs_remove(dfs_rootdir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove(dfs_rootdir);
 }
 
 /* Read an UBI debugfs file */
@@ -418,6 +408,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
        struct dentry *dent;
        struct ubi_debug_info *d = ubi->dbg;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
                     ubi->ubi_num);
        if (n == UBI_DFS_DIR_LEN) {
@@ -485,7 +478,6 @@ out:
  */
 void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 {
-       debugfs_remove_recursive(ubi->dbg->dfs_dir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(ubi->dbg->dfs_dir);
 }
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
index ead2cd1..3dbc877 100644 (file)
 #ifndef __UBI_DEBUG_H__
 #define __UBI_DEBUG_H__
 
-#ifdef CONFIG_MTD_UBI_DEBUG
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
 #include <linux/random.h>
 
 #define ubi_assert(expr)  do {                                               \
        if (unlikely(!(expr))) {                                             \
-               printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+               pr_crit("UBI assert failed in %s at %u (pid %d)\n",          \
                       __func__, __LINE__, current->pid);                    \
-               ubi_dbg_dump_stack();                                        \
+               dump_stack();                                                \
        }                                                                    \
 } while (0)
 
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-
-#define ubi_dbg_dump_stack() dump_stack()
-
-#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a)  \
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a)                   \
                print_hex_dump(l, ps, pt, r, g, b, len, a)
 
 #define ubi_dbg_msg(type, fmt, ...) \
-       pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
-
-/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...)                                    \
-       printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
-              current->pid, __func__, ##__VA_ARGS__)
+       pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid,       \
+                ##__VA_ARGS__)
 
 /* General debugging messages */
 #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
 /* Initialization and build messages */
 #define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
 
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
-                       int offset, int len);
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+                         int len);
 int ubi_debugging_init_dev(struct ubi_device *ubi);
 void ubi_debugging_exit_dev(struct ubi_device *ubi);
 int ubi_debugfs_init(void);
@@ -167,73 +158,4 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
        return 0;
 }
 
-#else
-
-/* Use "if (0)" to make compiler check arguments even if debugging is off */
-#define ubi_assert(expr)  do {                                               \
-       if (0) {                                                             \
-               printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
-                      __func__, __LINE__, current->pid);                    \
-       }                                                                    \
-} while (0)
-
-#define dbg_err(fmt, ...) do {                                               \
-       if (0)                                                               \
-               ubi_err(fmt, ##__VA_ARGS__);                                 \
-} while (0)
-
-#define ubi_dbg_msg(fmt, ...) do {                                           \
-       if (0)                                                               \
-               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \
-} while (0)
-
-#define dbg_msg(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gen(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_eba(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_wl(fmt, ...)   ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_io(fmt, ...)   ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_bld(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
-
-static inline void ubi_dbg_dump_stack(void)                          { return; }
-static inline void
-ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)                 { return; }
-static inline void
-ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)              { return; }
-static inline void
-ubi_dbg_dump_vol_info(const struct ubi_volume *vol)                  { return; }
-static inline void
-ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)   { return; }
-static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
-static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
-                                   int type)                        { return; }
-static inline void
-ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)              { return; }
-static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
-                                     int pnum, int offset, int len) { return; }
-static inline void
-ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
-                      int g, const void *b, size_t len, bool a)     { return; }
-static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
-                                      int pnum, int offset,
-                                      int len)                    { return 0; }
-static inline int ubi_dbg_check_write(struct ubi_device *ubi,
-                                     const void *buf, int pnum,
-                                     int offset, int len)         { return 0; }
-
-static inline int ubi_debugging_init_dev(struct ubi_device *ubi)   { return 0; }
-static inline void ubi_debugging_exit_dev(struct ubi_device *ubi)  { return; }
-static inline int ubi_debugfs_init(void)                           { return 0; }
-static inline void ubi_debugfs_exit(void)                          { return; }
-static inline int ubi_debugfs_init_dev(struct ubi_device *ubi)     { return 0; }
-static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi)    { return; }
-
-static inline int
-ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)              { return 0; }
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_write_failure(const struct ubi_device *ubi)             { return 0; }
-static inline int
-ubi_dbg_is_erase_failure(const struct ubi_device *ubi)             { return 0; }
-
-#endif /* !CONFIG_MTD_UBI_DEBUG */
 #endif /* !__UBI_DEBUG_H__ */
index 22b3636..9ca92fa 100644 (file)
@@ -341,7 +341,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
        dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
 
        vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
-       err = ubi_wl_put_peb(ubi, pnum, 0);
+       err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 
 out_unlock:
        leb_write_unlock(ubi, vol_id, lnum);
@@ -420,9 +420,8 @@ retry:
                                 */
                                if (err == UBI_IO_BAD_HDR_EBADMSG ||
                                    err == UBI_IO_BAD_HDR) {
-                                       ubi_warn("corrupted VID header at PEB "
-                                                "%d, LEB %d:%d", pnum, vol_id,
-                                                lnum);
+                                       ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+                                                pnum, vol_id, lnum);
                                        err = -EBADMSG;
                                } else
                                        ubi_ro_mode(ubi);
@@ -507,7 +506,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
                return -ENOMEM;
 
 retry:
-       new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+       new_pnum = ubi_wl_get_peb(ubi);
        if (new_pnum < 0) {
                ubi_free_vid_hdr(ubi, vid_hdr);
                return new_pnum;
@@ -529,18 +528,18 @@ retry:
 
        data_size = offset + len;
        mutex_lock(&ubi->buf_mutex);
-       memset(ubi->peb_buf1 + offset, 0xFF, len);
+       memset(ubi->peb_buf + offset, 0xFF, len);
 
        /* Read everything before the area where the write failure happened */
        if (offset > 0) {
-               err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
+               err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
                if (err && err != UBI_IO_BITFLIPS)
                        goto out_unlock;
        }
 
-       memcpy(ubi->peb_buf1 + offset, buf, len);
+       memcpy(ubi->peb_buf + offset, buf, len);
 
-       err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
+       err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
        if (err) {
                mutex_unlock(&ubi->buf_mutex);
                goto write_error;
@@ -550,7 +549,7 @@ retry:
        ubi_free_vid_hdr(ubi, vid_hdr);
 
        vol->eba_tbl[lnum] = new_pnum;
-       ubi_wl_put_peb(ubi, pnum, 1);
+       ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 
        ubi_msg("data was successfully recovered");
        return 0;
@@ -558,7 +557,7 @@ retry:
 out_unlock:
        mutex_unlock(&ubi->buf_mutex);
 out_put:
-       ubi_wl_put_peb(ubi, new_pnum, 1);
+       ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
 
@@ -568,7 +567,7 @@ write_error:
         * get another one.
         */
        ubi_warn("failed to write to PEB %d", new_pnum);
-       ubi_wl_put_peb(ubi, new_pnum, 1);
+       ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
        if (++tries > UBI_IO_RETRIES) {
                ubi_free_vid_hdr(ubi, vid_hdr);
                return err;
@@ -585,7 +584,6 @@ write_error:
  * @buf: the data to write
  * @offset: offset within the logical eraseblock where to write
  * @len: how many bytes to write
- * @dtype: data type
  *
  * This function writes data to logical eraseblock @lnum of a dynamic volume
  * @vol. Returns zero in case of success and a negative error code in case
@@ -593,7 +591,7 @@ write_error:
  * written to the flash media, but may be some garbage.
  */
 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
-                     const void *buf, int offset, int len, int dtype)
+                     const void *buf, int offset, int len)
 {
        int err, pnum, tries = 0, vol_id = vol->vol_id;
        struct ubi_vid_hdr *vid_hdr;
@@ -641,7 +639,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
        vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
 
 retry:
-       pnum = ubi_wl_get_peb(ubi, dtype);
+       pnum = ubi_wl_get_peb(ubi);
        if (pnum < 0) {
                ubi_free_vid_hdr(ubi, vid_hdr);
                leb_write_unlock(ubi, vol_id, lnum);
@@ -661,9 +659,8 @@ retry:
        if (len) {
                err = ubi_io_write_data(ubi, buf, pnum, offset, len);
                if (err) {
-                       ubi_warn("failed to write %d bytes at offset %d of "
-                                "LEB %d:%d, PEB %d", len, offset, vol_id,
-                                lnum, pnum);
+                       ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+                                len, offset, vol_id, lnum, pnum);
                        goto write_error;
                }
        }
@@ -687,7 +684,7 @@ write_error:
         * eraseblock, so just put it and request a new one. We assume that if
         * this physical eraseblock went bad, the erase code will handle that.
         */
-       err = ubi_wl_put_peb(ubi, pnum, 1);
+       err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
        if (err || ++tries > UBI_IO_RETRIES) {
                ubi_ro_mode(ubi);
                leb_write_unlock(ubi, vol_id, lnum);
@@ -707,7 +704,6 @@ write_error:
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
- * @dtype: data type
  * @used_ebs: how many logical eraseblocks will this volume contain
  *
  * This function writes data to logical eraseblock @lnum of static volume
@@ -724,8 +720,7 @@ write_error:
  * code in case of failure.
  */
 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
-                        int lnum, const void *buf, int len, int dtype,
-                        int used_ebs)
+                        int lnum, const void *buf, int len, int used_ebs)
 {
        int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
        struct ubi_vid_hdr *vid_hdr;
@@ -763,7 +758,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
        vid_hdr->data_crc = cpu_to_be32(crc);
 
 retry:
-       pnum = ubi_wl_get_peb(ubi, dtype);
+       pnum = ubi_wl_get_peb(ubi);
        if (pnum < 0) {
                ubi_free_vid_hdr(ubi, vid_hdr);
                leb_write_unlock(ubi, vol_id, lnum);
@@ -807,7 +802,7 @@ write_error:
                return err;
        }
 
-       err = ubi_wl_put_peb(ubi, pnum, 1);
+       err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
        if (err || ++tries > UBI_IO_RETRIES) {
                ubi_ro_mode(ubi);
                leb_write_unlock(ubi, vol_id, lnum);
@@ -827,7 +822,6 @@ write_error:
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
- * @dtype: data type
  *
  * This function changes the contents of a logical eraseblock atomically. @buf
  * has to contain new logical eraseblock data, and @len - the length of the
@@ -839,7 +833,7 @@ write_error:
  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
-                             int lnum, const void *buf, int len, int dtype)
+                             int lnum, const void *buf, int len)
 {
        int err, pnum, tries = 0, vol_id = vol->vol_id;
        struct ubi_vid_hdr *vid_hdr;
@@ -856,7 +850,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
                err = ubi_eba_unmap_leb(ubi, vol, lnum);
                if (err)
                        return err;
-               return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+               return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
        }
 
        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -881,7 +875,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
        vid_hdr->data_crc = cpu_to_be32(crc);
 
 retry:
-       pnum = ubi_wl_get_peb(ubi, dtype);
+       pnum = ubi_wl_get_peb(ubi);
        if (pnum < 0) {
                err = pnum;
                goto out_leb_unlock;
@@ -905,7 +899,7 @@ retry:
        }
 
        if (vol->eba_tbl[lnum] >= 0) {
-               err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+               err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
                if (err)
                        goto out_leb_unlock;
        }
@@ -930,7 +924,7 @@ write_error:
                goto out_leb_unlock;
        }
 
-       err = ubi_wl_put_peb(ubi, pnum, 1);
+       err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
        if (err || ++tries > UBI_IO_RETRIES) {
                ubi_ro_mode(ubi);
                goto out_leb_unlock;
@@ -979,7 +973,7 @@ static int is_error_sane(int err)
  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
  * function. Returns:
  *   o %0 in case of success;
- *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
+ *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
  *   o a negative error code in case of failure.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -1044,22 +1038,21 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         * cancel it.
         */
        if (vol->eba_tbl[lnum] != from) {
-               dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
-                      "PEB %d, cancel", vol_id, lnum, from,
-                      vol->eba_tbl[lnum]);
+               dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+                      vol_id, lnum, from, vol->eba_tbl[lnum]);
                err = MOVE_CANCEL_RACE;
                goto out_unlock_leb;
        }
 
        /*
         * OK, now the LEB is locked and we can safely start moving it. Since
-        * this function utilizes the @ubi->peb_buf1 buffer which is shared
+        * this function utilizes the @ubi->peb_buf buffer which is shared
         * with some other functions - we lock the buffer by taking the
         * @ubi->buf_mutex.
         */
        mutex_lock(&ubi->buf_mutex);
        dbg_wl("read %d bytes of data", aldata_size);
-       err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
+       err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
        if (err && err != UBI_IO_BITFLIPS) {
                ubi_warn("error %d while reading data from PEB %d",
                         err, from);
@@ -1079,10 +1072,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         */
        if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
                aldata_size = data_size =
-                       ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
+                       ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
 
        cond_resched();
-       crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
+       crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
        cond_resched();
 
        /*
@@ -1111,17 +1104,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
        err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
        if (err) {
                if (err != UBI_IO_BITFLIPS) {
-                       ubi_warn("error %d while reading VID header back from "
-                                 "PEB %d", err, to);
+                       ubi_warn("error %d while reading VID header back from PEB %d",
+                                err, to);
                        if (is_error_sane(err))
                                err = MOVE_TARGET_RD_ERR;
                } else
-                       err = MOVE_CANCEL_BITFLIPS;
+                       err = MOVE_TARGET_BITFLIPS;
                goto out_unlock_buf;
        }
 
        if (data_size > 0) {
-               err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
+               err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
                if (err) {
                        if (err == -EIO)
                                err = MOVE_TARGET_WR_ERR;
@@ -1134,24 +1127,24 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                 * We've written the data and are going to read it back to make
                 * sure it was written correctly.
                 */
-
-               err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
+               memset(ubi->peb_buf, 0xFF, aldata_size);
+               err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
                if (err) {
                        if (err != UBI_IO_BITFLIPS) {
-                               ubi_warn("error %d while reading data back "
-                                        "from PEB %d", err, to);
+                               ubi_warn("error %d while reading data back from PEB %d",
+                                        err, to);
                                if (is_error_sane(err))
                                        err = MOVE_TARGET_RD_ERR;
                        } else
-                               err = MOVE_CANCEL_BITFLIPS;
+                               err = MOVE_TARGET_BITFLIPS;
                        goto out_unlock_buf;
                }
 
                cond_resched();
 
-               if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
-                       ubi_warn("read data back from PEB %d and it is "
-                                "different", to);
+               if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+                       ubi_warn("read data back from PEB %d and it is different",
+                                to);
                        err = -EINVAL;
                        goto out_unlock_buf;
                }
@@ -1171,7 +1164,7 @@ out_unlock_leb:
  * print_rsvd_warning - warn about not having enough reserved PEBs.
  * @ubi: UBI device description object
  *
- * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
  * cannot reserve enough PEBs for bad block handling. This function makes a
  * decision whether we have to print a warning or not. The algorithm is as
  * follows:
@@ -1186,13 +1179,13 @@ out_unlock_leb:
  * reported by real users.
  */
 static void print_rsvd_warning(struct ubi_device *ubi,
-                              struct ubi_scan_info *si)
+                              struct ubi_attach_info *ai)
 {
        /*
         * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
         * large number to distinguish between newly flashed and used images.
         */
-       if (si->max_sqnum > (1 << 18)) {
+       if (ai->max_sqnum > (1 << 18)) {
                int min = ubi->beb_rsvd_level / 10;
 
                if (!min)
@@ -1201,27 +1194,27 @@ static void print_rsvd_warning(struct ubi_device *ubi,
                        return;
        }
 
-       ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
-                " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+       ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+                ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
        if (ubi->corr_peb_count)
                ubi_warn("%d PEBs are corrupted and not used",
-                       ubi->corr_peb_count);
+                        ubi->corr_peb_count);
 }
 
 /**
- * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
        int i, j, err, num_volumes;
-       struct ubi_scan_volume *sv;
+       struct ubi_ainf_volume *av;
        struct ubi_volume *vol;
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_peb *aeb;
        struct rb_node *rb;
 
        dbg_eba("initialize EBA sub-system");
@@ -1230,7 +1223,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
        mutex_init(&ubi->alc_mutex);
        ubi->ltree = RB_ROOT;
 
-       ubi->global_sqnum = si->max_sqnum + 1;
+       ubi->global_sqnum = ai->max_sqnum + 1;
        num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
        for (i = 0; i < num_volumes; i++) {
@@ -1250,19 +1243,19 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                for (j = 0; j < vol->reserved_pebs; j++)
                        vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
 
-               sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
-               if (!sv)
+               av = ubi_find_av(ai, idx2vol_id(ubi, i));
+               if (!av)
                        continue;
 
-               ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-                       if (seb->lnum >= vol->reserved_pebs)
+               ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+                       if (aeb->lnum >= vol->reserved_pebs)
                                /*
                                 * This may happen in case of an unclean reboot
                                 * during re-size.
                                 */
-                               ubi_scan_move_to_list(sv, seb, &si->erase);
+                               ubi_move_aeb_to_list(av, aeb, &ai->erase);
                        else
-                               vol->eba_tbl[seb->lnum] = seb->pnum;
+                               vol->eba_tbl[aeb->lnum] = aeb->pnum;
                }
        }
 
@@ -1284,7 +1277,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                if (ubi->avail_pebs < ubi->beb_rsvd_level) {
                        /* No enough free physical eraseblocks */
                        ubi->beb_rsvd_pebs = ubi->avail_pebs;
-                       print_rsvd_warning(ubi, si);
+                       print_rsvd_warning(ubi, ai);
                } else
                        ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
 
index 941bc3c..dd8f6bd 100644 (file)
@@ -41,7 +41,7 @@
 #include "ubi-media.h"
 
 #define err_msg(fmt, ...)                                   \
-       printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+       pr_err("gluebi (pid %d): %s: " fmt "\n",            \
               current->pid, __func__, ##__VA_ARGS__)
 
 /**
@@ -238,7 +238,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
                if (to_write > total_written)
                        to_write = total_written;
 
-               err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
+               err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
                if (err)
                        break;
 
@@ -360,9 +360,8 @@ static int gluebi_create(struct ubi_device_info *di,
        mutex_lock(&devices_mutex);
        g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (g)
-               err_msg("gluebi MTD device %d form UBI device %d volume %d "
-                       "already exists", g->mtd.index, vi->ubi_num,
-                       vi->vol_id);
+               err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
+                       g->mtd.index, vi->ubi_num, vi->vol_id);
        mutex_unlock(&devices_mutex);
 
        if (mtd_device_register(mtd, NULL, 0)) {
@@ -395,8 +394,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
        mutex_lock(&devices_mutex);
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
-               err_msg("got remove notification for unknown UBI device %d "
-                       "volume %d", vi->ubi_num, vi->vol_id);
+               err_msg("got remove notification for unknown UBI device %d volume %d",
+                       vi->ubi_num, vi->vol_id);
                err = -ENOENT;
        } else if (gluebi->refcnt)
                err = -EBUSY;
@@ -409,9 +408,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
        mtd = &gluebi->mtd;
        err = mtd_device_unregister(mtd);
        if (err) {
-               err_msg("cannot remove fake MTD device %d, UBI device %d, "
-                       "volume %d, error %d", mtd->index, gluebi->ubi_num,
-                       gluebi->vol_id, err);
+               err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+                       mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
                mutex_lock(&devices_mutex);
                list_add_tail(&gluebi->list, &gluebi_devices);
                mutex_unlock(&devices_mutex);
@@ -441,8 +439,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                mutex_unlock(&devices_mutex);
-               err_msg("got update notification for unknown UBI device %d "
-                       "volume %d", vi->ubi_num, vi->vol_id);
+               err_msg("got update notification for unknown UBI device %d volume %d",
+                       vi->ubi_num, vi->vol_id);
                return -ENOENT;
        }
 
@@ -468,8 +466,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                mutex_unlock(&devices_mutex);
-               err_msg("got update notification for unknown UBI device %d "
-                       "volume %d", vi->ubi_num, vi->vol_id);
+               err_msg("got update notification for unknown UBI device %d volume %d",
+                       vi->ubi_num, vi->vol_id);
                return -ENOENT;
        }
        gluebi->mtd.size = vi->used_bytes;
@@ -526,9 +524,9 @@ static void __exit ubi_gluebi_exit(void)
 
                err = mtd_device_unregister(mtd);
                if (err)
-                       err_msg("error %d while removing gluebi MTD device %d, "
-                               "UBI device %d, volume %d - ignoring", err,
-                               mtd->index, gluebi->ubi_num, gluebi->vol_id);
+                       err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+                               err, mtd->index, gluebi->ubi_num,
+                               gluebi->vol_id);
                kfree(mtd->name);
                kfree(gluebi);
        }
index 6e7f4d4..67f46f8 100644 (file)
 #include <linux/slab.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
-                                const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
-                                 const struct ubi_vid_hdr *vid_hdr);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum)  0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr)  0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+                            const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+                             const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+                           int offset, int len);
 
 /**
  * ubi_io_read - read data from a physical eraseblock.
@@ -142,7 +136,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
        ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
        ubi_assert(len > 0);
 
-       err = paranoid_check_not_bad(ubi, pnum);
+       err = self_check_not_bad(ubi, pnum);
        if (err)
                return err;
 
@@ -183,22 +177,21 @@ retry:
                         * enabled. A corresponding message will be printed
                         * later, when it is has been scrubbed.
                         */
-                       dbg_msg("fixable bit-flip detected at PEB %d", pnum);
+                       ubi_msg("fixable bit-flip detected at PEB %d", pnum);
                        ubi_assert(len == read);
                        return UBI_IO_BITFLIPS;
                }
 
                if (retries++ < UBI_IO_RETRIES) {
-                       dbg_io("error %d%s while reading %d bytes from PEB "
-                              "%d:%d, read only %zd bytes, retry",
-                              err, errstr, len, pnum, offset, read);
+                       ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+                                err, errstr, len, pnum, offset, read);
                        yield();
                        goto retry;
                }
 
-               ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
-                       "read %zd bytes", err, errstr, len, pnum, offset, read);
-               ubi_dbg_dump_stack();
+               ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+                       err, errstr, len, pnum, offset, read);
+               dump_stack();
 
                /*
                 * The driver should never return -EBADMSG if it failed to read
@@ -257,14 +250,12 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
                return -EROFS;
        }
 
-       /* The below has to be compiled out if paranoid checks are disabled */
-
-       err = paranoid_check_not_bad(ubi, pnum);
+       err = self_check_not_bad(ubi, pnum);
        if (err)
                return err;
 
        /* The area we are writing to has to contain all 0xFF bytes */
-       err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+       err = ubi_self_check_all_ff(ubi, pnum, offset, len);
        if (err)
                return err;
 
@@ -273,33 +264,33 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
                 * We write to the data area of the physical eraseblock. Make
                 * sure it has valid EC and VID headers.
                 */
-               err = paranoid_check_peb_ec_hdr(ubi, pnum);
+               err = self_check_peb_ec_hdr(ubi, pnum);
                if (err)
                        return err;
-               err = paranoid_check_peb_vid_hdr(ubi, pnum);
+               err = self_check_peb_vid_hdr(ubi, pnum);
                if (err)
                        return err;
        }
 
        if (ubi_dbg_is_write_failure(ubi)) {
-               dbg_err("cannot write %d bytes to PEB %d:%d "
-                       "(emulated)", len, pnum, offset);
-               ubi_dbg_dump_stack();
+               ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+                       len, pnum, offset);
+               dump_stack();
                return -EIO;
        }
 
        addr = (loff_t)pnum * ubi->peb_size + offset;
        err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
        if (err) {
-               ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
-                       "%zd bytes", err, len, pnum, offset, written);
-               ubi_dbg_dump_stack();
-               ubi_dbg_dump_flash(ubi, pnum, offset, len);
+               ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+                       err, len, pnum, offset, written);
+               dump_stack();
+               ubi_dump_flash(ubi, pnum, offset, len);
        } else
                ubi_assert(written == len);
 
        if (!err) {
-               err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
+               err = self_check_write(ubi, buf, pnum, offset, len);
                if (err)
                        return err;
 
@@ -310,7 +301,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
                offset += len;
                len = ubi->peb_size - offset;
                if (len)
-                       err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+                       err = ubi_self_check_all_ff(ubi, pnum, offset, len);
        }
 
        return err;
@@ -364,13 +355,13 @@ retry:
        err = ubi->mtd->erase(ubi->mtd, &ei);
        if (err) {
                if (retries++ < UBI_IO_RETRIES) {
-                       dbg_io("error %d while erasing PEB %d, retry",
-                              err, pnum);
+                       ubi_warn("error %d while erasing PEB %d, retry",
+                                err, pnum);
                        yield();
                        goto retry;
                }
                ubi_err("cannot erase PEB %d, error %d", pnum, err);
-               ubi_dbg_dump_stack();
+               dump_stack();
                return err;
        }
 
@@ -383,21 +374,21 @@ retry:
 
        if (ei.state == MTD_ERASE_FAILED) {
                if (retries++ < UBI_IO_RETRIES) {
-                       dbg_io("error while erasing PEB %d, retry", pnum);
+                       ubi_warn("error while erasing PEB %d, retry", pnum);
                        yield();
                        goto retry;
                }
                ubi_err("cannot erase PEB %d", pnum);
-               ubi_dbg_dump_stack();
+               dump_stack();
                return -EIO;
        }
 
-       err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+       err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
        if (err)
                return err;
 
        if (ubi_dbg_is_erase_failure(ubi)) {
-               dbg_err("cannot erase PEB %d (emulated)", pnum);
+               ubi_err("cannot erase PEB %d (emulated)", pnum);
                return -EIO;
        }
 
@@ -431,11 +422,11 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
                        goto out;
 
                /* Make sure the PEB contains only 0xFF bytes */
-               err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+               err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                if (err)
                        goto out;
 
-               err = ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
+               err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
                if (err == 0) {
                        ubi_err("erased PEB %d, but a non-0xFF byte found",
                                pnum);
@@ -444,17 +435,17 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
                }
 
                /* Write a pattern and check it */
-               memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
-               err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+               memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+               err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                if (err)
                        goto out;
 
-               memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
-               err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+               memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+               err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                if (err)
                        goto out;
 
-               err = ubi_check_pattern(ubi->peb_buf1, patterns[i],
+               err = ubi_check_pattern(ubi->peb_buf, patterns[i],
                                        ubi->peb_size);
                if (err == 0) {
                        ubi_err("pattern %x checking failed for PEB %d",
@@ -521,8 +512,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
         * It is important to first invalidate the EC header, and then the VID
         * header. Otherwise a power cut may lead to valid EC header and
         * invalid VID header, in which case UBI will treat this PEB as
-        * corrupted and will try to preserve it, and print scary warnings (see
-        * the header comment in scan.c for more information).
+        * corrupted and will try to preserve it, and print scary warnings.
         */
        addr = (loff_t)pnum * ubi->peb_size;
        err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
@@ -564,7 +554,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
         */
        ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
                pnum, err, err1);
-       ubi_dbg_dump_flash(ubi, pnum, 0, ubi->peb_size);
+       ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
        return -EIO;
 }
 
@@ -590,7 +580,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
 
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
 
-       err = paranoid_check_not_bad(ubi, pnum);
+       err = self_check_not_bad(ubi, pnum);
        if (err != 0)
                return err;
 
@@ -695,8 +685,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
        leb_start = be32_to_cpu(ec_hdr->data_offset);
 
        if (ec_hdr->version != UBI_VERSION) {
-               ubi_err("node with incompatible UBI version found: "
-                       "this UBI version is %d, image version is %d",
+               ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
                        UBI_VERSION, (int)ec_hdr->version);
                goto bad;
        }
@@ -722,8 +711,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
 
 bad:
        ubi_err("bad EC header");
-       ubi_dbg_dump_ec_hdr(ec_hdr);
-       ubi_dbg_dump_stack();
+       ubi_dump_ec_hdr(ec_hdr);
+       dump_stack();
        return 1;
 }
 
@@ -787,10 +776,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
                if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
                        /* The physical eraseblock is supposedly empty */
                        if (verbose)
-                               ubi_warn("no EC header found at PEB %d, "
-                                        "only 0xFF bytes", pnum);
-                       dbg_bld("no EC header found at PEB %d, "
-                               "only 0xFF bytes", pnum);
+                               ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+                                        pnum);
+                       dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+                               pnum);
                        if (!read_err)
                                return UBI_IO_FF;
                        else
@@ -802,12 +791,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
                 * 0xFF bytes. Report that the header is corrupted.
                 */
                if (verbose) {
-                       ubi_warn("bad magic number at PEB %d: %08x instead of "
-                                "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
-                       ubi_dbg_dump_ec_hdr(ec_hdr);
+                       ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+                                pnum, magic, UBI_EC_HDR_MAGIC);
+                       ubi_dump_ec_hdr(ec_hdr);
                }
-               dbg_bld("bad magic number at PEB %d: %08x instead of "
-                       "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+               dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+                       pnum, magic, UBI_EC_HDR_MAGIC);
                return UBI_IO_BAD_HDR;
        }
 
@@ -816,12 +805,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 
        if (hdr_crc != crc) {
                if (verbose) {
-                       ubi_warn("bad EC header CRC at PEB %d, calculated "
-                                "%#08x, read %#08x", pnum, crc, hdr_crc);
-                       ubi_dbg_dump_ec_hdr(ec_hdr);
+                       ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+                                pnum, crc, hdr_crc);
+                       ubi_dump_ec_hdr(ec_hdr);
                }
-               dbg_bld("bad EC header CRC at PEB %d, calculated "
-                       "%#08x, read %#08x", pnum, crc, hdr_crc);
+               dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+                       pnum, crc, hdr_crc);
 
                if (!read_err)
                        return UBI_IO_BAD_HDR;
@@ -875,7 +864,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
        crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
        ec_hdr->hdr_crc = cpu_to_be32(crc);
 
-       err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+       err = self_check_ec_hdr(ubi, pnum, ec_hdr);
        if (err)
                return err;
 
@@ -906,40 +895,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
        int usable_leb_size = ubi->leb_size - data_pad;
 
        if (copy_flag != 0 && copy_flag != 1) {
-               dbg_err("bad copy_flag");
+               ubi_err("bad copy_flag");
                goto bad;
        }
 
        if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
            data_pad < 0) {
-               dbg_err("negative values");
+               ubi_err("negative values");
                goto bad;
        }
 
        if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
-               dbg_err("bad vol_id");
+               ubi_err("bad vol_id");
                goto bad;
        }
 
        if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
-               dbg_err("bad compat");
+               ubi_err("bad compat");
                goto bad;
        }
 
        if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
            compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
            compat != UBI_COMPAT_REJECT) {
-               dbg_err("bad compat");
+               ubi_err("bad compat");
                goto bad;
        }
 
        if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
-               dbg_err("bad vol_type");
+               ubi_err("bad vol_type");
                goto bad;
        }
 
        if (data_pad >= ubi->leb_size / 2) {
-               dbg_err("bad data_pad");
+               ubi_err("bad data_pad");
                goto bad;
        }
 
@@ -956,45 +945,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
                 * mapped logical eraseblocks.
                 */
                if (used_ebs == 0) {
-                       dbg_err("zero used_ebs");
+                       ubi_err("zero used_ebs");
                        goto bad;
                }
                if (data_size == 0) {
-                       dbg_err("zero data_size");
+                       ubi_err("zero data_size");
                        goto bad;
                }
                if (lnum < used_ebs - 1) {
                        if (data_size != usable_leb_size) {
-                               dbg_err("bad data_size");
+                               ubi_err("bad data_size");
                                goto bad;
                        }
                } else if (lnum == used_ebs - 1) {
                        if (data_size == 0) {
-                               dbg_err("bad data_size at last LEB");
+                               ubi_err("bad data_size at last LEB");
                                goto bad;
                        }
                } else {
-                       dbg_err("too high lnum");
+                       ubi_err("too high lnum");
                        goto bad;
                }
        } else {
                if (copy_flag == 0) {
                        if (data_crc != 0) {
-                               dbg_err("non-zero data CRC");
+                               ubi_err("non-zero data CRC");
                                goto bad;
                        }
                        if (data_size != 0) {
-                               dbg_err("non-zero data_size");
+                               ubi_err("non-zero data_size");
                                goto bad;
                        }
                } else {
                        if (data_size == 0) {
-                               dbg_err("zero data_size of copy");
+                               ubi_err("zero data_size of copy");
                                goto bad;
                        }
                }
                if (used_ebs != 0) {
-                       dbg_err("bad used_ebs");
+                       ubi_err("bad used_ebs");
                        goto bad;
                }
        }
@@ -1003,8 +992,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
 
 bad:
        ubi_err("bad VID header");
-       ubi_dbg_dump_vid_hdr(vid_hdr);
-       ubi_dbg_dump_stack();
+       ubi_dump_vid_hdr(vid_hdr);
+       dump_stack();
        return 1;
 }
 
@@ -1047,10 +1036,10 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 
                if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
                        if (verbose)
-                               ubi_warn("no VID header found at PEB %d, "
-                                        "only 0xFF bytes", pnum);
-                       dbg_bld("no VID header found at PEB %d, "
-                               "only 0xFF bytes", pnum);
+                               ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+                                        pnum);
+                       dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+                               pnum);
                        if (!read_err)
                                return UBI_IO_FF;
                        else
@@ -1058,12 +1047,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
                }
 
                if (verbose) {
-                       ubi_warn("bad magic number at PEB %d: %08x instead of "
-                                "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
-                       ubi_dbg_dump_vid_hdr(vid_hdr);
+                       ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+                                pnum, magic, UBI_VID_HDR_MAGIC);
+                       ubi_dump_vid_hdr(vid_hdr);
                }
-               dbg_bld("bad magic number at PEB %d: %08x instead of "
-                       "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+               dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+                       pnum, magic, UBI_VID_HDR_MAGIC);
                return UBI_IO_BAD_HDR;
        }
 
@@ -1072,12 +1061,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 
        if (hdr_crc != crc) {
                if (verbose) {
-                       ubi_warn("bad CRC at PEB %d, calculated %#08x, "
-                                "read %#08x", pnum, crc, hdr_crc);
-                       ubi_dbg_dump_vid_hdr(vid_hdr);
+                       ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+                                pnum, crc, hdr_crc);
+                       ubi_dump_vid_hdr(vid_hdr);
                }
-               dbg_bld("bad CRC at PEB %d, calculated %#08x, "
-                       "read %#08x", pnum, crc, hdr_crc);
+               dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+                       pnum, crc, hdr_crc);
                if (!read_err)
                        return UBI_IO_BAD_HDR;
                else
@@ -1118,7 +1107,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
        dbg_io("write VID header to PEB %d", pnum);
        ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
 
-       err = paranoid_check_peb_ec_hdr(ubi, pnum);
+       err = self_check_peb_ec_hdr(ubi, pnum);
        if (err)
                return err;
 
@@ -1127,7 +1116,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
        crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
        vid_hdr->hdr_crc = cpu_to_be32(crc);
 
-       err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+       err = self_check_vid_hdr(ubi, pnum, vid_hdr);
        if (err)
                return err;
 
@@ -1137,17 +1126,15 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
        return err;
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-
 /**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number to check
  *
  * This function returns zero if the physical eraseblock is good, %-EINVAL if
  * it is bad and a negative error code if an error occurred.
  */
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
 {
        int err;
 
@@ -1158,13 +1145,13 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
        if (!err)
                return err;
 
-       ubi_err("paranoid check failed for PEB %d", pnum);
-       ubi_dbg_dump_stack();
+       ubi_err("self-check failed for PEB %d", pnum);
+       dump_stack();
        return err > 0 ? -EINVAL : err;
 }
 
 /**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number the erase counter header belongs to
  * @ec_hdr: the erase counter header to check
@@ -1172,8 +1159,8 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
  * This function returns zero if the erase counter header contains valid
  * values, and %-EINVAL if not.
  */
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
-                                const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+                            const struct ubi_ec_hdr *ec_hdr)
 {
        int err;
        uint32_t magic;
@@ -1190,27 +1177,27 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
 
        err = validate_ec_hdr(ubi, ec_hdr);
        if (err) {
-               ubi_err("paranoid check failed for PEB %d", pnum);
+               ubi_err("self-check failed for PEB %d", pnum);
                goto fail;
        }
 
        return 0;
 
 fail:
-       ubi_dbg_dump_ec_hdr(ec_hdr);
-       ubi_dbg_dump_stack();
+       ubi_dump_ec_hdr(ec_hdr);
+       dump_stack();
        return -EINVAL;
 }
 
 /**
- * paranoid_check_peb_ec_hdr - check erase counter header.
+ * self_check_peb_ec_hdr - check erase counter header.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  *
  * This function returns zero if the erase counter header is all right and and
  * a negative error code if not or if an error occurred.
  */
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
 {
        int err;
        uint32_t crc, hdr_crc;
@@ -1231,14 +1218,14 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
        hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
        if (hdr_crc != crc) {
                ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
-               ubi_err("paranoid check failed for PEB %d", pnum);
-               ubi_dbg_dump_ec_hdr(ec_hdr);
-               ubi_dbg_dump_stack();
+               ubi_err("self-check failed for PEB %d", pnum);
+               ubi_dump_ec_hdr(ec_hdr);
+               dump_stack();
                err = -EINVAL;
                goto exit;
        }
 
-       err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+       err = self_check_ec_hdr(ubi, pnum, ec_hdr);
 
 exit:
        kfree(ec_hdr);
@@ -1246,7 +1233,7 @@ exit:
 }
 
 /**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number the volume identifier header belongs to
  * @vid_hdr: the volume identifier header to check
@@ -1254,8 +1241,8 @@ exit:
  * This function returns zero if the volume identifier header is all right, and
  * %-EINVAL if not.
  */
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
-                                 const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+                             const struct ubi_vid_hdr *vid_hdr)
 {
        int err;
        uint32_t magic;
@@ -1272,29 +1259,29 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
 
        err = validate_vid_hdr(ubi, vid_hdr);
        if (err) {
-               ubi_err("paranoid check failed for PEB %d", pnum);
+               ubi_err("self-check failed for PEB %d", pnum);
                goto fail;
        }
 
        return err;
 
 fail:
-       ubi_err("paranoid check failed for PEB %d", pnum);
-       ubi_dbg_dump_vid_hdr(vid_hdr);
-       ubi_dbg_dump_stack();
+       ubi_err("self-check failed for PEB %d", pnum);
+       ubi_dump_vid_hdr(vid_hdr);
+       dump_stack();
        return -EINVAL;
 
 }
 
 /**
- * paranoid_check_peb_vid_hdr - check volume identifier header.
+ * self_check_peb_vid_hdr - check volume identifier header.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  *
  * This function returns zero if the volume identifier header is all right,
  * and a negative error code if not or if an error occurred.
  */
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
 {
        int err;
        uint32_t crc, hdr_crc;
@@ -1317,16 +1304,16 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
        crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
        hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
        if (hdr_crc != crc) {
-               ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
-                       "read %#08x", pnum, crc, hdr_crc);
-               ubi_err("paranoid check failed for PEB %d", pnum);
-               ubi_dbg_dump_vid_hdr(vid_hdr);
-               ubi_dbg_dump_stack();
+               ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+                       pnum, crc, hdr_crc);
+               ubi_err("self-check failed for PEB %d", pnum);
+               ubi_dump_vid_hdr(vid_hdr);
+               dump_stack();
                err = -EINVAL;
                goto exit;
        }
 
-       err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+       err = self_check_vid_hdr(ubi, pnum, vid_hdr);
 
 exit:
        ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1334,7 +1321,7 @@ exit:
 }
 
 /**
- * ubi_dbg_check_write - make sure write succeeded.
+ * self_check_write - make sure write succeeded.
  * @ubi: UBI device description object
  * @buf: buffer with data which were written
  * @pnum: physical eraseblock number the data were written to
@@ -1345,8 +1332,8 @@ exit:
  * the original data buffer - the data have to match. Returns zero if the data
  * match and a negative error code if not or in case of failure.
  */
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
-                       int offset, int len)
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+                           int offset, int len)
 {
        int err, i;
        size_t read;
@@ -1374,7 +1361,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
                if (c == c1)
                        continue;
 
-               ubi_err("paranoid check failed for PEB %d:%d, len %d",
+               ubi_err("self-check failed for PEB %d:%d, len %d",
                        pnum, offset, len);
                ubi_msg("data differ at position %d", i);
                dump_len = max_t(int, 128, len - i);
@@ -1386,7 +1373,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
                        i, i + dump_len);
                print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                               buf1 + i, dump_len, 1);
-               ubi_dbg_dump_stack();
+               dump_stack();
                err = -EINVAL;
                goto out_free;
        }
@@ -1400,7 +1387,7 @@ out_free:
 }
 
 /**
- * ubi_dbg_check_all_ff - check that a region of flash is empty.
+ * ubi_self_check_all_ff - check that a region of flash is empty.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  * @offset: the starting offset within the physical eraseblock to check
@@ -1410,7 +1397,7 @@ out_free:
  * @offset of the physical eraseblock @pnum, and a negative error code if not
  * or if an error occurred.
  */
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
 {
        size_t read;
        int err;
@@ -1428,15 +1415,15 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
 
        err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
        if (err && !mtd_is_bitflip(err)) {
-               ubi_err("error %d while reading %d bytes from PEB %d:%d, "
-                       "read %zd bytes", err, len, pnum, offset, read);
+               ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+                       err, len, pnum, offset, read);
                goto error;
        }
 
        err = ubi_check_pattern(buf, 0xFF, len);
        if (err == 0) {
-               ubi_err("flash region at PEB %d:%d, length %d does not "
-                       "contain all 0xFF bytes", pnum, offset, len);
+               ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+                       pnum, offset, len);
                goto fail;
        }
 
@@ -1444,14 +1431,12 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
        return 0;
 
 fail:
-       ubi_err("paranoid check failed for PEB %d", pnum);
+       ubi_err("self-check failed for PEB %d", pnum);
        ubi_msg("hex dump of the %d-%d region", offset, offset + len);
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
        err = -EINVAL;
 error:
-       ubi_dbg_dump_stack();
+       dump_stack();
        vfree(buf);
        return err;
 }
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
index 1a35fc5..c36c762 100644 (file)
@@ -221,7 +221,7 @@ out_free:
        kfree(desc);
 out_put_ubi:
        ubi_put_device(ubi);
-       dbg_err("cannot open device %d, volume %d, error %d",
+       ubi_err("cannot open device %d, volume %d, error %d",
                ubi_num, vol_id, err);
        return ERR_PTR(err);
 }
@@ -426,11 +426,9 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
  * @buf: data to write
  * @offset: offset within the logical eraseblock where to write
  * @len: how many bytes to write
- * @dtype: expected data type
  *
  * This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
  *
  * This function takes care of physical eraseblock write failures. If write to
  * the physical eraseblock write operation fails, the logical eraseblock is
@@ -447,7 +445,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
  * returns immediately with %-EBADF code.
  */
 int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
-                 int offset, int len, int dtype)
+                 int offset, int len)
 {
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;
@@ -466,17 +464,13 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
            offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
                return -EINVAL;
 
-       if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
-           dtype != UBI_UNKNOWN)
-               return -EINVAL;
-
        if (vol->upd_marker)
                return -EBADF;
 
        if (len == 0)
                return 0;
 
-       return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+       return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_write);
 
@@ -486,7 +480,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
  * @lnum: logical eraseblock number to change
  * @buf: data to write
  * @len: how many bytes to write
- * @dtype: expected data type
  *
  * This function changes the contents of a logical eraseblock atomically. @buf
  * has to contain new logical eraseblock data, and @len - the length of the
@@ -497,7 +490,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
  * code in case of failure.
  */
 int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
-                  int len, int dtype)
+                  int len)
 {
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;
@@ -515,17 +508,13 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
            len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
                return -EINVAL;
 
-       if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
-           dtype != UBI_UNKNOWN)
-               return -EINVAL;
-
        if (vol->upd_marker)
                return -EBADF;
 
        if (len == 0)
                return 0;
 
-       return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+       return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_change);
 
@@ -562,7 +551,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
        if (err)
                return err;
 
-       return ubi_wl_flush(ubi);
+       return ubi_wl_flush(ubi, vol->vol_id, lnum);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_erase);
 
@@ -626,7 +615,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
  * ubi_leb_map - map logical eraseblock to a physical eraseblock.
  * @desc: volume descriptor
  * @lnum: logical eraseblock number
- * @dtype: expected data type
  *
  * This function maps an un-mapped logical eraseblock @lnum to a physical
  * eraseblock. This means, that after a successful invocation of this
@@ -639,7 +627,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
  * eraseblock is already mapped, and other negative error codes in case of
  * other failures.
  */
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
 {
        struct ubi_volume *vol = desc->vol;
        struct ubi_device *ubi = vol->ubi;
@@ -652,17 +640,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
        if (lnum < 0 || lnum >= vol->reserved_pebs)
                return -EINVAL;
 
-       if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
-           dtype != UBI_UNKNOWN)
-               return -EINVAL;
-
        if (vol->upd_marker)
                return -EBADF;
 
        if (vol->eba_tbl[lnum] >= 0)
                return -EBADMSG;
 
-       return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+       return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_map);
 
@@ -722,6 +706,33 @@ int ubi_sync(int ubi_num)
 }
 EXPORT_SYMBOL_GPL(ubi_sync);
 
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+       struct ubi_device *ubi;
+       int err = 0;
+
+       ubi = ubi_get_device(ubi_num);
+       if (!ubi)
+               return -ENODEV;
+
+       err = ubi_wl_flush(ubi, vol_id, lnum);
+       ubi_put_device(ubi);
+       return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
+
 BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
 
 /**
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
deleted file mode 100644 (file)
index d48aef1..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-#ifndef __UBI_SCAN_H__
-#define __UBI_SCAN_H__
-
-/* The erase counter value for this physical eraseblock is unknown */
-#define UBI_SCAN_UNKNOWN_EC (-1)
-
-/**
- * struct ubi_scan_leb - scanning information about a physical eraseblock.
- * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- * @pnum: physical eraseblock number
- * @lnum: logical eraseblock number
- * @scrub: if this physical eraseblock needs scrubbing
- * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
- * @sqnum: sequence number
- * @u: unions RB-tree or @list links
- * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
- * @u.list: link in one of the eraseblock lists
- *
- * One object of this type is allocated for each physical eraseblock during
- * scanning.
- */
-struct ubi_scan_leb {
-       int ec;
-       int pnum;
-       int lnum;
-       unsigned int scrub:1;
-       unsigned int copy_flag:1;
-       unsigned long long sqnum;
-       union {
-               struct rb_node rb;
-               struct list_head list;
-       } u;
-};
-
-/**
- * struct ubi_scan_volume - scanning information about a volume.
- * @vol_id: volume ID
- * @highest_lnum: highest logical eraseblock number in this volume
- * @leb_count: number of logical eraseblocks in this volume
- * @vol_type: volume type
- * @used_ebs: number of used logical eraseblocks in this volume (only for
- *            static volumes)
- * @last_data_size: amount of data in the last logical eraseblock of this
- *                  volume (always equivalent to the usable logical eraseblock
- *                  size in case of dynamic volumes)
- * @data_pad: how many bytes at the end of logical eraseblocks of this volume
- *            are not used (due to volume alignment)
- * @compat: compatibility flags of this volume
- * @rb: link in the volume RB-tree
- * @root: root of the RB-tree containing all the eraseblock belonging to this
- *        volume (&struct ubi_scan_leb objects)
- *
- * One object of this type is allocated for each volume during scanning.
- */
-struct ubi_scan_volume {
-       int vol_id;
-       int highest_lnum;
-       int leb_count;
-       int vol_type;
-       int used_ebs;
-       int last_data_size;
-       int data_pad;
-       int compat;
-       struct rb_node rb;
-       struct rb_root root;
-};
-
-/**
- * struct ubi_scan_info - UBI scanning information.
- * @volumes: root of the volume RB-tree
- * @corr: list of corrupted physical eraseblocks
- * @free: list of free physical eraseblocks
- * @erase: list of physical eraseblocks which have to be erased
- * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
- *         those belonging to "preserve"-compatible internal volumes)
- * @corr_peb_count: count of PEBs in the @corr list
- * @empty_peb_count: count of PEBs which are presumably empty (contain only
- *                   0xFF bytes)
- * @alien_peb_count: count of PEBs in the @alien list
- * @bad_peb_count: count of bad physical eraseblocks
- * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
- *                       as bad yet, but which look like bad
- * @vols_found: number of volumes found during scanning
- * @highest_vol_id: highest volume ID
- * @is_empty: flag indicating whether the MTD device is empty or not
- * @min_ec: lowest erase counter value
- * @max_ec: highest erase counter value
- * @max_sqnum: highest sequence number value
- * @mean_ec: mean erase counter value
- * @ec_sum: a temporary variable used when calculating @mean_ec
- * @ec_count: a temporary variable used when calculating @mean_ec
- * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
- *
- * This data structure contains the result of scanning and may be used by other
- * UBI sub-systems to build final UBI data structures, further error-recovery
- * and so on.
- */
-struct ubi_scan_info {
-       struct rb_root volumes;
-       struct list_head corr;
-       struct list_head free;
-       struct list_head erase;
-       struct list_head alien;
-       int corr_peb_count;
-       int empty_peb_count;
-       int alien_peb_count;
-       int bad_peb_count;
-       int maybe_bad_peb_count;
-       int vols_found;
-       int highest_vol_id;
-       int is_empty;
-       int min_ec;
-       int max_ec;
-       unsigned long long max_sqnum;
-       int mean_ec;
-       uint64_t ec_sum;
-       int ec_count;
-       struct kmem_cache *scan_leb_slab;
-};
-
-struct ubi_device;
-struct ubi_vid_hdr;
-
-/*
- * ubi_scan_move_to_list - move a PEB from the volume tree to a list.
- *
- * @sv: volume scanning information
- * @seb: scanning eraseblock information
- * @list: the list to move to
- */
-static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
-                                        struct ubi_scan_leb *seb,
-                                        struct list_head *list)
-{
-               rb_erase(&seb->u.rb, &sv->root);
-               list_add_tail(&seb->u.list, list);
-}
-
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
-                     int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
-                     int bitflips);
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
-                                        int vol_id);
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
-                                      int lnum);
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
-                                          struct ubi_scan_info *si);
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
-                      int pnum, int ec);
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
-void ubi_scan_destroy_si(struct ubi_scan_info *si);
-
-#endif /* !__UBI_SCAN_H__ */
index 6fb8ec2..468ffbc 100644 (file)
@@ -149,10 +149,10 @@ enum {
  * The @image_seq field is used to validate a UBI image that has been prepared
  * for a UBI device. The @image_seq value can be any value, but it must be the
  * same on all eraseblocks. UBI will ensure that all new erase counter headers
- * also contain this value, and will check the value when scanning at start-up.
+ * also contain this value, and will check the value when attaching the flash.
  * One way to make use of @image_seq is to increase its value by one every time
  * an image is flashed over an existing image, then, if the flashing does not
- * complete, UBI will detect the error when scanning.
+ * complete, UBI will detect the error when attaching the media.
  */
 struct ubi_ec_hdr {
        __be32  magic;
@@ -298,8 +298,8 @@ struct ubi_vid_hdr {
 #define UBI_INT_VOL_COUNT 1
 
 /*
- * Starting ID of internal volumes. There is reserved room for 4096 internal
- * volumes.
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
  */
 #define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
 
index d51d75d..149d7fb 100644 (file)
@@ -43,7 +43,6 @@
 #include <asm/pgtable.h>
 
 #include "ubi-media.h"
-#include "scan.h"
 
 /* Maximum number of supported UBI devices */
 #define UBI_MAX_DEVICES 32
 #define UBI_NAME_STR "ubi"
 
 /* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
 /* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
-                                 __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n",  \
+                                  __func__, ##__VA_ARGS__)
 /* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n",      \
                                 __func__, ##__VA_ARGS__)
 
 /* Lowest number PEBs reserved for bad PEB handling */
 /* Background thread name pattern */
 #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
 
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is um-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
 #define UBI_LEB_UNMAPPED -1
 
 /*
@@ -82,6 +84,9 @@
  */
 #define UBI_PROT_QUEUE_LEN 10
 
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
 /*
  * Error codes returned by the I/O sub-system.
  *
@@ -118,7 +123,7 @@ enum {
  *                     PEB
  * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
  *                     PEB
- * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
  *                       target PEB
  * MOVE_RETRY: retry scrubbing the PEB
  */
@@ -127,7 +132,7 @@ enum {
        MOVE_SOURCE_RD_ERR,
        MOVE_TARGET_RD_ERR,
        MOVE_TARGET_WR_ERR,
-       MOVE_CANCEL_BITFLIPS,
+       MOVE_TARGET_BITFLIPS,
        MOVE_RETRY,
 };
 
@@ -222,8 +227,6 @@ struct ubi_volume_desc;
  * @upd_ebs: how many eraseblocks are expected to be updated
  * @ch_lnum: LEB number which is being changing by the atomic LEB change
  *           operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- *            change operation
  * @upd_bytes: how many bytes are expected to be received for volume update or
  *             atomic LEB change
  * @upd_received: how many bytes were already received for volume update or
@@ -270,7 +273,6 @@ struct ubi_volume {
 
        int upd_ebs;
        int ch_lnum;
-       int ch_dtype;
        long long upd_bytes;
        long long upd_received;
        void *upd_buf;
@@ -387,9 +389,8 @@ struct ubi_wl_entry;
  *                  time (MTD write buffer size)
  * @mtd: MTD device descriptor
  *
- * @peb_buf1: a buffer of PEB size used for different purposes
- * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: protects @peb_buf1 and @peb_buf2
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
  * @ckvol_mutex: serializes static volume checking when opening
  *
  * @dbg: debugging information for this UBI device
@@ -471,14 +472,131 @@ struct ubi_device {
        int max_write_size;
        struct mtd_info *mtd;
 
-       void *peb_buf1;
-       void *peb_buf2;
+       void *peb_buf;
        struct mutex buf_mutex;
        struct mutex ckvol_mutex;
 
        struct ubi_debug_info *dbg;
 };
 
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+       int ec;
+       int pnum;
+       int vol_id;
+       int lnum;
+       unsigned int scrub:1;
+       unsigned int copy_flag:1;
+       unsigned long long sqnum;
+       union {
+               struct rb_node rb;
+               struct list_head list;
+       } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ *            static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ *                  volume (always equivalent to the usable logical eraseblock
+ *                  size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ *            are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblock belonging to this
+ *        volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+       int vol_id;
+       int highest_lnum;
+       int leb_count;
+       int vol_type;
+       int used_ebs;
+       int last_data_size;
+       int data_pad;
+       int compat;
+       struct rb_node rb;
+       struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ *         those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ *                   0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ *                       as bad yet, but which look like bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, further
+ * error-recovery and so on.
+ */
+struct ubi_attach_info {
+       struct rb_root volumes;
+       struct list_head corr;
+       struct list_head free;
+       struct list_head erase;
+       struct list_head alien;
+       int corr_peb_count;
+       int empty_peb_count;
+       int alien_peb_count;
+       int bad_peb_count;
+       int maybe_bad_peb_count;
+       int vols_found;
+       int highest_vol_id;
+       int is_empty;
+       int min_ec;
+       int max_ec;
+       unsigned long long max_sqnum;
+       int mean_ec;
+       uint64_t ec_sum;
+       int ec_count;
+       struct kmem_cache *aeb_slab_cache;
+};
+
 #include "debug.h"
 
 extern struct kmem_cache *ubi_wl_entry_slab;
@@ -489,12 +607,23 @@ extern struct class *ubi_class;
 extern struct mutex ubi_devices_mutex;
 extern struct blocking_notifier_head ubi_notifiers;
 
+/* scan.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+                 int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+                                   int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+                                      struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
 /* vtbl.c */
 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
                           struct ubi_vtbl_record *vtbl_rec);
 int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
                            struct list_head *rename_list);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
 
 /* vmt.c */
 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
@@ -527,22 +656,22 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                     void *buf, int offset, int len, int check);
 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
-                     const void *buf, int offset, int len, int dtype);
+                     const void *buf, int offset, int len);
 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
-                        int lnum, const void *buf, int len, int dtype,
-                        int used_ebs);
+                        int lnum, const void *buf, int len, int used_ebs);
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
-                             int lnum, const void *buf, int len, int dtype);
+                             int lnum, const void *buf, int len);
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                     struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
 
 /* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+                  int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
 void ubi_wl_close(struct ubi_device *ubi);
 int ubi_thread(void *u);
 
@@ -575,6 +704,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
 int ubi_notify_all(struct ubi_device *ubi, int ntype,
                   struct notifier_block *nb);
 int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
 
 /* kapi.c */
 void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
@@ -595,6 +725,21 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
             rb = rb_next(rb),                                               \
             pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
 
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+                                        struct ubi_ainf_peb *aeb,
+                                        struct list_head *list)
+{
+               rb_erase(&aeb->u.rb, &av->root);
+               list_add_tail(&aeb->u.list, list);
+}
+
 /**
  * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
  * @ubi: UBI device description object
@@ -669,7 +814,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
        if (!ubi->ro_mode) {
                ubi->ro_mode = 1;
                ubi_warn("switch to read-only mode");
-               ubi_dbg_dump_stack();
+               dump_stack();
        }
 }
 
index d1802b0..22cd95c 100644 (file)
@@ -151,7 +151,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
        }
 
        if (bytes == 0) {
-               err = ubi_wl_flush(ubi);
+               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
                if (err)
                        return err;
 
@@ -188,14 +188,12 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
        dbg_gen("start changing LEB %d:%d, %u bytes",
                vol->vol_id, req->lnum, req->bytes);
        if (req->bytes == 0)
-               return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
-                                                req->dtype);
+               return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
 
        vol->upd_bytes = req->bytes;
        vol->upd_received = 0;
        vol->changing_leb = 1;
        vol->ch_lnum = req->lnum;
-       vol->ch_dtype = req->dtype;
 
        vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
        if (!vol->upd_buf)
@@ -248,8 +246,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                        return 0;
                }
 
-               err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
-                                       UBI_UNKNOWN);
+               err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
        } else {
                /*
                 * When writing static volume, and this is the last logical
@@ -261,8 +258,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                 * contain zeros, not random trash.
                 */
                memset(buf + len, 0, vol->usable_leb_size - len);
-               err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
-                                          UBI_UNKNOWN, used_ebs);
+               err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
        }
 
        return err;
@@ -367,7 +363,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
 
        ubi_assert(vol->upd_received <= vol->upd_bytes);
        if (vol->upd_received == vol->upd_bytes) {
-               err = ubi_wl_flush(ubi);
+               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
                if (err)
                        return err;
                /* The update is finished, clear the update marker */
@@ -423,7 +419,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
                       len - vol->upd_bytes);
                len = ubi_calc_data_len(ubi, vol->upd_buf, len);
                err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
-                                               vol->upd_buf, len, UBI_UNKNOWN);
+                                               vol->upd_buf, len);
                if (err)
                        return err;
        }
index 863835f..0669cff 100644 (file)
 #include <linux/export.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi) 0
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
 
 static ssize_t vol_attribute_show(struct device *dev,
                                  struct device_attribute *attr, char *buf);
@@ -227,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
                        }
 
                if (vol_id == UBI_VOL_NUM_AUTO) {
-                       dbg_err("out of volume IDs");
+                       ubi_err("out of volume IDs");
                        err = -ENFILE;
                        goto out_unlock;
                }
@@ -241,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
        /* Ensure that this volume does not exist */
        err = -EEXIST;
        if (ubi->volumes[vol_id]) {
-               dbg_err("volume %d already exists", vol_id);
+               ubi_err("volume %d already exists", vol_id);
                goto out_unlock;
        }
 
@@ -250,7 +246,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
                if (ubi->volumes[i] &&
                    ubi->volumes[i]->name_len == req->name_len &&
                    !strcmp(ubi->volumes[i]->name, req->name)) {
-                       dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+                       ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
                        goto out_unlock;
                }
 
@@ -261,9 +257,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 
        /* Reserve physical eraseblocks */
        if (vol->reserved_pebs > ubi->avail_pebs) {
-               dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+               ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
                if (ubi->corr_peb_count)
-                       dbg_err("%d PEBs are corrupted and not used",
+                       ubi_err("%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
                err = -ENOSPC;
                goto out_unlock;
@@ -284,7 +280,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
         * Finish all pending erases because there may be some LEBs belonging
         * to the same volume ID.
         */
-       err = ubi_wl_flush(ubi);
+       err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
        if (err)
                goto out_acc;
 
@@ -360,8 +356,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
        spin_unlock(&ubi->volumes_lock);
 
        ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
-       if (paranoid_check_volumes(ubi))
-               dbg_err("check failed while creating volume %d", vol_id);
+       self_check_volumes(ubi);
        return err;
 
 out_sysfs:
@@ -461,8 +456,8 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
        spin_unlock(&ubi->volumes_lock);
 
        ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
-       if (!no_vtbl && paranoid_check_volumes(ubi))
-               dbg_err("check failed while removing volume %d", vol_id);
+       if (!no_vtbl)
+               self_check_volumes(ubi);
 
        return err;
 
@@ -500,7 +495,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 
        if (vol->vol_type == UBI_STATIC_VOLUME &&
            reserved_pebs < vol->used_ebs) {
-               dbg_err("too small size %d, %d LEBs contain data",
+               ubi_err("too small size %d, %d LEBs contain data",
                        reserved_pebs, vol->used_ebs);
                return -EINVAL;
        }
@@ -529,10 +524,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
        if (pebs > 0) {
                spin_lock(&ubi->volumes_lock);
                if (pebs > ubi->avail_pebs) {
-                       dbg_err("not enough PEBs: requested %d, available %d",
+                       ubi_err("not enough PEBs: requested %d, available %d",
                                pebs, ubi->avail_pebs);
                        if (ubi->corr_peb_count)
-                               dbg_err("%d PEBs are corrupted and not used",
+                               ubi_err("%d PEBs are corrupted and not used",
                                        ubi->corr_peb_count);
                        spin_unlock(&ubi->volumes_lock);
                        err = -ENOSPC;
@@ -588,8 +583,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
        }
 
        ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
-       if (paranoid_check_volumes(ubi))
-               dbg_err("check failed while re-sizing volume %d", vol_id);
+       self_check_volumes(ubi);
        return err;
 
 out_acc:
@@ -638,8 +632,8 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
                }
        }
 
-       if (!err && paranoid_check_volumes(ubi))
-               ;
+       if (!err)
+               self_check_volumes(ubi);
        return err;
 }
 
@@ -686,8 +680,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
                return err;
        }
 
-       if (paranoid_check_volumes(ubi))
-               dbg_err("check failed while adding volume %d", vol_id);
+       self_check_volumes(ubi);
        return err;
 
 out_cdev:
@@ -712,16 +705,14 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
        volume_sysfs_close(vol);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-
 /**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
  * @ubi: UBI device description object
  * @vol_id: volume ID
  *
  * Returns zero if volume is all right and a a negative error code if not.
  */
-static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
 {
        int idx = vol_id2idx(ubi, vol_id);
        int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -771,7 +762,7 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
        }
 
        if (vol->upd_marker && vol->corrupted) {
-               dbg_err("update marker and corrupted simultaneously");
+               ubi_err("update marker and corrupted simultaneously");
                goto fail;
        }
 
@@ -853,22 +844,22 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
        return 0;
 
 fail:
-       ubi_err("paranoid check failed for volume %d", vol_id);
+       ubi_err("self-check failed for volume %d", vol_id);
        if (vol)
-               ubi_dbg_dump_vol_info(vol);
-       ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+               ubi_dump_vol_info(vol);
+       ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
        dump_stack();
        spin_unlock(&ubi->volumes_lock);
        return -EINVAL;
 }
 
 /**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
  * @ubi: UBI device description object
  *
  * Returns zero if volumes are all right and a a negative error code if not.
  */
-static int paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
 {
        int i, err = 0;
 
@@ -876,11 +867,10 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
                return 0;
 
        for (i = 0; i < ubi->vtbl_slots; i++) {
-               err = paranoid_check_volume(ubi, i);
+               err = self_check_volume(ubi, i);
                if (err)
                        break;
        }
 
        return err;
 }
-#endif
index 357e42e..dccfced 100644 (file)
  * LEB 1. This scheme guarantees recoverability from unclean reboots.
  *
  * In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
  *
  * But it would still be beneficial to store this information in the volume
  * table. For example, suppose we have a static volume X, and all its physical
  * eraseblocks became bad for some reasons. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, for some reason we find no logical eraseblocks
  * corresponding to the volume X. According to the volume table volume X does
  * exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
  *
  * The volume table also stores so-called "update marker", which is used for
  * volume updates. Before updating the volume, the update marker is set, and
 #include <asm/div64.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
 
 /* Empty volume table record */
 static struct ubi_vtbl_record empty_vtbl_record;
@@ -106,12 +101,12 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
                        return err;
 
                err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
-                                       ubi->vtbl_size, UBI_LONGTERM);
+                                       ubi->vtbl_size);
                if (err)
                        return err;
        }
 
-       paranoid_vtbl_check(ubi);
+       self_vtbl_check(ubi);
        return 0;
 }
 
@@ -158,7 +153,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
                        return err;
 
                err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
-                                       ubi->vtbl_size, UBI_LONGTERM);
+                                       ubi->vtbl_size);
                if (err)
                        return err;
        }
@@ -197,7 +192,7 @@ static int vtbl_check(const struct ubi_device *ubi,
                if (be32_to_cpu(vtbl[i].crc) != crc) {
                        ubi_err("bad CRC at record %u: %#08x, not %#08x",
                                 i, crc, be32_to_cpu(vtbl[i].crc));
-                       ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+                       ubi_dump_vtbl_record(&vtbl[i], i);
                        return 1;
                }
 
@@ -229,7 +224,7 @@ static int vtbl_check(const struct ubi_device *ubi,
 
                n = ubi->leb_size % alignment;
                if (data_pad != n) {
-                       dbg_err("bad data_pad, has to be %d", n);
+                       ubi_err("bad data_pad, has to be %d", n);
                        err = 6;
                        goto bad;
                }
@@ -245,7 +240,7 @@ static int vtbl_check(const struct ubi_device *ubi,
                }
 
                if (reserved_pebs > ubi->good_peb_count) {
-                       dbg_err("too large reserved_pebs %d, good PEBs %d",
+                       ubi_err("too large reserved_pebs %d, good PEBs %d",
                                reserved_pebs, ubi->good_peb_count);
                        err = 9;
                        goto bad;
@@ -275,10 +270,10 @@ static int vtbl_check(const struct ubi_device *ubi,
 
                        if (len1 > 0 && len1 == len2 &&
                            !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
-                               ubi_err("volumes %d and %d have the same name"
-                                       " \"%s\"", i, n, vtbl[i].name);
-                               ubi_dbg_dump_vtbl_record(&vtbl[i], i);
-                               ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+                               ubi_err("volumes %d and %d have the same name \"%s\"",
+                                       i, n, vtbl[i].name);
+                               ubi_dump_vtbl_record(&vtbl[i], i);
+                               ubi_dump_vtbl_record(&vtbl[n], n);
                                return -EINVAL;
                        }
                }
@@ -288,65 +283,64 @@ static int vtbl_check(const struct ubi_device *ubi,
 
 bad:
        ubi_err("volume table check failed: record %d, error %d", i, err);
-       ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+       ubi_dump_vtbl_record(&vtbl[i], i);
        return -EINVAL;
 }
 
 /**
  * create_vtbl - create a copy of volume table.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @copy: number of the volume table copy
  * @vtbl: contents of the volume table
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
                       int copy, void *vtbl)
 {
        int err, tries = 0;
        struct ubi_vid_hdr *vid_hdr;
-       struct ubi_scan_leb *new_seb;
+       struct ubi_ainf_peb *new_aeb;
 
-       ubi_msg("create volume table (copy #%d)", copy + 1);
+       dbg_gen("create volume table (copy #%d)", copy + 1);
 
        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!vid_hdr)
                return -ENOMEM;
 
 retry:
-       new_seb = ubi_scan_get_free_peb(ubi, si);
-       if (IS_ERR(new_seb)) {
-               err = PTR_ERR(new_seb);
+       new_aeb = ubi_early_get_peb(ubi, ai);
+       if (IS_ERR(new_aeb)) {
+               err = PTR_ERR(new_aeb);
                goto out_free;
        }
 
-       vid_hdr->vol_type = UBI_VID_DYNAMIC;
+       vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
        vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
        vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
        vid_hdr->data_size = vid_hdr->used_ebs =
                             vid_hdr->data_pad = cpu_to_be32(0);
        vid_hdr->lnum = cpu_to_be32(copy);
-       vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+       vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
 
        /* The EC header is already there, write the VID header */
-       err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+       err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
        if (err)
                goto write_error;
 
        /* Write the layout volume contents */
-       err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+       err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
        if (err)
                goto write_error;
 
        /*
-        * And add it to the scanning information. Don't delete the old version
-        * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
+        * And add it to the attaching information. Don't delete the old version
+        * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
         */
-       err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
-                               vid_hdr, 0);
-       kmem_cache_free(si->scan_leb_slab, new_seb);
+       err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
 
@@ -356,10 +350,10 @@ write_error:
                 * Probably this physical eraseblock went bad, try to pick
                 * another one.
                 */
-               list_add(&new_seb->u.list, &si->erase);
+               list_add(&new_aeb->u.list, &ai->erase);
                goto retry;
        }
-       kmem_cache_free(si->scan_leb_slab, new_seb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 out_free:
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
@@ -369,20 +363,20 @@ out_free:
 /**
  * process_lvol - process the layout volume.
  * @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
  *
  * This function is responsible for reading the layout volume, ensuring it is
  * not corrupted, and recovering from corruptions if needed. Returns volume
  * table in case of success and a negative error code in case of failure.
  */
 static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
-                                           struct ubi_scan_info *si,
-                                           struct ubi_scan_volume *sv)
+                                           struct ubi_attach_info *ai,
+                                           struct ubi_ainf_volume *av)
 {
        int err;
        struct rb_node *rb;
-       struct ubi_scan_leb *seb;
+       struct ubi_ainf_peb *aeb;
        struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
        int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
 
@@ -414,14 +408,14 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
        dbg_gen("check layout volume");
 
        /* Read both LEB 0 and LEB 1 into memory */
-       ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-               leb[seb->lnum] = vzalloc(ubi->vtbl_size);
-               if (!leb[seb->lnum]) {
+       ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+               leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+               if (!leb[aeb->lnum]) {
                        err = -ENOMEM;
                        goto out_free;
                }
 
-               err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+               err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
                                       ubi->vtbl_size);
                if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
                        /*
@@ -429,12 +423,12 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
                         * uncorrectable ECC error, but we have our own CRC and
                         * the data will be checked later. If the data is OK,
                         * the PEB will be scrubbed (because we set
-                        * seb->scrub). If the data is not OK, the contents of
+                        * aeb->scrub). If the data is not OK, the contents of
                         * the PEB will be recovered from the second copy, and
-                        * seb->scrub will be cleared in
-                        * 'ubi_scan_add_used()'.
+                        * aeb->scrub will be cleared in
+                        * 'ubi_add_to_av()'.
                         */
-                       seb->scrub = 1;
+                       aeb->scrub = 1;
                else if (err)
                        goto out_free;
        }
@@ -453,7 +447,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
                                                  ubi->vtbl_size);
                if (leb_corrupted[1]) {
                        ubi_warn("volume table copy #2 is corrupted");
-                       err = create_vtbl(ubi, si, 1, leb[0]);
+                       err = create_vtbl(ubi, ai, 1, leb[0]);
                        if (err)
                                goto out_free;
                        ubi_msg("volume table was restored");
@@ -476,7 +470,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
                }
 
                ubi_warn("volume table copy #1 is corrupted");
-               err = create_vtbl(ubi, si, 0, leb[1]);
+               err = create_vtbl(ubi, ai, 0, leb[1]);
                if (err)
                        goto out_free;
                ubi_msg("volume table was restored");
@@ -494,13 +488,13 @@ out_free:
 /**
  * create_empty_lvol - create empty layout volume.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function returns volume table contents in case of success and a
  * negative error code in case of failure.
  */
 static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
-                                                struct ubi_scan_info *si)
+                                                struct ubi_attach_info *ai)
 {
        int i;
        struct ubi_vtbl_record *vtbl;
@@ -515,7 +509,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
        for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
                int err;
 
-               err = create_vtbl(ubi, si, i, vtbl);
+               err = create_vtbl(ubi, ai, i, vtbl);
                if (err) {
                        vfree(vtbl);
                        return ERR_PTR(err);
@@ -528,18 +522,19 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
 /**
  * init_volumes - initialize volume information for existing volumes.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  * @vtbl: volume table
  *
  * This function allocates volume description objects for existing volumes.
  * Returns zero in case of success and a negative error code in case of
  * failure.
  */
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+                       const struct ubi_attach_info *ai,
                        const struct ubi_vtbl_record *vtbl)
 {
        int i, reserved_pebs = 0;
-       struct ubi_scan_volume *sv;
+       struct ubi_ainf_volume *av;
        struct ubi_volume *vol;
 
        for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -567,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
                if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
                        /* Auto re-size flag may be set only for one volume */
                        if (ubi->autoresize_vol_id != -1) {
-                               ubi_err("more than one auto-resize volume (%d "
-                                       "and %d)", ubi->autoresize_vol_id, i);
+                               ubi_err("more than one auto-resize volume (%d and %d)",
+                                       ubi->autoresize_vol_id, i);
                                kfree(vol);
                                return -EINVAL;
                        }
@@ -595,8 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
                }
 
                /* Static volumes only */
-               sv = ubi_scan_find_sv(si, i);
-               if (!sv) {
+               av = ubi_find_av(ai, i);
+               if (!av) {
                        /*
                         * No eraseblocks belonging to this volume found. We
                         * don't actually know whether this static volume is
@@ -608,22 +603,22 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
                        continue;
                }
 
-               if (sv->leb_count != sv->used_ebs) {
+               if (av->leb_count != av->used_ebs) {
                        /*
                         * We found a static volume which misses several
                         * eraseblocks. Treat it as corrupted.
                         */
                        ubi_warn("static volume %d misses %d LEBs - corrupted",
-                                sv->vol_id, sv->used_ebs - sv->leb_count);
+                                av->vol_id, av->used_ebs - av->leb_count);
                        vol->corrupted = 1;
                        continue;
                }
 
-               vol->used_ebs = sv->used_ebs;
+               vol->used_ebs = av->used_ebs;
                vol->used_bytes =
                        (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
-               vol->used_bytes += sv->last_data_size;
-               vol->last_eb_bytes = sv->last_data_size;
+               vol->used_bytes += av->last_data_size;
+               vol->last_eb_bytes = av->last_data_size;
        }
 
        /* And add the layout volume */
@@ -632,7 +627,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
                return -ENOMEM;
 
        vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
-       vol->alignment = 1;
+       vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
        vol->vol_type = UBI_DYNAMIC_VOLUME;
        vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
        memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
@@ -665,105 +660,104 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 }
 
 /**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
  * @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
  *
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
  * to the data read from the volume tabla, and %-EINVAL if not.
  */
-static int check_sv(const struct ubi_volume *vol,
-                   const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+                   const struct ubi_ainf_volume *av)
 {
        int err;
 
-       if (sv->highest_lnum >= vol->reserved_pebs) {
+       if (av->highest_lnum >= vol->reserved_pebs) {
                err = 1;
                goto bad;
        }
-       if (sv->leb_count > vol->reserved_pebs) {
+       if (av->leb_count > vol->reserved_pebs) {
                err = 2;
                goto bad;
        }
-       if (sv->vol_type != vol->vol_type) {
+       if (av->vol_type != vol->vol_type) {
                err = 3;
                goto bad;
        }
-       if (sv->used_ebs > vol->reserved_pebs) {
+       if (av->used_ebs > vol->reserved_pebs) {
                err = 4;
                goto bad;
        }
-       if (sv->data_pad != vol->data_pad) {
+       if (av->data_pad != vol->data_pad) {
                err = 5;
                goto bad;
        }
        return 0;
 
 bad:
-       ubi_err("bad scanning information, error %d", err);
-       ubi_dbg_dump_sv(sv);
-       ubi_dbg_dump_vol_info(vol);
+       ubi_err("bad attaching information, error %d", err);
+       ubi_dump_av(av);
+       ubi_dump_vol_info(vol);
        return -EINVAL;
 }
 
 /**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check that attaching information.
+ * check_attaching_info - check that attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that attaching information is consistent to
+ * the information read from the volume table. Returns zero if the attaching
  * information is OK and %-EINVAL if it is not.
  */
-static int check_scanning_info(const struct ubi_device *ubi,
-                              struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+                              struct ubi_attach_info *ai)
 {
        int err, i;
-       struct ubi_scan_volume *sv;
+       struct ubi_ainf_volume *av;
        struct ubi_volume *vol;
 
-       if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
-               ubi_err("scanning found %d volumes, maximum is %d + %d",
-                       si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+       if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+               ubi_err("found %d volumes while attaching, maximum is %d + %d",
+                       ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
                return -EINVAL;
        }
 
-       if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
-           si->highest_vol_id < UBI_INTERNAL_VOL_START) {
-               ubi_err("too large volume ID %d found by scanning",
-                       si->highest_vol_id);
+       if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+           ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+               ubi_err("too large volume ID %d found", ai->highest_vol_id);
                return -EINVAL;
        }
 
        for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
                cond_resched();
 
-               sv = ubi_scan_find_sv(si, i);
+               av = ubi_find_av(ai, i);
                vol = ubi->volumes[i];
                if (!vol) {
-                       if (sv)
-                               ubi_scan_rm_volume(si, sv);
+                       if (av)
+                               ubi_remove_av(ai, av);
                        continue;
                }
 
                if (vol->reserved_pebs == 0) {
                        ubi_assert(i < ubi->vtbl_slots);
 
-                       if (!sv)
+                       if (!av)
                                continue;
 
                        /*
-                        * During scanning we found a volume which does not
+                        * During attaching we found a volume which does not
                         * exist according to the information in the volume
                         * table. This must have happened due to an unclean
                         * reboot while the volume was being removed. Discard
                         * these eraseblocks.
                         */
-                       ubi_msg("finish volume %d removal", sv->vol_id);
-                       ubi_scan_rm_volume(si, sv);
-               } else if (sv) {
-                       err = check_sv(vol, sv);
+                       ubi_msg("finish volume %d removal", av->vol_id);
+                       ubi_remove_av(ai, av);
+               } else if (av) {
+                       err = check_av(vol, av);
                        if (err)
                                return err;
                }
@@ -775,16 +769,16 @@ static int check_scanning_info(const struct ubi_device *ubi,
 /**
  * ubi_read_volume_table - read the volume table.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function reads volume table, checks it, recover from errors if needed,
  * or creates it if needed. Returns zero in case of success and a negative
  * error code in case of failure.
  */
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
        int i, err;
-       struct ubi_scan_volume *sv;
+       struct ubi_ainf_volume *av;
 
        empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
 
@@ -799,8 +793,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
        ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
        ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
 
-       sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
-       if (!sv) {
+       av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+       if (!av) {
                /*
                 * No logical eraseblocks belonging to the layout volume were
                 * found. This could mean that the flash is just empty. In
@@ -809,8 +803,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
                 * But if flash is not empty this must be a corruption or the
                 * MTD device just contains garbage.
                 */
-               if (si->is_empty) {
-                       ubi->vtbl = create_empty_lvol(ubi, si);
+               if (ai->is_empty) {
+                       ubi->vtbl = create_empty_lvol(ubi, ai);
                        if (IS_ERR(ubi->vtbl))
                                return PTR_ERR(ubi->vtbl);
                } else {
@@ -818,14 +812,14 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
                        return -EINVAL;
                }
        } else {
-               if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+               if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
                        /* This must not happen with proper UBI images */
-                       dbg_err("too many LEBs (%d) in layout volume",
-                               sv->leb_count);
+                       ubi_err("too many LEBs (%d) in layout volume",
+                               av->leb_count);
                        return -EINVAL;
                }
 
-               ubi->vtbl = process_lvol(ubi, si, sv);
+               ubi->vtbl = process_lvol(ubi, ai, av);
                if (IS_ERR(ubi->vtbl))
                        return PTR_ERR(ubi->vtbl);
        }
@@ -836,15 +830,15 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
         * The layout volume is OK, initialize the corresponding in-RAM data
         * structures.
         */
-       err = init_volumes(ubi, si, ubi->vtbl);
+       err = init_volumes(ubi, ai, ubi->vtbl);
        if (err)
                goto out_free;
 
        /*
-        * Make sure that the scanning information is consistent to the
+        * Make sure that the attaching information is consistent to the
         * information stored in the volume table.
         */
-       err = check_scanning_info(ubi, si);
+       err = check_attaching_info(ubi, ai);
        if (err)
                goto out_free;
 
@@ -859,21 +853,17 @@ out_free:
        return err;
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-
 /**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
  * @ubi: UBI device description object
  */
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
 {
        if (!ubi->dbg->chk_gen)
                return;
 
        if (vtbl_check(ubi, ubi->vtbl)) {
-               ubi_err("paranoid check failed");
+               ubi_err("self-check failed");
                BUG();
        }
 }
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
index 776506e..e8db4cb 100644 (file)
  * physical eraseblocks with low erase counter to free physical eraseblocks
  * with high erase counter.
  *
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL sub-system may pick a free physical eraseblock with low erase
- * counter, and so forth.
- *
  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
  * bad.
  *
@@ -70,8 +64,7 @@
  *    to the user; instead, we first want to let users fill them up with data;
  *
  *  o there is a chance that the user will put the physical eraseblock very
- *    soon, so it makes sense not to move it for some time, but wait; this is
- *    especially important in case of "short term" physical eraseblocks.
+ *    soon, so it makes sense not to move it for some time, but wait.
  *
  * Physical eraseblocks stay protected only for limited time. But the "time" is
  * measured in erase cycles in this case. This is implemented with help of the
  * @list: a link in the list of pending works
  * @func: worker function
  * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
  * @torture: if the physical eraseblock has to be tortured
  *
  * The @func pointer points to the worker function. If the @cancel argument is
@@ -159,21 +154,16 @@ struct ubi_work {
        int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
        /* The below fields are only relevant to erasure works */
        struct ubi_wl_entry *e;
+       int vol_id;
+       int lnum;
        int torture;
 };
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
-                                    struct ubi_wl_entry *e,
-                                    struct rb_root *root);
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
-                               struct ubi_wl_entry *e);
-#else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(ubi, e, root)
-#define paranoid_check_in_pq(ubi, e) 0
-#endif
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+                                struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+                           struct ubi_wl_entry *e);
 
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
@@ -350,18 +340,19 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
 /**
  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
  * @root: the RB-tree where to look for
- * @max: highest possible erase counter
+ * @diff: maximum possible difference from the smallest erase counter
  *
  * This function looks for a wear leveling entry with erase counter closest to
- * @max and less than @max.
+ * min + @diff, where min is the smallest erase counter.
  */
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
 {
        struct rb_node *p;
        struct ubi_wl_entry *e;
+       int max;
 
        e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
-       max += e->ec;
+       max = e->ec + diff;
 
        p = root->rb_node;
        while (p) {
@@ -382,19 +373,15 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
 /**
  * ubi_wl_get_peb - get a physical eraseblock.
  * @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
  *
  * This function returns a physical eraseblock in case of success and a
  * negative error code in case of failure. Might sleep.
  */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+int ubi_wl_get_peb(struct ubi_device *ubi)
 {
        int err;
        struct ubi_wl_entry *e, *first, *last;
 
-       ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
-                  dtype == UBI_UNKNOWN);
-
 retry:
        spin_lock(&ubi->wl_lock);
        if (!ubi->free.rb_node) {
@@ -412,45 +399,15 @@ retry:
                goto retry;
        }
 
-       switch (dtype) {
-       case UBI_LONGTERM:
-               /*
-                * For long term data we pick a physical eraseblock with high
-                * erase counter. But the highest erase counter we can pick is
-                * bounded by the the lowest erase counter plus
-                * %WL_FREE_MAX_DIFF.
-                */
-               e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-               break;
-       case UBI_UNKNOWN:
-               /*
-                * For unknown data we pick a physical eraseblock with medium
-                * erase counter. But we by no means can pick a physical
-                * eraseblock with erase counter greater or equivalent than the
-                * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
-                */
-               first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
-                                       u.rb);
-               last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
-
-               if (last->ec - first->ec < WL_FREE_MAX_DIFF)
-                       e = rb_entry(ubi->free.rb_node,
-                                       struct ubi_wl_entry, u.rb);
-               else
-                       e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
-               break;
-       case UBI_SHORTTERM:
-               /*
-                * For short term data we pick a physical eraseblock with the
-                * lowest erase counter as we expect it will be erased soon.
-                */
-               e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
-               break;
-       default:
-               BUG();
-       }
+       first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
+       last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
+
+       if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+               e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb);
+       else
+               e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
 
-       paranoid_check_in_wl_tree(ubi, e, &ubi->free);
+       self_check_in_wl_tree(ubi, e, &ubi->free);
 
        /*
         * Move the physical eraseblock to the protection queue where it will
@@ -461,8 +418,8 @@ retry:
        prot_queue_add(ubi, e);
        spin_unlock(&ubi->wl_lock);
 
-       err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
-                                  ubi->peb_size - ubi->vid_hdr_aloffset);
+       err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+                                   ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
                ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
                return err;
@@ -487,7 +444,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
        if (!e)
                return -ENODEV;
 
-       if (paranoid_check_in_pq(ubi, e))
+       if (self_check_in_pq(ubi, e))
                return -ENODEV;
 
        list_del(&e->u.list);
@@ -513,7 +470,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 
        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
 
-       err = paranoid_check_ec(ubi, e->pnum, e->ec);
+       err = self_check_ec(ubi, e->pnum, e->ec);
        if (err)
                return -EINVAL;
 
@@ -626,13 +583,15 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
  * schedule_erase - schedule an erase work.
  * @ubi: UBI device description object
  * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
  * @torture: if the physical eraseblock has to be tortured
  *
  * This function returns zero in case of success and a %-ENOMEM in case of
  * failure.
  */
 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
-                         int torture)
+                         int vol_id, int lnum, int torture)
 {
        struct ubi_work *wl_wrk;
 
@@ -645,6 +604,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 
        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
+       wl_wrk->vol_id = vol_id;
+       wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;
 
        schedule_ubi_work(ubi, wl_wrk);
@@ -713,7 +674,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                               e1->ec, e2->ec);
                        goto out_cancel;
                }
-               paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
+               self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -722,12 +683,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
                e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-               paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
+               self_check_in_wl_tree(ubi, e1, &ubi->scrub);
                rb_erase(&e1->u.rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }
 
-       paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
+       self_check_in_wl_tree(ubi, e2, &ubi->free);
        rb_erase(&e2->u.rb, &ubi->free);
        ubi->move_from = e1;
        ubi->move_to = e2;
@@ -797,7 +758,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                        scrubbing = 1;
                        goto out_not_moved;
                }
-               if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+               if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
                         * Target PEB had bit-flips or write error - torture it.
@@ -845,7 +806,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
 
-       err = schedule_erase(ubi, e1, 0);
+       err = schedule_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
                kmem_cache_free(ubi_wl_entry_slab, e1);
                if (e2)
@@ -860,7 +821,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                 */
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
-               err = schedule_erase(ubi, e2, 0);
+               err = schedule_erase(ubi, e2, vol_id, lnum, 0);
                if (err) {
                        kmem_cache_free(ubi_wl_entry_slab, e2);
                        goto out_ro;
@@ -899,7 +860,7 @@ out_not_moved:
        spin_unlock(&ubi->wl_lock);
 
        ubi_free_vid_hdr(ubi, vid_hdr);
-       err = schedule_erase(ubi, e2, torture);
+       err = schedule_erase(ubi, e2, vol_id, lnum, torture);
        if (err) {
                kmem_cache_free(ubi_wl_entry_slab, e2);
                goto out_ro;
@@ -1018,6 +979,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 {
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum, err, need;
+       int vol_id = wl_wrk->vol_id;
+       int lnum = wl_wrk->lnum;
 
        if (cancel) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1026,7 +989,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                return 0;
        }
 
-       dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+       dbg_wl("erase PEB %d EC %d LEB %d:%d",
+              pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
 
        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
@@ -1056,7 +1020,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                int err1;
 
                /* Re-schedule the LEB for erasure */
-               err1 = schedule_erase(ubi, e, 0);
+               err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
                if (err1) {
                        err = err1;
                        goto out_ro;
@@ -1124,6 +1088,8 @@ out_ro:
 /**
  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
  * @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
  * @pnum: physical eraseblock to return
  * @torture: if this physical eraseblock has to be tortured
  *
@@ -1132,7 +1098,8 @@ out_ro:
  * occurred to this @pnum and it has to be tested. This function returns zero
  * in case of success, and a negative error code in case of failure.
  */
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+                  int pnum, int torture)
 {
        int err;
        struct ubi_wl_entry *e;
@@ -1174,13 +1141,13 @@ retry:
                return 0;
        } else {
                if (in_wl_tree(e, &ubi->used)) {
-                       paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+                       self_check_in_wl_tree(ubi, e, &ubi->used);
                        rb_erase(&e->u.rb, &ubi->used);
                } else if (in_wl_tree(e, &ubi->scrub)) {
-                       paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
+                       self_check_in_wl_tree(ubi, e, &ubi->scrub);
                        rb_erase(&e->u.rb, &ubi->scrub);
                } else if (in_wl_tree(e, &ubi->erroneous)) {
-                       paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
+                       self_check_in_wl_tree(ubi, e, &ubi->erroneous);
                        rb_erase(&e->u.rb, &ubi->erroneous);
                        ubi->erroneous_peb_count -= 1;
                        ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1198,7 +1165,7 @@ retry:
        }
        spin_unlock(&ubi->wl_lock);
 
-       err = schedule_erase(ubi, e, torture);
+       err = schedule_erase(ubi, e, vol_id, lnum, torture);
        if (err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->used);
@@ -1222,7 +1189,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
 {
        struct ubi_wl_entry *e;
 
-       dbg_msg("schedule PEB %d for scrubbing", pnum);
+       ubi_msg("schedule PEB %d for scrubbing", pnum);
 
 retry:
        spin_lock(&ubi->wl_lock);
@@ -1247,7 +1214,7 @@ retry:
        }
 
        if (in_wl_tree(e, &ubi->used)) {
-               paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+               self_check_in_wl_tree(ubi, e, &ubi->used);
                rb_erase(&e->u.rb, &ubi->used);
        } else {
                int err;
@@ -1274,23 +1241,54 @@ retry:
 /**
  * ubi_wl_flush - flush all pending works.
  * @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
  *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
  */
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
 {
-       int err;
+       int err = 0;
+       int found = 1;
 
        /*
         * Erase while the pending works queue is not empty, but not more than
         * the number of currently pending works.
         */
-       dbg_wl("flush (%d pending works)", ubi->works_count);
-       while (ubi->works_count) {
-               err = do_work(ubi);
-               if (err)
-                       return err;
+       dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+              vol_id, lnum, ubi->works_count);
+
+       while (found) {
+               struct ubi_work *wrk;
+               found = 0;
+
+               down_read(&ubi->work_sem);
+               spin_lock(&ubi->wl_lock);
+               list_for_each_entry(wrk, &ubi->works, list) {
+                       if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+                           (lnum == UBI_ALL || wrk->lnum == lnum)) {
+                               list_del(&wrk->list);
+                               ubi->works_count -= 1;
+                               ubi_assert(ubi->works_count >= 0);
+                               spin_unlock(&ubi->wl_lock);
+
+                               err = wrk->func(ubi, wrk, 0);
+                               if (err) {
+                                       up_read(&ubi->work_sem);
+                                       return err;
+                               }
+
+                               spin_lock(&ubi->wl_lock);
+                               found = 1;
+                               break;
+                       }
+               }
+               spin_unlock(&ubi->wl_lock);
+               up_read(&ubi->work_sem);
        }
 
        /*
@@ -1300,18 +1298,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
        down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);
 
-       /*
-        * And in case last was the WL worker and it canceled the LEB
-        * movement, flush again.
-        */
-       while (ubi->works_count) {
-               dbg_wl("flush more (%d pending works)", ubi->works_count);
-               err = do_work(ubi);
-               if (err)
-                       return err;
-       }
-
-       return 0;
+       return err;
 }
 
 /**
@@ -1420,26 +1407,26 @@ static void cancel_pending(struct ubi_device *ubi)
 }
 
 /**
- * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
  * @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
  *
  * This function returns zero in case of success, and a negative error code in
  * case of failure.
  */
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
        int err, i;
        struct rb_node *rb1, *rb2;
-       struct ubi_scan_volume *sv;
-       struct ubi_scan_leb *seb, *tmp;
+       struct ubi_ainf_volume *av;
+       struct ubi_ainf_peb *aeb, *tmp;
        struct ubi_wl_entry *e;
 
        ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
        spin_lock_init(&ubi->wl_lock);
        mutex_init(&ubi->move_mutex);
        init_rwsem(&ubi->work_sem);
-       ubi->max_ec = si->max_ec;
+       ubi->max_ec = ai->max_ec;
        INIT_LIST_HEAD(&ubi->works);
 
        sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1453,48 +1440,48 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                INIT_LIST_HEAD(&ubi->pq[i]);
        ubi->pq_head = 0;
 
-       list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+       list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
                cond_resched();
 
                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
-               e->pnum = seb->pnum;
-               e->ec = seb->ec;
+               e->pnum = aeb->pnum;
+               e->ec = aeb->ec;
                ubi->lookuptbl[e->pnum] = e;
-               if (schedule_erase(ubi, e, 0)) {
+               if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
                        kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }
 
-       list_for_each_entry(seb, &si->free, u.list) {
+       list_for_each_entry(aeb, &ai->free, u.list) {
                cond_resched();
 
                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
-               e->pnum = seb->pnum;
-               e->ec = seb->ec;
+               e->pnum = aeb->pnum;
+               e->ec = aeb->ec;
                ubi_assert(e->ec >= 0);
                wl_tree_add(e, &ubi->free);
                ubi->lookuptbl[e->pnum] = e;
        }
 
-       ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-               ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+       ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+               ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
                        cond_resched();
 
                        e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                        if (!e)
                                goto out_free;
 
-                       e->pnum = seb->pnum;
-                       e->ec = seb->ec;
+                       e->pnum = aeb->pnum;
+                       e->ec = aeb->ec;
                        ubi->lookuptbl[e->pnum] = e;
-                       if (!seb->scrub) {
+                       if (!aeb->scrub) {
                                dbg_wl("add PEB %d EC %d to the used tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->used);
@@ -1567,10 +1554,8 @@ void ubi_wl_close(struct ubi_device *ubi)
        kfree(ubi->lookuptbl);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG
-
 /**
- * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to check
  * @ec: the erase counter to check
@@ -1579,7 +1564,7 @@ void ubi_wl_close(struct ubi_device *ubi)
  * is equivalent to @ec, and a negative error code if not or if an error
  * occurred.
  */
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
 {
        int err;
        long long read_ec;
@@ -1601,9 +1586,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 
        read_ec = be64_to_cpu(ec_hdr->ec);
        if (ec != read_ec) {
-               ubi_err("paranoid check failed for PEB %d", pnum);
+               ubi_err("self-check failed for PEB %d", pnum);
                ubi_err("read EC is %lld, should be %d", read_ec, ec);
-               ubi_dbg_dump_stack();
+               dump_stack();
                err = 1;
        } else
                err = 0;
@@ -1614,7 +1599,7 @@ out_free:
 }
 
 /**
- * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
  * @ubi: UBI device description object
  * @e: the wear-leveling entry to check
  * @root: the root of the tree
@@ -1622,9 +1607,8 @@ out_free:
  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
  * is not.
  */
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
-                                    struct ubi_wl_entry *e,
-                                    struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+                                struct ubi_wl_entry *e, struct rb_root *root)
 {
        if (!ubi->dbg->chk_gen)
                return 0;
@@ -1632,22 +1616,22 @@ static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
        if (in_wl_tree(e, root))
                return 0;
 
-       ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+       ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
                e->pnum, e->ec, root);
-       ubi_dbg_dump_stack();
+       dump_stack();
        return -EINVAL;
 }
 
 /**
- * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ * self_check_in_pq - check if wear-leveling entry is in the protection
  *                        queue.
  * @ubi: UBI device description object
  * @e: the wear-leveling entry to check
  *
  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
  */
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
-                               struct ubi_wl_entry *e)
+static int self_check_in_pq(const struct ubi_device *ubi,
+                           struct ubi_wl_entry *e)
 {
        struct ubi_wl_entry *p;
        int i;
@@ -1660,10 +1644,8 @@ static int paranoid_check_in_pq(const struct ubi_device *ubi,
                        if (p == e)
                                return 0;
 
-       ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+       ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
                e->pnum, e->ec);
-       ubi_dbg_dump_stack();
+       dump_stack();
        return -EINVAL;
 }
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
index da9072b..f5a24d9 100644 (file)
@@ -2000,7 +2000,7 @@ static const struct file_operations interfaces_proc_fops = {
  */
 struct cxgb4vf_debugfs_entry {
        const char *name;               /* name of debugfs node */
-       mode_t mode;                    /* file system mode */
+       umode_t mode;                   /* file system mode */
        const struct file_operations *fops;
 };
 
index 9f53698..8653eb7 100644 (file)
@@ -1549,6 +1549,17 @@ static const struct driver_info ax88772_info = {
        .tx_fixup = asix_tx_fixup,
 };
 
+static const struct driver_info hg20f9_info = {
+       .description = "HG20F9 USB 2.0 Ethernet",
+       .bind = ax88772_bind,
+       .status = asix_status,
+       .link_reset = ax88772_link_reset,
+       .reset = ax88772_reset,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+       .rx_fixup = asix_rx_fixup_common,
+       .tx_fixup = asix_tx_fixup,
+};
+
 static const struct driver_info ax88178_info = {
        .description = "ASIX AX88178 USB 2.0 Ethernet",
        .bind = ax88178_bind,
@@ -1687,6 +1698,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // HG20F9
+       USB_DEVICE(0x066B, 0x20F9),
+       .driver_info = (unsigned long) &hg20f9_info,
 }, {
        // Asus USB Ethernet Adapter
        USB_DEVICE (0x0b95, 0x7e2b),
index e3a02eb..e64b88f 100644 (file)
@@ -211,11 +211,12 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
        priv->led_cdev.name = priv->led_name;
        priv->led_cdev.brightness_set = ath9k_led_brightness;
 
+       INIT_WORK(&priv->led_work, ath9k_led_work);
+
        ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &priv->led_cdev);
        if (ret < 0)
                return;
 
-       INIT_WORK(&priv->led_work, ath9k_led_work);
        priv->led_registered = true;
 
        return;
index de57f90..3c16422 100644 (file)
@@ -56,7 +56,7 @@ static int carl9170_debugfs_open(struct inode *inode, struct file *file)
 
 struct carl9170_debugfs_fops {
        unsigned int read_bufsize;
-       mode_t attr;
+       umode_t attr;
        char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
                      ssize_t *len);
        ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
index 35d86fa..ce4b510 100644 (file)
@@ -704,7 +704,7 @@ out_unlock:
 
 struct lbs_debugfs_files {
        const char *name;
-       int perm;
+       umode_t perm;
        struct file_operations fops;
 };
 
index ba7ef2f..a9c725e 100644 (file)
@@ -672,7 +672,8 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
        u8 thermalvalue, delta, delta_lck, delta_iqk;
        long ele_a, ele_d, temp_cck, val_x, value32;
        long val_y, ele_c = 0;
-       u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
+       u8 ofdm_index[2], ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
+       s8 cck_index = 0;
        int i;
        bool is2t = IS_92C_SERIAL(rtlhal->version);
        s8 txpwr_level[2] = {0, 0};
@@ -721,7 +722,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
                        for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
                                if (ele_d == (ofdmswing_table[i] &
                                    MASKOFDM_D)) {
-
+                                       ofdm_index_old[1] = (u8) i;
                                        RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
                                           DBG_LOUD,
                                           ("Initial pathB ele_d reg0x%x = "
index ad87a1a..a1c1591 100644 (file)
@@ -209,7 +209,7 @@ out:
        return ret;
 }
 
-int wl1251_acx_feature_cfg(struct wl1251 *wl)
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options)
 {
        struct acx_feature_config *feature;
        int ret;
@@ -222,8 +222,8 @@ int wl1251_acx_feature_cfg(struct wl1251 *wl)
                goto out;
        }
 
-       /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
-       feature->data_flow_options = 0;
+       /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE can be set */
+       feature->data_flow_options = data_flow_options;
        feature->options = 0;
 
        ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG,
@@ -408,7 +408,8 @@ out:
        return ret;
 }
 
-int wl1251_acx_group_address_tbl(struct wl1251 *wl)
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+                                void *mc_list, u32 mc_list_len)
 {
        struct acx_dot11_grp_addr_tbl *acx;
        int ret;
@@ -422,9 +423,9 @@ int wl1251_acx_group_address_tbl(struct wl1251 *wl)
        }
 
        /* MAC filtering */
-       acx->enabled = 0;
-       acx->num_groups = 0;
-       memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+       acx->enabled = enable;
+       acx->num_groups = mc_list_len;
+       memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
 
        ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
                                   acx, sizeof(*acx));
@@ -869,7 +870,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
        }
 
        *mactime = tsf_info->current_tsf_lsb |
-               (tsf_info->current_tsf_msb << 31);
+               ((u64)tsf_info->current_tsf_msb << 32);
 
 out:
        kfree(tsf_info);
@@ -907,12 +908,18 @@ int wl1251_acx_rate_policies(struct wl1251 *wl)
        }
 
        /* configure one default (one-size-fits-all) rate class */
-       acx->rate_class_cnt = 1;
+       acx->rate_class_cnt = 2;
        acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
        acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
        acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
        acx->rate_class[0].aflags = 0;
 
+       /* no-retry rate class */
+       acx->rate_class[1].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
+       acx->rate_class[1].short_retry_limit = 0;
+       acx->rate_class[1].long_retry_limit = 0;
+       acx->rate_class[1].aflags = 0;
+
        ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
        if (ret < 0) {
                wl1251_warning("Setting of rate policies failed: %d", ret);
@@ -1027,6 +1034,32 @@ out:
        return ret;
 }
 
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address)
+{
+       struct wl1251_acx_arp_filter *acx;
+       int ret;
+
+       wl1251_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx)
+               return -ENOMEM;
+
+       acx->version = ACX_IPV4_VERSION;
+       acx->enable = enable;
+
+       if (enable)
+               memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
+
+       ret = wl1251_cmd_configure(wl, ACX_ARP_IP_FILTER,
+                                  acx, sizeof(*acx));
+       if (ret < 0)
+               wl1251_warning("failed to set arp ip filter: %d", ret);
+
+       kfree(acx);
+       return ret;
+}
+
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifs, u16 txop)
 {
index c2ba100..2bdec38 100644 (file)
@@ -350,8 +350,8 @@ struct acx_slot {
 } __packed;
 
 
-#define ADDRESS_GROUP_MAX      (8)
-#define ADDRESS_GROUP_MAX_LEN  (ETH_ALEN * ADDRESS_GROUP_MAX)
+#define ACX_MC_ADDRESS_GROUP_MAX       (8)
+#define ACX_MC_ADDRESS_GROUP_MAX_LEN   (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
 
 struct acx_dot11_grp_addr_tbl {
        struct acx_header header;
@@ -359,7 +359,7 @@ struct acx_dot11_grp_addr_tbl {
        u8 enabled;
        u8 num_groups;
        u8 pad[2];
-       u8 mac_table[ADDRESS_GROUP_MAX_LEN];
+       u8 mac_table[ACX_MC_ADDRESS_GROUP_MAX_LEN];
 } __packed;
 
 
@@ -1232,6 +1232,20 @@ struct wl1251_acx_bet_enable {
        u8 padding[2];
 } __packed;
 
+#define ACX_IPV4_VERSION 4
+#define ACX_IPV6_VERSION 6
+#define ACX_IPV4_ADDR_SIZE 4
+struct wl1251_acx_arp_filter {
+       struct acx_header header;
+       u8 version;     /* The IP version: 4 - IPv4, 6 - IPv6.*/
+       u8 enable;      /* 1 - ARP filtering is enabled, 0 - disabled */
+       u8 padding[2];
+       u8 address[16]; /* The IP address used to filter ARP packets.
+                          ARP packets that do not match this address are
+                          dropped. When the IP Version is 4, the last 12
+                          bytes of the address are ignored. */
+} __attribute__((packed));
+
 struct wl1251_acx_ac_cfg {
        struct acx_header header;
 
@@ -1440,7 +1454,7 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
 int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth);
 int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len);
 int wl1251_acx_tx_power(struct wl1251 *wl, int power);
-int wl1251_acx_feature_cfg(struct wl1251 *wl);
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options);
 int wl1251_acx_mem_map(struct wl1251 *wl,
                       struct acx_header *mem_map, size_t len);
 int wl1251_acx_data_path_params(struct wl1251 *wl,
@@ -1449,7 +1463,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time);
 int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter);
 int wl1251_acx_pd_threshold(struct wl1251 *wl);
 int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time);
-int wl1251_acx_group_address_tbl(struct wl1251 *wl);
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+                                void *mc_list, u32 mc_list_len);
 int wl1251_acx_service_period_timeout(struct wl1251 *wl);
 int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold);
 int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter);
@@ -1473,6 +1488,7 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl);
 int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
 int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
                          u8 max_consecutive);
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address);
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifs, u16 txop);
 int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
index d729daf..03c0801 100644 (file)
@@ -105,9 +105,16 @@ int wl1251_boot_init_seq(struct wl1251 *wl)
        wl1251_reg_write32(wl, PLL_CAL_TIME, 0x9);
 
        /*
-        * PG 1.2: set the clock buffer time to be 210 usec (CLK_BUF_TIME)
+        * set the clock buffer time (CLK_BUF_TIME) to
+        * PG 1.1 & 1.0: 760usec
+        * PG 1.2: 210usec
         */
-       wl1251_reg_write32(wl, CLK_BUF_TIME, 0x6);
+       if (wl->chip_id == CHIP_ID_1251_PG10 ||
+           wl->chip_id == CHIP_ID_1251_PG11)
+               tmp = 0x19;
+       else
+               tmp = 0x6;
+       wl1251_reg_write32(wl, CLK_BUF_TIME, tmp);
 
        /*
         * set the clock detect feature to work in the restart wu procedure
@@ -118,25 +125,37 @@ int wl1251_boot_init_seq(struct wl1251 *wl)
        wl1251_reg_write32(wl, ELP_CFG_MODE, tmp);
 
        /* PG 1.2: enable the BB PLL fix. Enable the PLL_LIMP_CLK_EN_CMD */
-       elp_cmd |= 0x00000040;
-       wl1251_reg_write32(wl, ELP_CMD, elp_cmd);
-
-       /* PG 1.2: Set the BB PLL stable time to be 1000usec
-        * (PLL_STABLE_TIME) */
-       wl1251_reg_write32(wl, CFG_PLL_SYNC_CNT, 0x20);
-
-       /* PG 1.2: read clock request time */
-       init_data = wl1251_reg_read32(wl, CLK_REQ_TIME);
+       if (wl->chip_id != CHIP_ID_1251_PG10 &&
+           wl->chip_id != CHIP_ID_1251_PG11) {
+               elp_cmd |= 0x00000040;
+               wl1251_reg_write32(wl, ELP_CMD, elp_cmd);
+       }
 
-       /*
-        * PG 1.2: set the clock request time to be ref_clk_settling_time -
-        * 1ms = 4ms
-        */
-       if (init_data > 0x21)
-               tmp = init_data - 0x21;
+       /* Set the BB PLL stable time (PLL_STABLE_TIME) to
+        * PG 1.1 & 1.0: 30usec
+        * PG 1.2: 1000usec */
+       if (wl->chip_id == CHIP_ID_1251_PG10 ||
+           wl->chip_id == CHIP_ID_1251_PG11)
+               tmp = 0x00;
        else
-               tmp = 0;
-       wl1251_reg_write32(wl, CLK_REQ_TIME, tmp);
+               tmp = 0x20;
+       wl1251_reg_write32(wl, CFG_PLL_SYNC_CNT, tmp);
+
+       if (wl->chip_id != CHIP_ID_1251_PG10 &&
+           wl->chip_id != CHIP_ID_1251_PG11) {
+               /* PG 1.2: read clock request time */
+               init_data = wl1251_reg_read32(wl, CLK_REQ_TIME);
+
+               /*
+                * PG 1.2: set the clock request time to be
+                * ref_clk_settling_time - 1ms = 4ms
+                */
+               if (init_data > 0x21)
+                       tmp = init_data - 0x21;
+               else
+                       tmp = 0;
+               wl1251_reg_write32(wl, CLK_REQ_TIME, tmp);
+       }
 
        /* set BB PLL configurations in RF AFE */
        wl1251_reg_write32(wl, 0x003058cc, 0x4B5);
@@ -299,7 +318,8 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
                ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
                ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
                REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
-               BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID;
+               BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID |
+               PS_REPORT_EVENT_ID;
 
        ret = wl1251_event_unmask(wl);
        if (ret < 0) {
index d14d69d..eaaebb7 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crc7.h>
+#include <linux/etherdevice.h>
 
 #include "wl1251.h"
 #include "reg.h"
@@ -22,6 +23,7 @@ int wl1251_cmd_send(struct wl1251 *wl, u16 id, void *buf, size_t len)
 {
        struct wl1251_cmd_header *cmd;
        unsigned long timeout;
+       u32 poll_count = 0;
        u32 intr;
        int ret = 0;
 
@@ -45,11 +47,20 @@ int wl1251_cmd_send(struct wl1251 *wl, u16 id, void *buf, size_t len)
                        goto out;
                }
 
-               msleep(1);
+               poll_count++;
+               if (poll_count < 30)
+                       udelay(1);
+               else
+                       msleep(1);
 
                intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
        }
 
+       wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+
+       if (cmd->status != CMD_STATUS_SUCCESS)
+               wl1251_error("command %d returned %d", id, cmd->status);
+
        wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
                           WL1251_ACX_INTR_CMD_COMPLETE);
 
@@ -203,11 +214,11 @@ out:
        return ret;
 }
 
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
 {
        struct cmd_enabledisable_path *cmd;
        int ret;
-       u16 cmd_rx, cmd_tx;
+       u16 cmd_rx;
 
        wl1251_debug(DEBUG_CMD, "cmd data path");
 
@@ -219,13 +230,10 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
 
        cmd->channel = channel;
 
-       if (enable) {
+       if (enable)
                cmd_rx = CMD_ENABLE_RX;
-               cmd_tx = CMD_ENABLE_TX;
-       } else {
+       else
                cmd_rx = CMD_DISABLE_RX;
-               cmd_tx = CMD_DISABLE_TX;
-       }
 
        ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
        if (ret < 0) {
@@ -237,17 +245,38 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
        wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d",
                     enable ? "start" : "stop", channel);
 
+out:
+       kfree(cmd);
+       return ret;
+}
+
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable)
+{
+       struct cmd_enabledisable_path *cmd;
+       int ret;
+       u16 cmd_tx;
+
+       wl1251_debug(DEBUG_CMD, "cmd data path");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->channel = channel;
+
+       if (enable)
+               cmd_tx = CMD_ENABLE_TX;
+       else
+               cmd_tx = CMD_DISABLE_TX;
+
        ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
-       if (ret < 0) {
+       if (ret < 0)
                wl1251_error("tx %s cmd for channel %d failed",
                             enable ? "start" : "stop", channel);
-               goto out;
-       }
-
-       wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
-                    enable ? "start" : "stop", channel);
+       else
+               wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
+                            enable ? "start" : "stop", channel);
 
-out:
        kfree(cmd);
        return ret;
 }
@@ -277,15 +306,6 @@ int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
        join->rx_config_options = wl->rx_config;
        join->rx_filter_options = wl->rx_filter;
 
-       /*
-        * FIXME: disable temporarily all filters because after commit
-        * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
-        * association. The filter logic needs to be implemented properly
-        * and once that is done, this hack can be removed.
-        */
-       join->rx_config_options = 0;
-       join->rx_filter_options = WL1251_DEFAULT_RX_FILTER;
-
        join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
                RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
 
@@ -306,6 +326,28 @@ out:
        return ret;
 }
 
+int wl1251_cmd_disconnect(struct wl1251 *wl)
+{
+       struct wl1251_cmd_disconnect *cmd;
+       int ret;
+
+       wl1251_debug(DEBUG_CMD, "cmd disconnect");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->rx_config_options = wl->rx_config;
+       cmd->rx_filter_options = 0;
+
+       ret = wl1251_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd));
+       if (ret < 0)
+               wl1251_error("cmd disconnect failed: %d", ret);
+
+       kfree(cmd);
+       return ret;
+}
+
 int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode)
 {
        struct wl1251_cmd_ps_params *ps_params = NULL;
@@ -323,7 +365,8 @@ int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode)
        ps_params->send_null_data = 1;
        ps_params->retries = 5;
        ps_params->hang_over_period = 128;
-       ps_params->null_data_rate = 1; /* 1 Mbps */
+       ps_params->null_data_rate = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
+               RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
 
        ret = wl1251_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
                              sizeof(*ps_params));
@@ -419,7 +462,9 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
        struct wl1251_cmd_scan *cmd;
        int i, ret = 0;
 
-       wl1251_debug(DEBUG_CMD, "cmd scan");
+       wl1251_debug(DEBUG_CMD, "cmd scan channels %d", n_channels);
+
+       WARN_ON(n_channels > SCAN_MAX_NUM_OF_CHANNELS);
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
@@ -430,6 +475,13 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
                                                    CFG_RX_MGMT_EN |
                                                    CFG_RX_BCN_EN);
        cmd->params.scan_options = 0;
+       /*
+        * Use high priority scan when not associated to prevent fw issue
+        * causing never-ending scans (sometimes 20+ minutes).
+        * Note: This bug may be caused by the fw's DTIM handling.
+        */
+       if (is_zero_ether_addr(wl->bssid))
+               cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
        cmd->params.num_channels = n_channels;
        cmd->params.num_probe_requests = n_probes;
        cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
index ee4f2b3..3df8179 100644 (file)
@@ -35,9 +35,11 @@ int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len);
 int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len);
 int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
                   void *bitmap, u16 bitmap_len, u8 bitmap_control);
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable);
 int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
                    u16 beacon_interval, u8 dtim_interval);
+int wl1251_cmd_disconnect(struct wl1251 *wl);
 int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode);
 int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
                           size_t len);
@@ -167,6 +169,11 @@ struct cmd_read_write_memory {
 #define CMDMBOX_HEADER_LEN 4
 #define CMDMBOX_INFO_ELEM_HEADER_LEN 4
 
+#define WL1251_SCAN_OPT_PASSIVE                1
+#define WL1251_SCAN_OPT_5GHZ_BAND      2
+#define WL1251_SCAN_OPT_TRIGGERD_SCAN  4
+#define WL1251_SCAN_OPT_PRIORITY_HIGH  8
+
 #define WL1251_SCAN_MIN_DURATION 30000
 #define WL1251_SCAN_MAX_DURATION 60000
 
@@ -276,6 +283,13 @@ struct cmd_join {
        u8 reserved;
 } __packed;
 
+struct wl1251_cmd_disconnect {
+       struct wl1251_cmd_header header;
+
+       u32 rx_config_options;
+       u32 rx_filter_options;
+} __packed;
+
 struct cmd_enabledisable_path {
        struct wl1251_cmd_header header;
 
index 6c27400..66d75e3 100644 (file)
@@ -261,6 +261,42 @@ static const struct file_operations tx_queue_status_ops = {
        .llseek = generic_file_llseek,
 };
 
+static ssize_t dump_nvs_read(struct file *file, char __user *userbuf,
+                            size_t count, loff_t *ppos)
+{
+       struct wl1251 *wl = file->private_data;
+
+       if (wl->eeprom_dump == NULL)
+               return -EINVAL;
+
+       return simple_read_from_buffer(userbuf, count, ppos,
+               wl->eeprom_dump, 752);
+}
+
+static ssize_t dump_full_read(struct file *file, char __user *userbuf,
+                             size_t count, loff_t *ppos)
+{
+       struct wl1251 *wl = file->private_data;
+
+       if (wl->eeprom_dump == NULL)
+               return -EINVAL;
+
+       return simple_read_from_buffer(userbuf, count, ppos,
+               wl->eeprom_dump, 1024);
+}
+
+static const struct file_operations dump_nvs_ops = {
+       .read = dump_nvs_read,
+       .open = wl1251_open_file_generic,
+       .llseek = generic_file_llseek,
+};
+
+static const struct file_operations dump_full_ops = {
+       .read = dump_full_read,
+       .open = wl1251_open_file_generic,
+       .llseek = generic_file_llseek,
+};
+
 static void wl1251_debugfs_delete_files(struct wl1251 *wl)
 {
        DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -358,6 +394,11 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
        DEBUGFS_DEL(tx_queue_status);
        DEBUGFS_DEL(retry_count);
        DEBUGFS_DEL(excessive_retries);
+
+       if (wl->eeprom_dump != NULL) {
+               DEBUGFS_DEL(dump_nvs);
+               DEBUGFS_DEL(dump_full);
+       }
 }
 
 static int wl1251_debugfs_add_files(struct wl1251 *wl)
@@ -460,6 +501,12 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
        DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
        DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
 
+       /* temporary (?) hack for EEPROM dumping */
+       if (wl->eeprom_dump != NULL) {
+               DEBUGFS_ADD(dump_nvs, wl->debugfs.rootdir);
+               DEBUGFS_ADD(dump_full, wl->debugfs.rootdir);
+       }
+
 out:
        if (ret < 0)
                wl1251_debugfs_delete_files(wl);
index 9f15cca..719a877 100644 (file)
@@ -67,29 +67,23 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
 
        if (vector & BSS_LOSE_EVENT_ID) {
                wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
-
-               if (wl->psm_requested &&
-                   wl->station_mode != STATION_ACTIVE_MODE) {
-                       ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
-                       if (ret < 0)
-                               return ret;
-               }
+               wl1251_no_ps_event(wl);
+               wl->bss_lost = 1;
+               ieee80211_queue_delayed_work(wl->hw, &wl->ps_work, 0);
        }
 
-       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
-           wl->station_mode != STATION_ACTIVE_MODE) {
+       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
                wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
+               wl1251_no_ps_event(wl);
 
                /* indicate to the stack, that beacons have been lost */
-               ieee80211_beacon_loss(wl->vif);
+               if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
+                       ieee80211_beacon_loss(wl->vif);
        }
 
        if (vector & REGAINED_BSS_EVENT_ID) {
-               if (wl->psm_requested) {
-                       ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
-                       if (ret < 0)
-                               return ret;
-               }
+               wl->bss_lost = 0;
+               ieee80211_queue_delayed_work(wl->hw, &wl->ps_work, 0);
        }
 
        if (wl->vif && wl->rssi_thold) {
@@ -110,6 +104,27 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                }
        }
 
+       if (vector & PS_REPORT_EVENT_ID) {
+               if (mbox->ps_status == ENTER_POWER_SAVE_SUCCESS) {
+                       /* enable beacon filtering */
+                       ret = wl1251_acx_beacon_filter_opt(wl, true);
+                       if (ret < 0)
+                               wl1251_error("beacon filter enable failed");
+
+               } else if (wl->ps_transitioning) {
+                       if (mbox->ps_status == ENTER_POWER_SAVE_FAIL)
+                               wl->station_mode = STATION_ACTIVE_MODE;
+                       /* always happens on exit from idle - ignore for now
+                       else if (mbox->ps_status == EXIT_POWER_SAVE_FAIL)
+                               wl->station_mode = STATION_POWER_SAVE_MODE;
+                       */
+               }
+
+               //wl1251_error("ps_status %d, mode %d",
+               //      mbox->ps_status, wl->station_mode);
+               wl->ps_transitioning = false;
+       }
+
        return 0;
 }
 
index 30eb5d1..b638229 100644 (file)
@@ -73,6 +73,13 @@ enum {
        EVENT_MBOX_ALL_EVENT_ID                  = 0x7fffffff,
 };
 
+enum {
+       ENTER_POWER_SAVE_FAIL    =  0,
+       ENTER_POWER_SAVE_SUCCESS =  1,
+       EXIT_POWER_SAVE_FAIL     =  2,
+       EXIT_POWER_SAVE_SUCCESS  =  3,
+};
+
 struct event_debug_report {
        u8 debug_event_id;
        u8 num_params;
index 89b43d3..1d799bf 100644 (file)
@@ -33,7 +33,7 @@ int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
 {
        int ret;
 
-       ret = wl1251_acx_feature_cfg(wl);
+       ret = wl1251_acx_feature_cfg(wl, 0);
        if (ret < 0) {
                wl1251_warning("couldn't set feature config");
                return ret;
@@ -127,7 +127,7 @@ int wl1251_hw_init_phy_config(struct wl1251 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1251_acx_group_address_tbl(wl);
+       ret = wl1251_acx_group_address_tbl(wl, true, NULL, 0);
        if (ret < 0)
                return ret;
 
@@ -394,8 +394,13 @@ int wl1251_hw_init(struct wl1251 *wl)
        if (ret < 0)
                goto out_free_data_path;
 
-       /* Enable data path */
-       ret = wl1251_cmd_data_path(wl, wl->channel, 1);
+       /* Enable rx data path */
+       ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+       if (ret < 0)
+               goto out_free_data_path;
+
+       /* Enable tx data path */
+       ret = wl1251_cmd_data_path_tx(wl, wl->channel, 1);
        if (ret < 0)
                goto out_free_data_path;
 
index 40c1574..e7f1818 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/netdevice.h>
 
 #include "wl1251.h"
 #include "wl12xx_80211.h"
 #include "debugfs.h"
 #include "boot.h"
 
+static bool use_fw_ps = true;
+module_param(use_fw_ps, bool, 0644);
+MODULE_PARM_DESC(use_fw_ps, "Enable powersave once and leave it for chip's "
+                           "firmware to manage. When disabled, mac80211 "
+                           "will toggle powersave on tx activity instead. "
+                           "Default: y/Y/1");
+
 void wl1251_enable_interrupts(struct wl1251 *wl)
 {
        wl->if_ops->enable_irq(wl);
@@ -212,12 +220,11 @@ out:
        return ret;
 }
 
-#define WL1251_IRQ_LOOP_COUNT 10
-static void wl1251_irq_work(struct work_struct *work)
+#define WL1251_IRQ_LOOP_COUNT 100
+irqreturn_t wl1251_irq(int irq, void *cookie)
 {
        u32 intr, ctr = WL1251_IRQ_LOOP_COUNT;
-       struct wl1251 *wl =
-               container_of(work, struct wl1251, irq_work);
+       struct wl1251 *wl = cookie;
        int ret;
 
        mutex_lock(&wl->mutex);
@@ -279,9 +286,18 @@ static void wl1251_irq_work(struct work_struct *work)
                        goto out_sleep;
                }
 
+               if (intr & WL1251_ACX_INTR_TX_RESULT) {
+                       wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
+                       wl1251_tx_complete(wl);
+               }
+
                if (intr & WL1251_ACX_INTR_RX0_DATA) {
                        wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
                        wl1251_rx(wl);
+
+                       if ((intr & WL1251_ACX_INTR_RX1_DATA)
+                           && skb_queue_len(&wl->tx_queue) > 0)
+                               wl1251_tx_work_unlocked(wl, false);
                }
 
                if (intr & WL1251_ACX_INTR_RX1_DATA) {
@@ -289,11 +305,6 @@ static void wl1251_irq_work(struct work_struct *work)
                        wl1251_rx(wl);
                }
 
-               if (intr & WL1251_ACX_INTR_TX_RESULT) {
-                       wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
-                       wl1251_tx_complete(wl);
-               }
-
                if (intr & WL1251_ACX_INTR_EVENT_A) {
                        wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_A");
                        wl1251_event_handle(wl, 0);
@@ -308,6 +319,9 @@ static void wl1251_irq_work(struct work_struct *work)
                        wl1251_debug(DEBUG_IRQ,
                                     "WL1251_ACX_INTR_INIT_COMPLETE");
 
+               if (skb_queue_len(&wl->tx_queue) > 0)
+                       wl1251_tx_work_unlocked(wl, false);
+
                if (--ctr == 0)
                        break;
 
@@ -320,6 +334,16 @@ out_sleep:
 
 out:
        mutex_unlock(&wl->mutex);
+       return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(wl1251_irq);
+
+static void wl1251_irq_work(struct work_struct *work)
+{
+       struct wl1251 *wl =
+               container_of(work, struct wl1251, irq_work);
+
+       wl1251_irq(0, wl);
 }
 
 static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
@@ -334,6 +358,12 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
        if (ret < 0)
                goto out;
 
+       /*
+        * Join command applies filters, and if we are not associated,
+        * BSSID filter must be disabled for association to work.
+        */
+       if (is_zero_ether_addr(wl->bssid))
+               wl->rx_config &= ~CFG_BSSID_FILTER_EN;
 
        ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
                              dtim_period);
@@ -344,35 +374,10 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
        if (ret < 0)
                wl1251_warning("join timeout");
 
-out:
-       return ret;
-}
-
-static void wl1251_filter_work(struct work_struct *work)
-{
-       struct wl1251 *wl =
-               container_of(work, struct wl1251, filter_work);
-       int ret;
-
-       mutex_lock(&wl->mutex);
-
-       if (wl->state == WL1251_STATE_OFF)
-               goto out;
-
-       ret = wl1251_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out;
-
-       ret = wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int,
-                         wl->dtim_period);
-       if (ret < 0)
-               goto out_sleep;
-
-out_sleep:
-       wl1251_ps_elp_sleep(wl);
+       wl1251_no_ps_event(wl);
 
 out:
-       mutex_unlock(&wl->mutex);
+       return ret;
 }
 
 static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -478,7 +483,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
 
        cancel_work_sync(&wl->irq_work);
        cancel_work_sync(&wl->tx_work);
-       cancel_work_sync(&wl->filter_work);
+       cancel_delayed_work_sync(&wl->ps_work);
        cancel_delayed_work_sync(&wl->elp_work);
 
        mutex_lock(&wl->mutex);
@@ -503,6 +508,9 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
        wl->rssi_thold = 0;
        wl->channel = WL1251_DEFAULT_CHANNEL;
+       wl->monitor_present = false;
+       wl->joined = false;
+       wl->long_doze_mode_set = false;
 
        wl1251_debugfs_reset(wl);
 
@@ -559,9 +567,40 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
        wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
        wl->vif = NULL;
+       memset(wl->bssid, 0, ETH_ALEN);
        mutex_unlock(&wl->mutex);
 }
 
+static int wl1251_build_null_data(struct wl1251 *wl)
+{
+       struct sk_buff *skb = NULL;
+       int size;
+       void *ptr;
+       int ret = -ENOMEM;
+
+
+       if (wl->bss_type == BSS_TYPE_IBSS) {
+               size = sizeof(struct wl12xx_null_data_template);
+               ptr = NULL;
+       } else {
+               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+               if (!skb)
+                       goto out;
+               size = skb->len;
+               ptr = skb->data;
+       }
+
+       ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
+
+out:
+       dev_kfree_skb(skb);
+       if (ret)
+               wl1251_warning("cmd buld null data failed %d", ret);
+
+       return ret;
+
+}
+
 static int wl1251_build_qos_null_data(struct wl1251 *wl)
 {
        struct ieee80211_qos_hdr template;
@@ -583,6 +622,99 @@ static int wl1251_build_qos_null_data(struct wl1251 *wl)
                                       sizeof(template));
 }
 
+static void wl1251_ps_work(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct wl1251 *wl;
+       unsigned long diff, wait;
+       bool need_ps;
+       bool have_ps;
+       int ret;
+       int i;
+
+       dwork = container_of(work, struct delayed_work, work);
+       wl = container_of(dwork, struct wl1251, ps_work);
+
+       mutex_lock(&wl->mutex);
+
+       /* don't change PS modes while still transitioning, to avoid possible
+        * fw bugs (it normally takes ~130ms to enable and ~10ms to disable) */
+       if (wl->ps_transitioning) {
+               diff = jiffies - wl->ps_change_jiffies;
+               if (diff > msecs_to_jiffies(500)) {
+                       wl1251_error("PS change taking too long: %lu", diff);
+                       wl->ps_transitioning = false;
+               } else {
+                       //wl1251_error("PS still transitioning");
+                       ieee80211_queue_delayed_work(wl->hw, &wl->ps_work,
+                               msecs_to_jiffies(50));
+                       goto out;
+               }
+       }
+
+       have_ps = wl->station_mode == STATION_POWER_SAVE_MODE;
+       need_ps = wl->psm_requested && !wl->bss_lost
+               && wl->rate < wl->ps_rate_threshold;
+
+       if (need_ps == have_ps) {
+               //wl1251_info("ps: already in mode %d", have_ps);
+               goto out;
+       }
+
+       /* don't enter PS if there was recent activity */
+       if (need_ps) {
+               wait = 0;
+
+               diff = jiffies - wl->last_no_ps_jiffies[1];
+               if (diff < msecs_to_jiffies(1000))
+                       wait = msecs_to_jiffies(1000) - diff + 1;
+
+               diff = jiffies - wl->last_no_ps_jiffies[0];
+               if (diff < msecs_to_jiffies(3000))
+                       wait += msecs_to_jiffies(1000);
+
+               for (i = 0; i < ARRAY_SIZE(wl->tx_frames); i++) {
+                       if (wl->tx_frames[i] != NULL) {
+                               //wl1251_error("  frm %d busy", i);
+                               if (wait < msecs_to_jiffies(50))
+                                       wait = msecs_to_jiffies(50);
+                               break;
+                       }
+               }
+
+               if (wait > 0) {
+                       ieee80211_queue_delayed_work(wl->hw, &wl->ps_work, wait);
+                       goto out;
+               }
+       }
+
+       ret = wl1251_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       if (need_ps) {
+               wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
+                                           wl->dtim_period);
+       }
+       ret = wl1251_ps_set_mode(wl,
+               need_ps ? STATION_POWER_SAVE_MODE : STATION_ACTIVE_MODE);
+       if (ret < 0)
+               goto out_sleep;
+
+       // wl1251_info("psm %d, r %u", need_ps, wl->rate);
+
+out_sleep:
+       wl1251_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+}
+
+static bool wl1251_can_do_pm(struct ieee80211_conf *conf, struct wl1251 *wl)
+{
+       return (conf->flags & IEEE80211_CONF_PS) && !wl->monitor_present;
+}
+
 static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct wl1251 *wl = hw->priv;
@@ -591,8 +723,10 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 
        channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
 
-       wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+       wl1251_debug(DEBUG_MAC80211,
+                    "mac80211 config ch %d monitor %s psm %s power %d",
                     channel,
+                    conf->flags & IEEE80211_CONF_MONITOR ? "on" : "off",
                     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
                     conf->power_level);
 
@@ -602,42 +736,62 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
        if (ret < 0)
                goto out;
 
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               u32 mode;
+
+               if (conf->flags & IEEE80211_CONF_MONITOR) {
+                       wl->monitor_present = true;
+                       mode = DF_SNIFF_MODE_ENABLE | DF_ENCRYPTION_DISABLE;
+               } else {
+                       wl->monitor_present = false;
+                       mode = 0;
+               }
+
+               ret = wl1251_acx_feature_cfg(wl, mode);
+               if (ret < 0)
+                       goto out_sleep;
+       }
+
        if (channel != wl->channel) {
                wl->channel = channel;
 
-               ret = wl1251_join(wl, wl->bss_type, wl->channel,
-                                 wl->beacon_int, wl->dtim_period);
+               /*
+                * Use ENABLE_RX command for channel switching when no
+                * interface is present (monitor mode only).
+                * This leaves the tx path disabled in firmware, whereas
+                * the usual JOIN command seems to transmit some frames
+                * at firmware level.
+                *
+                * Note that bss_type must be BSS_TYPE_STA_BSS, also at least
+                * one join has to be performed before CMD_ENABLE_RX can
+                * properly switch channels (join will be done by CONF_IDLE).
+                */
+               if (wl->vif == NULL) {
+                       wl->bss_type = BSS_TYPE_STA_BSS;
+                       wl->joined = false;
+                       ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+               } else {
+                       ret = wl1251_join(wl, wl->bss_type, wl->channel,
+                                         wl->beacon_int, wl->dtim_period);
+               }
                if (ret < 0)
                        goto out_sleep;
        }
 
-       if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
+       if (wl1251_can_do_pm(conf, wl) && !wl->psm_requested) {
                wl1251_debug(DEBUG_PSM, "psm enabled");
 
                wl->psm_requested = true;
 
                wl->dtim_period = conf->ps_dtim_period;
 
-               ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
-                                                 wl->dtim_period);
-
-               /*
-                * mac80211 enables PSM only if we're already associated.
-                */
-               ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
-               if (ret < 0)
-                       goto out_sleep;
-       } else if (!(conf->flags & IEEE80211_CONF_PS) &&
-                  wl->psm_requested) {
+               ieee80211_queue_delayed_work(wl->hw, &wl->ps_work, 0);
+       } else if (!wl1251_can_do_pm(conf, wl) && wl->psm_requested) {
                wl1251_debug(DEBUG_PSM, "psm disabled");
 
                wl->psm_requested = false;
 
-               if (wl->station_mode != STATION_ACTIVE_MODE) {
-                       ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
-                       if (ret < 0)
-                               goto out_sleep;
-               }
+               ieee80211_queue_delayed_work(wl->hw, &wl->ps_work, 0);
        }
 
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -649,6 +803,11 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
                        ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
                        if (ret < 0)
                                goto out_sleep;
+
+                       ret = wl1251_event_wait(wl, PS_REPORT_EVENT_ID, 100);
+                       if (ret < 0)
+                               wl1251_error("error waiting for wakeup");
+
                        ret = wl1251_join(wl, wl->bss_type, wl->channel,
                                          wl->beacon_int, wl->dtim_period);
                        if (ret < 0)
@@ -673,29 +832,72 @@ out:
        return ret;
 }
 
+struct wl1251_filter_params {
+       bool enabled;
+       int mc_list_length;
+       u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
+};
+
+static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
+                                      struct netdev_hw_addr_list *mc_list)
+{
+       struct wl1251_filter_params *fp;
+       struct netdev_hw_addr *ha;
+       struct wl1251 *wl = hw->priv;
+
+       if (unlikely(wl->state == WL1251_STATE_OFF))
+               return 0;
+
+       fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+       if (!fp) {
+               wl1251_error("Out of memory setting filters.");
+               return 0;
+       }
+
+       /* update multicast filtering parameters */
+       fp->mc_list_length = 0;
+       if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+               fp->enabled = false;
+       } else {
+               fp->enabled = true;
+               netdev_hw_addr_list_for_each(ha, mc_list) {
+                       memcpy(fp->mc_list[fp->mc_list_length],
+                                       ha->addr, ETH_ALEN);
+                       fp->mc_list_length++;
+               }
+       }
+
+       return (u64)(unsigned long)fp;
+}
+
 #define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
                                  FIF_ALLMULTI | \
                                  FIF_FCSFAIL | \
                                  FIF_BCN_PRBRESP_PROMISC | \
                                  FIF_CONTROL | \
-                                 FIF_OTHER_BSS)
+                                 FIF_OTHER_BSS | \
+                                 FIF_PROBE_REQ)
 
 static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
                                       unsigned int changed,
-                                      unsigned int *total,u64 multicast)
+                                      unsigned int *total, u64 multicast)
 {
+       struct wl1251_filter_params *fp = (void *)(unsigned long)multicast;
        struct wl1251 *wl = hw->priv;
+       int ret = 0;
 
        wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
 
        *total &= WL1251_SUPPORTED_FILTERS;
        changed &= WL1251_SUPPORTED_FILTERS;
 
-       if (changed == 0)
+       if (changed == 0) {
                /* no filters which we support changed */
+               kfree(fp);
                return;
+       }
 
-       /* FIXME: wl->rx_config and wl->rx_filter are not protected */
+       mutex_lock(&wl->mutex);
 
        wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
        wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
@@ -718,15 +920,35 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
        }
        if (*total & FIF_CONTROL)
                wl->rx_filter |= CFG_RX_CTL_EN;
-       if (*total & FIF_OTHER_BSS)
-               wl->rx_filter &= ~CFG_BSSID_FILTER_EN;
+       if (*total & FIF_OTHER_BSS || is_zero_ether_addr(wl->bssid))
+               wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+       if (*total & FIF_PROBE_REQ)
+               wl->rx_filter |= CFG_RX_PREQ_EN;
 
-       /*
-        * FIXME: workqueues need to be properly cancelled on stop(), for
-        * now let's just disable changing the filter settings. They will
-        * be updated any on config().
-        */
-       /* schedule_work(&wl->filter_work); */
+       if (wl->state == WL1251_STATE_OFF)
+               goto out;
+
+       ret = wl1251_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+               ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
+       else if (fp)
+               ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
+                                                  fp->mc_list,
+                                                  fp->mc_list_length);
+       if (ret < 0)
+               goto out;
+
+       /* send filters to firmware */
+       wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+
+       wl1251_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+       kfree(fp);
 }
 
 /* HW encryption */
@@ -806,12 +1028,12 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        mutex_lock(&wl->mutex);
 
-       ret = wl1251_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out_unlock;
-
        switch (cmd) {
        case SET_KEY:
+               if (wl->monitor_present) {
+                       ret = -EOPNOTSUPP;
+                       goto out_unlock;
+               }
                wl_cmd->key_action = KEY_ADD_OR_REPLACE;
                break;
        case DISABLE_KEY:
@@ -822,6 +1044,10 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
        }
 
+       ret = wl1251_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_unlock;
+
        ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr);
        if (ret < 0) {
                wl1251_error("Set KEY type failed");
@@ -922,6 +1148,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
        ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
                              req->n_channels, WL1251_SCAN_NUM_PROBES);
        if (ret < 0) {
+               wl1251_debug(DEBUG_SCAN, "scan failed %d", ret);
                wl->scanning = false;
                goto out_sleep;
        }
@@ -965,6 +1192,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 {
        struct wl1251 *wl = hw->priv;
        struct sk_buff *beacon, *skb;
+       bool do_join = false;
+       bool enable;
        int ret;
 
        wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -985,28 +1214,20 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
                wl->rssi_thold = bss_conf->cqm_rssi_thold;
        }
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if ((changed & BSS_CHANGED_BSSID) &&
+           memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
                memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
-               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
-               if (!skb)
-                       goto out_sleep;
-
-               ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
-                                             skb->data, skb->len);
-               dev_kfree_skb(skb);
-               if (ret < 0)
-                       goto out_sleep;
-
-               ret = wl1251_build_qos_null_data(wl);
-               if (ret < 0)
-                       goto out;
+               if (!is_zero_ether_addr(wl->bssid)) {
+                       ret = wl1251_build_null_data(wl);
+                       if (ret < 0)
+                               goto out_sleep;
 
-               if (wl->bss_type != BSS_TYPE_IBSS) {
-                       ret = wl1251_join(wl, wl->bss_type, wl->channel,
-                                         wl->beacon_int, wl->dtim_period);
+                       ret = wl1251_build_qos_null_data(wl);
                        if (ret < 0)
                                goto out_sleep;
+
+                       do_join = true;
                }
        }
 
@@ -1063,6 +1284,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
+       if (changed & BSS_CHANGED_ARP_FILTER) {
+               __be32 addr = bss_conf->arp_addr_list[0];
+               WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+
+               enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+       ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
+
+               if (ret < 0)
+                       goto out_sleep;
+       }
+
        if (changed & BSS_CHANGED_BEACON) {
                beacon = ieee80211_beacon_get(hw, vif);
                if (!beacon)
@@ -1084,9 +1316,12 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
                if (ret < 0)
                        goto out_sleep;
 
-               ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
-                                 wl->channel, wl->dtim_period);
+               do_join = true;
+       }
 
+       if (do_join) {
+               ret = wl1251_join(wl, wl->bss_type, wl->channel,
+                                 wl->beacon_int, wl->dtim_period);
                if (ret < 0)
                        goto out_sleep;
        }
@@ -1233,6 +1468,7 @@ static const struct ieee80211_ops wl1251_ops = {
        .add_interface = wl1251_op_add_interface,
        .remove_interface = wl1251_op_remove_interface,
        .config = wl1251_op_config,
+       .prepare_multicast = wl1251_op_prepare_multicast,
        .configure_filter = wl1251_op_configure_filter,
        .tx = wl1251_op_tx,
        .set_key = wl1251_op_set_key,
@@ -1303,6 +1539,34 @@ static int wl1251_read_eeprom_mac(struct wl1251 *wl)
        return 0;
 }
 
+/* temporary (?) hack for EEPROM dumping
+ * (it seems this can only be done before fw is running) */
+static int wl1251_dump_eeprom(struct wl1251 *wl)
+{
+       int ret;
+
+       wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
+
+       wl->eeprom_dump = kzalloc(1024, GFP_KERNEL);
+       if (wl->eeprom_dump == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = wl1251_read_eeprom(wl, 0, wl->eeprom_dump, 1024);
+       if (ret != 0) {
+               wl1251_error("eeprom dump failed: %d", ret);
+               kfree(wl->eeprom_dump);
+               wl->eeprom_dump = NULL;
+               goto out;
+       }
+
+       wl1251_info("eeprom dumped.");
+
+out:
+       return ret;
+}
+
 static int wl1251_register_hw(struct wl1251 *wl)
 {
        int ret;
@@ -1343,6 +1607,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
                IEEE80211_HW_SUPPORTS_UAPSD |
                IEEE80211_HW_SUPPORTS_CQM_RSSI;
 
+       if (use_fw_ps)
+               wl->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                         BIT(NL80211_IFTYPE_ADHOC);
        wl->hw->wiphy->max_scan_ssids = 1;
@@ -1352,6 +1619,8 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
 
        if (wl->use_eeprom)
                wl1251_read_eeprom_mac(wl);
+       if (wl->dump_eeprom)
+               wl1251_dump_eeprom(wl);
 
        ret = wl1251_register_hw(wl);
        if (ret)
@@ -1389,10 +1658,13 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
 
        skb_queue_head_init(&wl->tx_queue);
 
-       INIT_WORK(&wl->filter_work, wl1251_filter_work);
+       INIT_DELAYED_WORK(&wl->ps_work, wl1251_ps_work);
        INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
        wl->channel = WL1251_DEFAULT_CHANNEL;
+       wl->monitor_present = false;
+       wl->joined = false;
        wl->scanning = false;
+       wl->bss_type = MAX_BSS_TYPE;
        wl->default_key = 0;
        wl->listen_int = 1;
        wl->rx_counter = 0;
@@ -1410,6 +1682,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
        wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
        wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
        wl->vif = NULL;
+       wl->ps_rate_threshold = 100000;
 
        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                wl->tx_frames[i] = NULL;
@@ -1461,6 +1734,9 @@ int wl1251_free_hw(struct wl1251 *wl)
 
        ieee80211_free_hw(wl->hw);
 
+       if (wl->eeprom_dump != NULL)
+               kfree(wl->eeprom_dump);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(wl1251_free_hw);
index db719f7..5c5c3ff 100644 (file)
@@ -112,21 +112,20 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode)
        case STATION_POWER_SAVE_MODE:
                wl1251_debug(DEBUG_PSM, "entering psm");
 
-               /* enable beacon filtering */
-               ret = wl1251_acx_beacon_filter_opt(wl, true);
-               if (ret < 0)
-                       return ret;
-
-               ret = wl1251_acx_wake_up_conditions(wl,
-                                                   WAKE_UP_EVENT_DTIM_BITMAP,
-                                                   wl->listen_int);
-               if (ret < 0)
-                       return ret;
+               if (wl->long_doze_mode != wl->long_doze_mode_set) {
+                       wl1251_acx_wake_up_conditions(wl, wl->long_doze_mode
+                               ? WAKE_UP_EVENT_DTIM_BITMAP
+                               : WAKE_UP_EVENT_BEACON_BITMAP,
+                               wl->listen_int);
+                       wl->long_doze_mode_set = wl->long_doze_mode;
+               }
 
+#if 0 /* problems seen on one router */
                ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
                                            WL1251_DEFAULT_BET_CONSECUTIVE);
                if (ret < 0)
                        return ret;
+#endif
 
                ret = wl1251_cmd_ps_mode(wl, CHIP_POWER_SAVE_MODE);
                if (ret < 0)
@@ -143,7 +142,7 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode)
                if (ret < 0)
                        return ret;
 
-               ret = wl1251_cmd_template_set(wl, CMD_DISCONNECT, NULL, 0);
+               ret = wl1251_cmd_disconnect(wl);
                if (ret < 0)
                        return ret;
                break;
@@ -155,30 +154,29 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode)
                if (ret < 0)
                        return ret;
 
+#if 0
                /* disable BET */
                ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
                                            WL1251_DEFAULT_BET_CONSECUTIVE);
                if (ret < 0)
                        return ret;
+#endif
 
                /* disable beacon filtering */
                ret = wl1251_acx_beacon_filter_opt(wl, false);
                if (ret < 0)
                        return ret;
 
-               ret = wl1251_acx_wake_up_conditions(wl,
-                                                   WAKE_UP_EVENT_DTIM_BITMAP,
-                                                   wl->listen_int);
-               if (ret < 0)
-                       return ret;
-
                ret = wl1251_cmd_ps_mode(wl, CHIP_ACTIVE_MODE);
                if (ret < 0)
                        return ret;
 
                break;
        }
+       if (mode != wl->station_mode)
+               wl->ps_transitioning = true;
        wl->station_mode = mode;
+       wl->ps_change_jiffies = jiffies;
 
        return ret;
 }
index 6af3526..79a119d 100644 (file)
@@ -83,7 +83,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
 
        status->flag |= RX_FLAG_MACTIME_MPDU;
 
-       if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
+       if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) {
                status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
 
                if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
@@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
        wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
 
        /* The actual length doesn't include the target's alignment */
-       skb->len = desc->length  - PLCP_HEADER_LENGTH;
+       skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
 
        fc = (u16 *)skb->data;
 
@@ -194,6 +194,8 @@ static void wl1251_rx_body(struct wl1251 *wl,
 
        memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
        ieee80211_rx_ni(wl->hw, skb);
+
+       wl1251_update_rate(wl, length);
 }
 
 static void wl1251_rx_ack(struct wl1251 *wl)
index e2750a1..d141d83 100644 (file)
 
 #include "wl1251.h"
 
+static bool force_nvs_file = false;
+module_param(force_nvs_file, bool, 0644);
+MODULE_PARM_DESC(force_nvs_file, "Force loading NVS data from file, "
+                                "not EEPROM. Default: n/N/0");
+static bool dump_eeprom = false;
+module_param(dump_eeprom, bool, 0644);
+MODULE_PARM_DESC(dump_eeprom, "Dump EEPROM on module load and make it "
+                             "accessible through debugfs. Default: n/N/0");
+
 #ifndef SDIO_VENDOR_ID_TI
 #define SDIO_VENDOR_ID_TI              0x104c
 #endif
@@ -151,16 +160,6 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
        sdio_release_host(func);
 }
 
-/* Interrupts when using dedicated WLAN_IRQ pin */
-static irqreturn_t wl1251_line_irq(int irq, void *cookie)
-{
-       struct wl1251 *wl = cookie;
-
-       ieee80211_queue_work(wl->hw, &wl->irq_work);
-
-       return IRQ_HANDLED;
-}
-
 static void wl1251_enable_line_irq(struct wl1251 *wl)
 {
        return enable_irq(wl->irq);
@@ -218,10 +217,64 @@ static struct wl1251_if_operations wl1251_sdio_ops = {
        .power = wl1251_sdio_set_power,
 };
 
+static ssize_t
+wl1251_show_long_doze(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       struct wl1251 *wl = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", wl->long_doze_mode);
+}
+
+static ssize_t
+wl1251_set_long_doze(struct device *dev, struct device_attribute *attr,
+       const char *buf, size_t count)
+{
+       struct wl1251 *wl = dev_get_drvdata(dev);
+       int val, ret;
+
+       ret = kstrtoint(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       wl->long_doze_mode = !!val;
+       return count;
+}
+
+static ssize_t
+wl1251_show_ps_rate_thr(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       struct wl1251 *wl = dev_get_drvdata(dev);
+       return sprintf(buf, "%u\n", wl->ps_rate_threshold);
+}
+
+static ssize_t
+wl1251_set_ps_rate_thr(struct device *dev, struct device_attribute *attr,
+       const char *buf, size_t count)
+{
+       struct wl1251 *wl = dev_get_drvdata(dev);
+       unsigned int val;
+       int ret;
+
+       ret = kstrtouint(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       wl->ps_rate_threshold = val;
+       return count;
+}
+
+static struct device_attribute wl1251_attrs[] = {
+       __ATTR(long_doze_mode, S_IRUGO | S_IWUGO,
+               wl1251_show_long_doze, wl1251_set_long_doze),
+       __ATTR(ps_rate_threshold, S_IRUGO | S_IWUGO,
+               wl1251_show_ps_rate_thr, wl1251_set_ps_rate_thr),
+};
+
 static int wl1251_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
 {
-       int ret;
+       int ret, t;
        struct wl1251 *wl;
        struct ieee80211_hw *hw;
        struct wl1251_sdio *wl_sdio;
@@ -259,9 +312,15 @@ static int wl1251_sdio_probe(struct sdio_func *func,
                wl->use_eeprom = wl12xx_board_data->use_eeprom;
        }
 
+       if (force_nvs_file)
+               wl->use_eeprom = false;
+       wl->dump_eeprom = dump_eeprom;
+
        if (wl->irq) {
                irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
-               ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
+
+               ret = request_threaded_irq(wl->irq, NULL, wl1251_irq,
+                       IRQF_ONESHOT, "wl1251", wl);
                if (ret < 0) {
                        wl1251_error("request_irq() failed: %d", ret);
                        goto disable;
@@ -286,6 +345,16 @@ static int wl1251_sdio_probe(struct sdio_func *func,
 
        sdio_set_drvdata(func, wl);
 
+       for (t = 0; t < ARRAY_SIZE(wl1251_attrs); t++) {
+               ret = device_create_file(&func->dev, &wl1251_attrs[t]);
+               if (ret) {
+                       while (--t >= 0)
+                               device_remove_file(&func->dev,
+                                                  &wl1251_attrs[t]);
+                       goto out_free_irq;
+               }
+       }
+
        /* Tell PM core that we don't need the card to be powered now */
        pm_runtime_put_noidle(&func->dev);
 
@@ -309,10 +378,14 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
 {
        struct wl1251 *wl = sdio_get_drvdata(func);
        struct wl1251_sdio *wl_sdio = wl->if_priv;
+       int t;
 
        /* Undo decrement done above in wl1251_probe */
        pm_runtime_get_noresume(&func->dev);
 
+       for (t = 0; t < ARRAY_SIZE(wl1251_attrs); t++)
+               device_remove_file(&func->dev, &wl1251_attrs[t]);
+
        if (wl->irq)
                free_irq(wl->irq, wl);
        wl1251_free_hw(wl);
index 134ae9c..c213023 100644 (file)
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_reset_wake(struct wl1251 *wl)
index 28121c5..421afe4 100644 (file)
@@ -28,6 +28,7 @@
 #include "tx.h"
 #include "ps.h"
 #include "io.h"
+#include "event.h"
 
 static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
 {
@@ -50,7 +51,7 @@ static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
                return false;
 }
 
-static int wl1251_tx_path_status(struct wl1251 *wl)
+int wl1251_tx_path_status(struct wl1251 *wl)
 {
        u32 status, addr, data_out_count;
        bool busy;
@@ -89,8 +90,12 @@ static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
        /* 802.11 packets */
        tx_hdr->control.packet_type = 0;
 
-       if (control->flags & IEEE80211_TX_CTL_NO_ACK)
+       /* Also disable retry and ACK policy for injected packets */
+       if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
+           (control->flags & IEEE80211_TX_CTL_INJECTED)) {
+               tx_hdr->control.rate_policy = 1;
                tx_hdr->control.ack_policy = 1;
+       }
 
        tx_hdr->control.tx_complete = 1;
 
@@ -251,6 +256,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
 
        wl1251_mem_write(wl, addr, skb->data, len);
 
+       wl1251_update_rate(wl, len);
        wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
                     "queue %d", tx_hdr->id, skb, tx_hdr->length,
                     tx_hdr->rate, tx_hdr->xmit_queue);
@@ -277,8 +283,28 @@ static void wl1251_tx_trigger(struct wl1251 *wl)
                TX_STATUS_DATA_OUT_COUNT_MASK;
 }
 
+static void enable_tx_for_packet_injection(struct wl1251 *wl)
+{
+       int ret;
+
+       ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
+                             wl->beacon_int, wl->dtim_period);
+       if (ret < 0) {
+               wl1251_warning("join failed");
+               return;
+       }
+
+       ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
+       if (ret < 0) {
+               wl1251_warning("join timeout");
+               return;
+       }
+
+       wl->joined = true;
+}
+
 /* caller must hold wl->mutex */
-static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
+int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info;
        int ret = 0;
@@ -287,6 +313,9 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
        info = IEEE80211_SKB_CB(skb);
 
        if (info->control.hw_key) {
+               if (unlikely(wl->monitor_present))
+                       return -EINVAL;
+
                idx = info->control.hw_key->hw_key_idx;
                if (unlikely(wl->default_key != idx)) {
                        ret = wl1251_acx_default_key(wl, idx);
@@ -295,6 +324,10 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
                }
        }
 
+       /* Enable tx path in monitor mode for packet injection */
+       if ((wl->vif == NULL) && !wl->joined)
+               enable_tx_for_packet_injection(wl);
+
        ret = wl1251_tx_path_status(wl);
        if (ret < 0)
                return ret;
@@ -312,20 +345,17 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
        return ret;
 }
 
-void wl1251_tx_work(struct work_struct *work)
+void wl1251_tx_work_unlocked(struct wl1251 *wl, bool need_pm)
 {
-       struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
        struct sk_buff *skb;
        bool woken_up = false;
        int ret;
 
-       mutex_lock(&wl->mutex);
-
        if (unlikely(wl->state == WL1251_STATE_OFF))
                goto out;
 
        while ((skb = skb_dequeue(&wl->tx_queue))) {
-               if (!woken_up) {
+               if (need_pm && !woken_up) {
                        ret = wl1251_ps_elp_wakeup(wl);
                        if (ret < 0)
                                goto out;
@@ -345,7 +375,14 @@ void wl1251_tx_work(struct work_struct *work)
 out:
        if (woken_up)
                wl1251_ps_elp_sleep(wl);
+}
+
+void wl1251_tx_work(struct work_struct *work)
+{
+       struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
 
+       mutex_lock(&wl->mutex);
+       wl1251_tx_work_unlocked(wl, true);
        mutex_unlock(&wl->mutex);
 }
 
@@ -394,6 +431,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
        info = IEEE80211_SKB_CB(skb);
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+           !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
            (result->status == TX_SUCCESS))
                info->flags |= IEEE80211_TX_STAT_ACK;
 
@@ -457,24 +495,6 @@ void wl1251_tx_complete(struct wl1251 *wl)
                }
        }
 
-       queue_len = skb_queue_len(&wl->tx_queue);
-
-       if ((num_complete > 0) && (queue_len > 0)) {
-               /* firmware buffer has space, reschedule tx_work */
-               wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
-       }
-
-       if (wl->tx_queue_stopped &&
-           queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
-               /* tx_queue has space, restart queues */
-               wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               ieee80211_wake_queues(wl->hw);
-               wl->tx_queue_stopped = false;
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-       }
-
        /* Every completed frame needs to be acknowledged */
        if (num_complete) {
                /*
@@ -523,6 +543,29 @@ void wl1251_tx_complete(struct wl1251 *wl)
        }
 
        wl->next_tx_complete = result_index;
+
+       queue_len = skb_queue_len(&wl->tx_queue);
+       if (queue_len > 0) {
+               /* avoid stalling tx */
+               wl1251_tx_work_unlocked(wl, false);
+               queue_len = skb_queue_len(&wl->tx_queue);
+       }
+
+       if (queue_len > 0) {
+               /* still something to send? Schedule for later */
+               wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       }
+
+       if (wl->tx_queue_stopped &&
+           queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+               /* tx_queue has space, restart queues */
+               wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
+               spin_lock_irqsave(&wl->wl_lock, flags);
+               ieee80211_wake_queues(wl->hw);
+               wl->tx_queue_stopped = false;
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
+       }
 }
 
 /* caller must hold wl->mutex */
index 81338d3..4541a58 100644 (file)
@@ -224,8 +224,12 @@ static inline int wl1251_tx_get_queue(int queue)
        }
 }
 
+void wl1251_tx_work_unlocked(struct wl1251 *wl, bool need_pm);
 void wl1251_tx_work(struct work_struct *work);
 void wl1251_tx_complete(struct wl1251 *wl);
 void wl1251_tx_flush(struct wl1251 *wl);
 
+int wl1251_tx_path_status(struct wl1251 *wl);
+int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb);
+
 #endif
index a77f1bb..eca30a0 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/bitops.h>
+#include <linux/interrupt.h>
 #include <net/mac80211.h>
 
 #define DRIVER_NAME "wl1251"
@@ -93,6 +94,7 @@ enum {
        } while (0)
 
 #define WL1251_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN |  \
+                                 CFG_MC_FILTER_EN |    \
                                  CFG_BSSID_FILTER_EN)
 
 #define WL1251_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN |  \
@@ -255,6 +257,9 @@ struct wl1251_debugfs {
 
        struct dentry *retry_count;
        struct dentry *excessive_retries;
+
+       struct dentry *dump_nvs;
+       struct dentry *dump_full;
 };
 
 struct wl1251_if_operations {
@@ -278,6 +283,9 @@ struct wl1251 {
        void (*set_power)(bool enable);
        int irq;
        bool use_eeprom;
+       bool dump_eeprom;
+       bool long_doze_mode;
+       bool long_doze_mode_set;
 
        spinlock_t wl_lock;
 
@@ -303,6 +311,8 @@ struct wl1251 {
        u8 bss_type;
        u8 listen_int;
        int channel;
+       bool monitor_present;
+       bool joined;
 
        void *target_mem_map;
        struct acx_data_path_params_resp *data_path;
@@ -315,7 +325,6 @@ struct wl1251 {
        bool tx_queue_stopped;
 
        struct work_struct tx_work;
-       struct work_struct filter_work;
 
        /* Pending TX frames */
        struct sk_buff *tx_frames[16];
@@ -392,6 +401,22 @@ struct wl1251 {
 
        /* Most recently reported noise in dBm */
        s8 noise;
+
+       void *eeprom_dump;
+
+       /* PS hacks.. */
+       unsigned long ps_change_jiffies;
+       /* when we had PS "unfriendly" event like sync loss */
+       unsigned long last_no_ps_jiffies[2];
+       struct delayed_work ps_work;
+       bool bss_lost;
+       bool ps_transitioning;
+
+       /* rate accounting */
+       u32 ps_rate_threshold;
+       unsigned long rate_jiffies;
+       u32 rate_counter;
+       u32 rate;
 };
 
 int wl1251_plt_start(struct wl1251 *wl);
@@ -402,6 +427,36 @@ int wl1251_free_hw(struct wl1251 *wl);
 int wl1251_init_ieee80211(struct wl1251 *wl);
 void wl1251_enable_interrupts(struct wl1251 *wl);
 void wl1251_disable_interrupts(struct wl1251 *wl);
+irqreturn_t wl1251_irq(int irq, void *cookie);
+
+static inline void wl1251_no_ps_event(struct wl1251 *wl)
+{
+       wl->last_no_ps_jiffies[0] = wl->last_no_ps_jiffies[1];
+       wl->last_no_ps_jiffies[1] = jiffies;
+}
+
+static inline void wl1251_update_rate(struct wl1251 *wl, u32 length)
+{
+       bool in_psm, rate_above_eq;
+       unsigned long diff;
+
+       diff = jiffies - wl->rate_jiffies;
+       if (diff >= msecs_to_jiffies(2000)) {
+               wl->rate_jiffies = jiffies;
+               wl->rate = wl->rate_counter = 0;
+       }
+       else if (diff >= msecs_to_jiffies(1000)) {
+               wl->rate_jiffies += msecs_to_jiffies(1000);
+               wl->rate = wl->rate_counter;
+               wl->rate_counter = 0;
+       }
+       wl->rate_counter += length;
+
+       in_psm = wl->station_mode == STATION_POWER_SAVE_MODE;
+       rate_above_eq = wl->rate >= wl->ps_rate_threshold;
+       if (in_psm == rate_above_eq)
+               ieee80211_queue_delayed_work(wl->hw, &wl->ps_work, 0);
+}
 
 #define DEFAULT_HW_GEN_MODULATION_TYPE    CCK_LONG /* Long Preamble */
 #define DEFAULT_HW_GEN_TX_RATE          RATE_2MBPS
index bb16f5b..590b79b 100644 (file)
 
 #define BQ27000_REG_RSOC               0x0B /* Relative State-of-Charge */
 #define BQ27000_REG_ILMD               0x76 /* Initial last measured discharge */
-#define BQ27000_FLAG_CHGS              BIT(7)
+#define BQ27000_FLAG_EDVF              BIT(0) /* Final End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_EDV1              BIT(1) /* First End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_CI                        BIT(4) /* Capacity Inaccurate flag */
 #define BQ27000_FLAG_FC                        BIT(5)
+#define BQ27000_FLAG_CHGS              BIT(7) /* Charge state flag */
+
+#define BQ27000_FLAGS_IMPORTANT                (BQ27000_FLAG_FC|BQ27000_FLAG_CHGS|BIT(31))
 
 #define BQ27500_REG_SOC                        0x2C
 #define BQ27500_REG_DCAP               0x3C /* Design capacity */
 #define BQ27500_FLAG_DSC               BIT(0)
+#define BQ27500_FLAG_SOCF              BIT(1) /* State-of-Charge threshold final */
+#define BQ27500_FLAG_SOC1              BIT(2) /* State-of-Charge threshold 1 */
 #define BQ27500_FLAG_FC                        BIT(9)
 
+#define BQ27500_FLAGS_IMPORTANT                (BQ27500_FLAG_FC|BQ27500_FLAG_DSC|BIT(31))
+
 #define BQ27000_RS                     20 /* Resistor sense */
 
 struct bq27x00_device_info;
@@ -77,11 +86,12 @@ struct bq27x00_reg_cache {
        int time_to_empty_avg;
        int time_to_full;
        int charge_full;
-       int cycle_count;
-       int capacity;
+       int charge_now;
+       int energy;
        int flags;
 
-       int current_now;
+       int voltage;
+       int curr;
 };
 
 struct bq27x00_device_info {
@@ -108,6 +118,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CAPACITY_LEVEL,
        POWER_SUPPLY_PROP_TEMP,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -149,7 +160,7 @@ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
                rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
 
        if (rsoc < 0)
-               dev_err(di->dev, "error reading relative State-of-Charge\n");
+               dev_dbg(di->dev, "error reading relative State-of-Charge\n");
 
        return rsoc;
 }
@@ -164,7 +175,8 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
 
        charge = bq27x00_read(di, reg, false);
        if (charge < 0) {
-               dev_err(di->dev, "error reading nominal available capacity\n");
+               dev_dbg(di->dev, "error reading charge register %02x: %d\n",
+                       reg, charge);
                return charge;
        }
 
@@ -208,7 +220,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
                ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
 
        if (ilmd < 0) {
-               dev_err(di->dev, "error reading initial last measured discharge\n");
+               dev_dbg(di->dev, "error reading initial last measured discharge\n");
                return ilmd;
        }
 
@@ -220,6 +232,48 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
        return ilmd;
 }
 
+/*
+ * Return the battery Available energy in µWh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
+{
+       int ae;
+
+       ae = bq27x00_read(di, BQ27x00_REG_AE, false);
+       if (ae < 0) {
+               dev_dbg(di->dev, "error reading available energy\n");
+               return ae;
+       }
+
+       if (di->chip == BQ27500)
+               ae *= 1000;
+       else
+               ae = ae * 29200 / BQ27000_RS;
+
+       return ae;
+}
+
+/*
+ * Return the battery temperature in tenths of degree Kelvin
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
+{
+       int temp;
+
+       temp = bq27x00_read(di, BQ27x00_REG_TEMP, false);
+       if (temp < 0) {
+               dev_err(di->dev, "error reading temperature\n");
+               return temp;
+       }
+
+       if (di->chip != BQ27500)
+               temp = 5 * temp / 2;
+
+       return temp;
+}
+
 /*
  * Return the battery Cycle count total
  * Or < 0 if something fails.
@@ -245,7 +299,8 @@ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
 
        tval = bq27x00_read(di, reg, false);
        if (tval < 0) {
-               dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
+               dev_dbg(di->dev, "error reading time register %02x: %d\n",
+                       reg, tval);
                return tval;
        }
 
@@ -255,36 +310,75 @@ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
        return tval * 60;
 }
 
+static int bq27x00_time(int tval)
+{
+       if (tval == 65535)
+               return -ENODATA;
+
+       return tval * 60;
+}
+
+static int bq27x00_read_i2c_n(struct bq27x00_device_info *di, u8 *data,
+       size_t len, u8 start);
+
 static void bq27x00_update(struct bq27x00_device_info *di)
 {
        struct bq27x00_reg_cache cache = {0, };
        bool is_bq27500 = di->chip == BQ27500;
+       u8 state[0x28];
+       int flags_changed;
+       int ret;
 
-       cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
-       if (cache.flags >= 0) {
-               cache.capacity = bq27x00_battery_read_rsoc(di);
-               cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
-               cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
-               cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
-               cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
-               cache.charge_full = bq27x00_battery_read_lmd(di);
-               cache.cycle_count = bq27x00_battery_read_cyct(di);
-
-               if (!is_bq27500)
-                       cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
-
-               /* We only have to read charge design full once */
-               if (di->charge_design_full <= 0)
-                       di->charge_design_full = bq27x00_battery_read_ilmd(di);
+       /* pandora hack */
+       WARN_ON_ONCE(!is_bq27500);
+       (void)bq27x00_battery_read_energy;
+       (void)bq27x00_battery_read_temperature;
+       (void)bq27x00_battery_read_time;
+
+       /* reading reserved field breaks subsequent reads,
+        * so can't read everything in one go :( */
+       ret = bq27x00_read_i2c_n(di, state + 6, sizeof(state) - 6, 6);
+       if (ret < 0) {
+               dev_err(di->dev, "error reading state: %d\n", ret);
+               return;
        }
 
-       /* Ignore current_now which is a snapshot of the current battery state
-        * and is likely to be different even between two consecutive reads */
-       if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
-               di->cache = cache;
-               power_supply_changed(&di->bat);
+       cache.flags         = get_unaligned_le16(&state[BQ27x00_REG_FLAGS]);
+       cache.energy        = get_unaligned_le16(&state[BQ27x00_REG_AE]) * 1000;
+       cache.time_to_empty = get_unaligned_le16(&state[BQ27x00_REG_TTE]);
+       cache.time_to_empty = bq27x00_time(cache.time_to_empty);
+       cache.time_to_empty_avg = get_unaligned_le16(&state[BQ27x00_REG_TTECP]);
+       cache.time_to_empty_avg = bq27x00_time(cache.time_to_empty_avg);
+       cache.time_to_full  = get_unaligned_le16(&state[BQ27x00_REG_TTF]);
+       cache.time_to_full  = bq27x00_time(cache.time_to_full);
+       cache.charge_full   = get_unaligned_le16(&state[BQ27x00_REG_LMD]) * 1000;
+       cache.charge_now    = get_unaligned_le16(&state[BQ27x00_REG_NAC]) * 1000;
+       cache.temperature   = get_unaligned_le16(&state[BQ27x00_REG_TEMP]);
+       cache.voltage       = get_unaligned_le16(&state[BQ27x00_REG_VOLT]) * 1000;
+       cache.curr          = (s16)get_unaligned_le16(&state[BQ27x00_REG_AI]) * 1000;
+
+       /* We only have to read charge design full once */
+       if (di->charge_design_full <= 0)
+               di->charge_design_full = bq27x00_battery_read_ilmd(di);
+
+       /*
+        * On bq27500, DSG is not set on discharge with very low currents,
+        * so check AI to not misreport that we are charging in status query
+        */
+       if (is_bq27500 && !(cache.flags & BQ27500_FLAG_DSC)) {
+               if (cache.curr <= 0)
+                       cache.flags |= BQ27500_FLAG_DSC;
        }
 
+       flags_changed = di->cache.flags ^ cache.flags;
+       di->cache = cache;
+       if (is_bq27500)
+               flags_changed &= BQ27500_FLAGS_IMPORTANT;
+       else
+               flags_changed &= BQ27000_FLAGS_IMPORTANT;
+       if (flags_changed)
+               power_supply_changed(&di->bat);
+
        di->last_update = jiffies;
 }
 
@@ -302,25 +396,6 @@ static void bq27x00_battery_poll(struct work_struct *work)
        }
 }
 
-
-/*
- * Return the battery temperature in tenths of degree Celsius
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
-       union power_supply_propval *val)
-{
-       if (di->cache.temperature < 0)
-               return di->cache.temperature;
-
-       if (di->chip == BQ27500)
-               val->intval = di->cache.temperature - 2731;
-       else
-               val->intval = ((di->cache.temperature * 5) - 5463) / 2;
-
-       return 0;
-}
-
 /*
  * Return the battery average current in µA
  * Note that current can be negative signed as well
@@ -330,20 +405,20 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di,
        union power_supply_propval *val)
 {
        int curr;
+       int flags;
 
-       if (di->chip == BQ27500)
-           curr = bq27x00_read(di, BQ27x00_REG_AI, false);
-       else
-           curr = di->cache.current_now;
-
-       if (curr < 0)
+       curr = bq27x00_read(di, BQ27x00_REG_AI, false);
+       if (curr < 0) {
+               dev_err(di->dev, "error reading current\n");
                return curr;
+       }
 
        if (di->chip == BQ27500) {
                /* bq27500 returns signed value */
                val->intval = (int)((s16)curr) * 1000;
        } else {
-               if (di->cache.flags & BQ27000_FLAG_CHGS) {
+               flags = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+               if (flags & BQ27000_FLAG_CHGS) {
                        dev_dbg(di->dev, "negative current!\n");
                        curr = -curr;
                }
@@ -382,50 +457,56 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
        return 0;
 }
 
-/*
- * Return the battery Voltage in milivolts
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
+static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
        union power_supply_propval *val)
 {
-       int volt;
+       int level;
 
-       volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
-       if (volt < 0)
-               return volt;
+       if (di->chip == BQ27500) {
+               if (di->cache.flags & BQ27500_FLAG_FC)
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+               else if (di->cache.flags & BQ27500_FLAG_SOC1)
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+               else if (di->cache.flags & BQ27500_FLAG_SOCF)
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+               else
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+       } else {
+               if (di->cache.flags & BQ27000_FLAG_FC)
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+               else if (di->cache.flags & BQ27000_FLAG_EDV1)
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+               else if (di->cache.flags & BQ27000_FLAG_EDVF)
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+               else
+                       level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+       }
 
-       val->intval = volt * 1000;
+       val->intval = level;
 
        return 0;
 }
 
 /*
- * Return the battery Available energy in µWh
+ * Return the battery Voltage in millivolts
  * Or < 0 if something fails.
  */
-static int bq27x00_battery_energy(struct bq27x00_device_info *di,
+static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
        union power_supply_propval *val)
 {
-       int ae;
+       int volt;
 
-       ae = bq27x00_read(di, BQ27x00_REG_AE, false);
-       if (ae < 0) {
-               dev_err(di->dev, "error reading available energy\n");
-               return ae;
+       volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
+       if (volt < 0) {
+               dev_err(di->dev, "error reading voltage\n");
+               return volt;
        }
 
-       if (di->chip == BQ27500)
-               ae *= 1000;
-       else
-               ae = ae * 29200 / BQ27000_RS;
-
-       val->intval = ae;
+       val->intval = volt * 1000;
 
        return 0;
 }
 
-
 static int bq27x00_simple_value(int value,
        union power_supply_propval *val)
 {
@@ -462,19 +543,31 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
                ret = bq27x00_battery_status(di, val);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               ret = bq27x00_battery_voltage(di, val);
+               if (time_is_before_jiffies(di->last_update + HZ))
+                       ret = bq27x00_battery_voltage(di, val);
+               else
+                       ret = bq27x00_simple_value(di->cache.voltage, val);
                break;
        case POWER_SUPPLY_PROP_PRESENT:
                val->intval = di->cache.flags < 0 ? 0 : 1;
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = bq27x00_battery_current(di, val);
+               ret = 0;
+               if (time_is_before_jiffies(di->last_update + HZ))
+                       ret = bq27x00_battery_current(di, val);
+               else
+                       val->intval = di->cache.curr;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               ret = bq27x00_simple_value(di->cache.capacity, val);
+               ret = bq27x00_simple_value(bq27x00_battery_read_rsoc(di), val);
+               break;
+       case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+               ret = bq27x00_battery_capacity_level(di, val);
                break;
        case POWER_SUPPLY_PROP_TEMP:
-               ret = bq27x00_battery_temperature(di, val);
+               ret = bq27x00_simple_value(di->cache.temperature, val);
+               if (ret == 0)
+                       val->intval -= 2731;
                break;
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
                ret = bq27x00_simple_value(di->cache.time_to_empty, val);
@@ -489,7 +582,7 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
                val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
-               ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val);
+               ret = bq27x00_simple_value(di->cache.charge_now, val);
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
                ret = bq27x00_simple_value(di->cache.charge_full, val);
@@ -498,10 +591,10 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
                ret = bq27x00_simple_value(di->charge_design_full, val);
                break;
        case POWER_SUPPLY_PROP_CYCLE_COUNT:
-               ret = bq27x00_simple_value(di->cache.cycle_count, val);
+               ret = bq27x00_simple_value(bq27x00_battery_read_cyct(di), val);
                break;
        case POWER_SUPPLY_PROP_ENERGY_NOW:
-               ret = bq27x00_battery_energy(di, val);
+               ret = bq27x00_simple_value(di->cache.energy, val);
                break;
        default:
                return -EINVAL;
@@ -514,8 +607,13 @@ static void bq27x00_external_power_changed(struct power_supply *psy)
 {
        struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
 
+       mutex_lock(&di->lock);
+
        cancel_delayed_work_sync(&di->work);
-       schedule_delayed_work(&di->work, 0);
+       set_timer_slack(&di->work.timer, 1 * HZ);
+       schedule_delayed_work(&di->work, 3 * HZ);
+
+       mutex_unlock(&di->lock);
 }
 
 static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
@@ -546,6 +644,14 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
 
 static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
 {
+       /*
+        * power_supply_unregister calls bq27x00_battery_get_property, which
+        * calls bq27x00_battery_poll.
+        * Make sure that bq27x00_battery_poll will not call
+        * schedule_delayed_work again after unregister (which causes an OOPS).
+        */
+       poll_interval = 0;
+
        cancel_delayed_work_sync(&di->work);
 
        power_supply_unregister(&di->bat);
@@ -597,6 +703,27 @@ static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single)
        return ret;
 }
 
+static int bq27x00_read_i2c_n(struct bq27x00_device_info *di, u8 *data,
+       size_t len, u8 start)
+{
+       struct i2c_client *client = to_i2c_client(di->dev);
+       struct i2c_msg msg[2];
+
+       if (!client->adapter)
+               return -ENODEV;
+
+       msg[0].addr = client->addr;
+       msg[0].flags = 0;
+       msg[0].buf = &start;
+       msg[0].len = sizeof(start);
+       msg[1].addr = client->addr;
+       msg[1].flags = I2C_M_RD;
+       msg[1].buf = data;
+       msg[1].len = len;
+
+       return i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+}
+
 static int bq27x00_battery_probe(struct i2c_client *client,
                                 const struct i2c_device_id *id)
 {
index da25eb9..84c1d96 100644 (file)
@@ -21,6 +21,7 @@
 static void power_supply_update_bat_leds(struct power_supply *psy)
 {
        union power_supply_propval status;
+       union power_supply_propval current_now;
        unsigned long delay_on = 0;
        unsigned long delay_off = 0;
 
@@ -31,8 +32,11 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
 
        switch (status.intval) {
        case POWER_SUPPLY_STATUS_FULL:
+               psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW,
+                       &current_now);
                led_trigger_event(psy->charging_full_trig, LED_FULL);
-               led_trigger_event(psy->charging_trig, LED_OFF);
+               led_trigger_event(psy->charging_trig,
+                       current_now.intval > 0 ? LED_FULL : LED_OFF);
                led_trigger_event(psy->full_trig, LED_FULL);
                led_trigger_event(psy->charging_blink_full_solid_trig,
                        LED_FULL);
index 54b9198..9488b37 100644 (file)
 #include <linux/power_supply.h>
 #include <linux/notifier.h>
 #include <linux/usb/otg.h>
+#include <linux/ratelimit.h>
+#include <linux/regulator/machine.h>
+#include <linux/leds.h>
 
 #define TWL4030_BCIMSTATEC     0x02
 #define TWL4030_BCIICHG                0x08
 #define TWL4030_BCIVAC         0x0a
 #define TWL4030_BCIVBUS                0x0c
+#define TWL4030_BCIMFSTS3      0x0f
 #define TWL4030_BCIMFSTS4      0x10
+#define TWL4030_BCIMFKEY       0x11
 #define TWL4030_BCICTL1                0x23
+#define TWL4030_BCIIREF1       0x27
+#define TWL4030_BCIIREF2       0x28
 
 #define TWL4030_BCIAUTOWEN     BIT(5)
 #define TWL4030_CONFIG_DONE    BIT(4)
+#define TWL4030_CVENAC         BIT(2)
 #define TWL4030_BCIAUTOUSB     BIT(1)
 #define TWL4030_BCIAUTOAC      BIT(0)
 #define TWL4030_CGAIN          BIT(5)
 #define TWL4030_USBFASTMCHG    BIT(2)
 #define TWL4030_STS_VBUS       BIT(7)
 #define TWL4030_STS_USB_ID     BIT(2)
+#define TWL4030_STS_CHG                BIT(1)
 
 /* BCI interrupts */
 #define TWL4030_WOVF           BIT(0) /* Watchdog overflow */
 #define TWL4030_MSTATEC_COMPLETE1      0x0b
 #define TWL4030_MSTATEC_COMPLETE4      0x0e
 
-static bool allow_usb;
+#define TWL4030_KEY_IIREF              0xe7
+#define TWL4030_BATSTSMCHG             BIT(6)
+
+#define IRQ_CHECK_PERIOD       (3 * HZ)
+#define IRQ_CHECK_THRESHOLD    4
+
+static bool allow_usb = 1;
 module_param(allow_usb, bool, 0644);
 MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
 
@@ -74,8 +89,24 @@ struct twl4030_bci {
        struct work_struct      work;
        int                     irq_chg;
        int                     irq_bci;
+       bool                    ac_charge_enable;
+       bool                    usb_charge_enable;
+       int                     usb_current;
+       int                     ac_current;
+       enum power_supply_type  current_supply;
+       struct regulator        *usb_reg;
+       int                     usb_enabled;
+       int                     irq_had_charger;
+
+       unsigned long           irq_check_count_time;
+       int                     irq_check_count;
+       int                     irq_check_ac_disabled;
+
+       struct led_trigger      *charging_any_trig;
+       int                     was_charging_any;
 
        unsigned long           event;
+       struct ratelimit_state  ratelimit;
 };
 
 /*
@@ -101,9 +132,14 @@ static int twl4030_bci_read(u8 reg, u8 *val)
        return twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, val, reg);
 }
 
+static int twl4030_bci_write(u8 reg, u8 val)
+{
+       return twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE, val, reg);
+}
+
 static int twl4030_clear_set_boot_bci(u8 clear, u8 set)
 {
-       return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, 0,
+       return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, clear,
                        TWL4030_CONFIG_DONE | TWL4030_BCIAUTOWEN | set,
                        TWL4030_PM_MASTER_BOOT_BCI);
 }
@@ -151,13 +187,16 @@ static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
 }
 
 /*
- * Enable/Disable USB Charge funtionality.
+ * Enable/Disable USB Charge functionality.
  */
 static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
 {
        int ret;
 
        if (enable) {
+               if (!bci->usb_charge_enable)
+                       return -EACCES;
+
                /* Check for USB charger conneted */
                if (!twl4030_bci_have_vbus(bci))
                        return -ENODEV;
@@ -171,6 +210,12 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
                        return -EACCES;
                }
 
+               /* Need to keep regulator on */
+               if (!bci->usb_enabled &&
+                   bci->usb_reg &&
+                   regulator_enable(bci->usb_reg) == 0)
+                       bci->usb_enabled = 1;
+
                /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
                ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
                if (ret < 0)
@@ -181,6 +226,9 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
                        TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
        } else {
                ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
+               if (bci->usb_enabled &&
+                   regulator_disable(bci->usb_reg) == 0)
+                       bci->usb_enabled = 0;
        }
 
        return ret;
@@ -201,17 +249,133 @@ static int twl4030_charger_enable_ac(bool enable)
        return ret;
 }
 
+static int set_charge_current(struct twl4030_bci *bci, int new_current)
+{
+       u8 val, boot_bci_prev, cgain_set, cgain_clear;
+       int ret, ret2;
+
+       ret = twl4030_bci_read(TWL4030_BCIMFSTS3, &val);
+       if (ret)
+               goto out_norestore;
+
+       if (!(val & TWL4030_BATSTSMCHG)) {
+               dev_err(bci->dev, "missing battery, can't change charge_current\n");
+               goto out_norestore;
+       }
+
+       ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &boot_bci_prev,
+               TWL4030_PM_MASTER_BOOT_BCI);
+       if (ret)
+               goto out_norestore;
+
+       /*
+        * Stop automatic charging here, because charge current change
+        * requires multiple register writes and CGAIN change requires
+        * automatic charge to be stopped (and CV mode disabled too).
+        */
+       ret = twl4030_clear_set_boot_bci(
+               TWL4030_CVENAC | TWL4030_BCIAUTOAC | TWL4030_BCIAUTOUSB, 0);
+       if (ret)
+               goto out;
+
+       ret = twl4030_bci_write(TWL4030_BCIMFKEY, TWL4030_KEY_IIREF);
+       if (ret)
+               goto out;
+
+       ret = twl4030_bci_write(TWL4030_BCIIREF1, new_current & 0xff);
+       if (ret)
+               goto out;
+
+       ret = twl4030_bci_write(TWL4030_BCIMFKEY, TWL4030_KEY_IIREF);
+       if (ret)
+               goto out;
+
+       ret = twl4030_bci_write(TWL4030_BCIIREF2, (new_current >> 8) & 0x1);
+       if (ret)
+               goto out;
+
+       /* Set CGAIN = 0 or 1 */
+       if (new_current > 511) {
+               cgain_set = TWL4030_CGAIN;
+               cgain_clear = 0;
+       } else {
+               cgain_set = 0;
+               cgain_clear = TWL4030_CGAIN;
+       }
+
+       ret = twl4030_clear_set(TWL4030_MODULE_MAIN_CHARGE,
+                       cgain_clear, cgain_set, TWL4030_BCICTL1);
+       if (ret)
+               goto out;
+
+       ret = twl4030_bci_read(TWL4030_BCICTL1, &val);
+       if (ret != 0 || (val & TWL4030_CGAIN) != cgain_set) {
+               dev_err(bci->dev, "CGAIN change failed\n");
+               goto out;
+       }
+
+out:
+       ret2 = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, boot_bci_prev,
+               TWL4030_PM_MASTER_BOOT_BCI);
+       if (ret2 != 0)
+               dev_err(bci->dev, "failed boot_bci restore: %d\n", ret2);
+
+out_norestore:
+       if (ret != 0)
+               dev_err(bci->dev, "charge current change failed: %d\n", ret);
+
+       return ret;
+}
+
 /*
  * TWL4030 CHG_PRES (AC charger presence) events
  */
 static irqreturn_t twl4030_charger_interrupt(int irq, void *arg)
 {
        struct twl4030_bci *bci = arg;
+       int have_charger;
+       u8 hw_cond;
+       int ret;
+
+       ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &hw_cond,
+                             TWL4030_PM_MASTER_STS_HW_CONDITIONS);
+       if (ret < 0) {
+               dev_err(bci->dev, "HW_CONDITIONS read failed: %d\n", ret);
+               goto out;
+       }
+
+       have_charger = (hw_cond & TWL4030_STS_CHG) ? 1 : 0;
+       if (have_charger == bci->irq_had_charger)
+               goto out;
+       bci->irq_had_charger = have_charger;
+
+       dev_dbg(bci->dev, "CHG_PRES irq, hw_cond %02x\n", hw_cond);
+
+       /*
+        * deal with rare mysterious issue of CHG_PRES changing states at ~4Hz
+        * without any charger connected or anything
+        */
+       if (time_before(jiffies, bci->irq_check_count_time + IRQ_CHECK_PERIOD)) {
+               bci->irq_check_count++;
+               if (have_charger && bci->irq_check_count > IRQ_CHECK_THRESHOLD) {
+                       dev_err(bci->dev, "spurious CHG_PRES irqs detected (%d), disabling charger\n",
+                               bci->irq_check_count);
+                       twl4030_charger_enable_ac(false);
+                       bci->irq_check_ac_disabled = true;
+               }
+       } else {
+               bci->irq_check_count_time = jiffies;
+               bci->irq_check_count = 1;
+               if (have_charger && bci->irq_check_ac_disabled) {
+                       twl4030_charger_enable_ac(true);
+                       bci->irq_check_ac_disabled = false;
+               }
+       }
 
-       dev_dbg(bci->dev, "CHG_PRES irq\n");
        power_supply_changed(&bci->ac);
        power_supply_changed(&bci->usb);
 
+out:
        return IRQ_HANDLED;
 }
 
@@ -243,21 +407,30 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
        }
 
        /* various monitoring events, for now we just log them here */
-       if (irqs1 & (TWL4030_TBATOR2 | TWL4030_TBATOR1))
+       if (irqs1 & (TWL4030_TBATOR2 | TWL4030_TBATOR1) &&
+                       __ratelimit(&bci->ratelimit))
                dev_warn(bci->dev, "battery temperature out of range\n");
 
-       if (irqs1 & TWL4030_BATSTS)
+       if (irqs1 & TWL4030_BATSTS && __ratelimit(&bci->ratelimit))
                dev_crit(bci->dev, "battery disconnected\n");
 
-       if (irqs2 & TWL4030_VBATOV)
+       if (irqs2 & TWL4030_VBATOV && __ratelimit(&bci->ratelimit))
                dev_crit(bci->dev, "VBAT overvoltage\n");
 
-       if (irqs2 & TWL4030_VBUSOV)
+       if (irqs2 & TWL4030_VBUSOV && __ratelimit(&bci->ratelimit))
                dev_crit(bci->dev, "VBUS overvoltage\n");
 
-       if (irqs2 & TWL4030_ACCHGOV)
+       if (irqs2 & TWL4030_ACCHGOV && __ratelimit(&bci->ratelimit))
                dev_crit(bci->dev, "Ac charger overvoltage\n");
 
+#if 0
+       /* ack the interrupts */
+       twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, irqs1,
+                        TWL4030_INTERRUPTS_BCIISR1A);
+       twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, irqs2,
+                        TWL4030_INTERRUPTS_BCIISR2A);
+#endif
+
        return IRQ_HANDLED;
 }
 
@@ -318,6 +491,147 @@ static int twl4030_charger_get_current(void)
        return ret;
 }
 
+static ssize_t twl4030_bci_ac_show_enable(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       u8 boot_bci;
+       int ret;
+
+       ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &boot_bci,
+                             TWL4030_PM_MASTER_BOOT_BCI);
+       if (ret)
+               return ret;
+
+       return sprintf(buf, "%d\n", (boot_bci & TWL4030_BCIAUTOAC) ? 1 : 0);
+}
+
+static ssize_t twl4030_bci_ac_store_enable(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct power_supply *psy = dev_get_drvdata(dev);
+       struct twl4030_bci *bci = container_of(psy, struct twl4030_bci, ac);
+       unsigned long enable;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &enable);
+       if (ret || enable > 1)
+               return -EINVAL;
+
+       bci->ac_charge_enable = enable;
+       twl4030_charger_enable_ac(enable);
+
+       return count;
+}
+static struct device_attribute dev_attr_enable_ac =
+       __ATTR(enable, S_IRUGO | S_IWUSR, twl4030_bci_ac_show_enable,
+       twl4030_bci_ac_store_enable);
+
+static ssize_t twl4030_bci_usb_show_enable(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       u8 boot_bci;
+       int ret;
+
+       ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &boot_bci,
+                             TWL4030_PM_MASTER_BOOT_BCI);
+       if (ret)
+               return ret;
+
+       return sprintf(buf, "%d\n", (boot_bci & TWL4030_BCIAUTOUSB) ? 1 : 0);
+}
+
+static ssize_t twl4030_bci_usb_store_enable(struct device *dev,
+                                           struct device_attribute *attr,
+                                           const char *buf, size_t count)
+{
+       struct power_supply *psy = dev_get_drvdata(dev);
+       struct twl4030_bci *bci = container_of(psy, struct twl4030_bci, usb);
+       unsigned long enable;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &enable);
+       if (ret || enable > 1)
+               return -EINVAL;
+
+       bci->usb_charge_enable = enable;
+       twl4030_charger_enable_usb(bci, enable);
+
+       return count;
+}
+static struct device_attribute dev_attr_enable_usb =
+       __ATTR(enable, S_IRUGO | S_IWUSR, twl4030_bci_usb_show_enable,
+       twl4030_bci_usb_store_enable);
+
+static ssize_t show_charge_current(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       int ret, val;
+       u8 ctl;
+       
+       val = twl4030bci_read_adc_val(TWL4030_BCIIREF1);
+       if (val < 0)
+               return val;
+       ret = twl4030_bci_read(TWL4030_BCICTL1, &ctl);
+       if (ret < 0)
+               return ret;
+
+       val &= 0x1ff;
+       if (ctl & TWL4030_CGAIN)
+               val |= 0x200;
+
+       return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_charge_current(struct device *dev,
+                       struct device_attribute *attr, const char *buf,
+                       size_t count)
+{
+       struct power_supply *psy = dev_get_drvdata(dev);
+       struct twl4030_bci *bci = dev_get_drvdata(psy->dev->parent);
+       unsigned long new_current;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &new_current);
+       if (ret)
+               return -EINVAL;
+
+       ret = set_charge_current(bci, new_current);
+       if (ret)
+               return ret;
+
+       if (psy->type == POWER_SUPPLY_TYPE_MAINS)
+               bci->ac_current = new_current;
+       else
+               bci->usb_current = new_current;
+
+       return count;
+}
+static DEVICE_ATTR(charge_current, S_IRUGO | S_IWUSR, show_charge_current,
+       store_charge_current);
+
+static struct attribute *bci_ac_attrs[] = {
+       &dev_attr_enable_ac.attr,
+       &dev_attr_charge_current.attr,
+       NULL,
+};
+
+static struct attribute *bci_usb_attrs[] = {
+       &dev_attr_enable_usb.attr,
+       &dev_attr_charge_current.attr,
+       NULL,
+};
+       
+static const struct attribute_group bci_ac_attr_group = {
+       .attrs = bci_ac_attrs,
+};
+
+static const struct attribute_group bci_usb_attr_group = {
+       .attrs = bci_usb_attrs,
+};
+
 /*
  * Returns the main charge FSM state
  * Or < 0 on failure.
@@ -355,7 +669,8 @@ static int twl4030_bci_get_property(struct power_supply *psy,
                                    union power_supply_propval *val)
 {
        struct twl4030_bci *bci = dev_get_drvdata(psy->dev->parent);
-       int is_charging;
+       int is_charging_any = 0;
+       int is_charging = 0;
        int state;
        int ret;
 
@@ -363,10 +678,29 @@ static int twl4030_bci_get_property(struct power_supply *psy,
        if (state < 0)
                return state;
 
-       if (psy->type == POWER_SUPPLY_TYPE_USB)
-               is_charging = state & TWL4030_MSTATEC_USB;
-       else
-               is_charging = state & TWL4030_MSTATEC_AC;
+       if (twl4030_bci_state_to_status(state) ==
+           POWER_SUPPLY_STATUS_CHARGING) {
+               is_charging_any =
+                       state & (TWL4030_MSTATEC_USB | TWL4030_MSTATEC_AC);
+               if (psy->type == POWER_SUPPLY_TYPE_USB)
+                       is_charging = state & TWL4030_MSTATEC_USB;
+               else
+                       is_charging = state & TWL4030_MSTATEC_AC;
+       }
+
+       if (is_charging_any != bci->was_charging_any) {
+               led_trigger_event(bci->charging_any_trig,
+                       is_charging_any ? LED_FULL : LED_OFF);
+               bci->was_charging_any = is_charging_any;
+       }
+
+       if (is_charging && psy->type != bci->current_supply) {
+               if (psy->type == POWER_SUPPLY_TYPE_USB)
+                       set_charge_current(bci, bci->usb_current);
+               else
+                       set_charge_current(bci, bci->ac_current);
+               bci->current_supply = psy->type;
+       }
 
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
@@ -423,10 +757,16 @@ static enum power_supply_property twl4030_charger_props[] = {
 
 static int __init twl4030_bci_probe(struct platform_device *pdev)
 {
+       const struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data;
        struct twl4030_bci *bci;
        int ret;
        u32 reg;
 
+       if (pdata == NULL) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
+
        bci = kzalloc(sizeof(*bci), GFP_KERNEL);
        if (bci == NULL)
                return -ENOMEM;
@@ -434,14 +774,25 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
        bci->dev = &pdev->dev;
        bci->irq_chg = platform_get_irq(pdev, 0);
        bci->irq_bci = platform_get_irq(pdev, 1);
+       bci->ac_current = 860; /* ~1.2A */
+       bci->usb_current = 330; /* ~560mA */
+       bci->irq_had_charger = -1;
+       bci->irq_check_count_time = jiffies;
 
        platform_set_drvdata(pdev, bci);
 
+       ratelimit_state_init(&bci->ratelimit, HZ, 2);
+
+       led_trigger_register_simple("twl4030_bci-charging",
+               &bci->charging_any_trig);
+
        bci->ac.name = "twl4030_ac";
        bci->ac.type = POWER_SUPPLY_TYPE_MAINS;
        bci->ac.properties = twl4030_charger_props;
        bci->ac.num_properties = ARRAY_SIZE(twl4030_charger_props);
        bci->ac.get_property = twl4030_bci_get_property;
+       bci->ac.supplied_to = pdata->supplied_to;
+       bci->ac.num_supplicants = pdata->num_supplicants;
 
        ret = power_supply_register(&pdev->dev, &bci->ac);
        if (ret) {
@@ -454,6 +805,14 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
        bci->usb.properties = twl4030_charger_props;
        bci->usb.num_properties = ARRAY_SIZE(twl4030_charger_props);
        bci->usb.get_property = twl4030_bci_get_property;
+       bci->usb.supplied_to = pdata->supplied_to;
+       bci->usb.num_supplicants = pdata->num_supplicants;
+
+       bci->usb_reg = regulator_get(bci->dev, "bci3v1");
+       if (IS_ERR(bci->usb_reg)) {
+               dev_warn(&pdev->dev, "regulator get bci3v1 failed\n");
+               bci->usb_reg = NULL;
+       }
 
        ret = power_supply_register(&pdev->dev, &bci->usb);
        if (ret) {
@@ -485,6 +844,18 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
                otg_register_notifier(bci->transceiver, &bci->otg_nb);
        }
 
+       ret = sysfs_create_group(&bci->ac.dev->kobj, &bci_ac_attr_group);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to create sysfs group: %d\n", ret);
+               goto fail_sysfs1;
+       }
+
+       ret = sysfs_create_group(&bci->usb.dev->kobj, &bci_usb_attr_group);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to create sysfs group: %d\n", ret);
+               goto fail_sysfs2;
+       }
+
        /* Enable interrupts now. */
        reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
                TWL4030_TBATOR1 | TWL4030_BATSTS);
@@ -501,12 +872,18 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
        if (ret < 0)
                dev_warn(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
 
+       bci->ac_charge_enable = true;
+       bci->usb_charge_enable = true;
        twl4030_charger_enable_ac(true);
        twl4030_charger_enable_usb(bci, true);
 
        return 0;
 
 fail_unmask_interrupts:
+       sysfs_remove_group(&bci->usb.dev->kobj, &bci_usb_attr_group);
+fail_sysfs2:
+       sysfs_remove_group(&bci->ac.dev->kobj, &bci_ac_attr_group);
+fail_sysfs1:
        if (bci->transceiver != NULL) {
                otg_unregister_notifier(bci->transceiver, &bci->otg_nb);
                otg_put_transceiver(bci->transceiver);
@@ -519,6 +896,7 @@ fail_chg_irq:
 fail_register_usb:
        power_supply_unregister(&bci->ac);
 fail_register_ac:
+       led_trigger_unregister_simple(bci->charging_any_trig);
        platform_set_drvdata(pdev, NULL);
        kfree(bci);
 
@@ -529,6 +907,9 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev)
 {
        struct twl4030_bci *bci = platform_get_drvdata(pdev);
 
+       sysfs_remove_group(&bci->usb.dev->kobj, &bci_usb_attr_group);
+       sysfs_remove_group(&bci->ac.dev->kobj, &bci_ac_attr_group);
+
        twl4030_charger_enable_ac(false);
        twl4030_charger_enable_usb(bci, false);
 
@@ -546,6 +927,7 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev)
        free_irq(bci->irq_chg, bci);
        power_supply_unregister(&bci->usb);
        power_supply_unregister(&bci->ac);
+       led_trigger_unregister_simple(bci->charging_any_trig);
        platform_set_drvdata(pdev, NULL);
        kfree(bci);
 
index 11cc308..b44f667 100644 (file)
@@ -58,6 +58,16 @@ struct twlreg_info {
 
        /* chip specific features */
        unsigned long           features;
+
+       /*
+        * optional override functions for voltage set/get
+        * these are currently only used for SMPS regulators
+        */
+       int                     (*get_voltage)(void *data);
+       int                     (*set_voltage)(void *data, int target_uV);
+
+       /* data passed from board for external get/set voltage */
+       void                    *data;
 };
 
 
@@ -375,7 +385,7 @@ static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
  * VAUX3 at 3V is incorrectly listed in some TI manuals as unsupported.
  * TI are revising the twl5030/tps659x0 specs to support that 3.0V setting.
  */
-#ifdef CONFIG_TWL4030_ALLOW_UNSUPPORTED
+#if 1 //def CONFIG_TWL4030_ALLOW_UNSUPPORTED
 #define UNSUP_MASK     0x0000
 #else
 #define UNSUP_MASK     0x8000
@@ -522,15 +532,25 @@ twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
        struct twlreg_info *info = rdev_get_drvdata(rdev);
        int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
 
-       twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
-               vsel);
+       if (info->set_voltage) {
+               return info->set_voltage(info->data, min_uV);
+       } else {
+               twlreg_write(info, TWL_MODULE_PM_RECEIVER,
+                       VREG_VOLTAGE_SMPS_4030, vsel);
+       }
+
        return 0;
 }
 
 static int twl4030smps_get_voltage(struct regulator_dev *rdev)
 {
        struct twlreg_info *info = rdev_get_drvdata(rdev);
-       int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+       int vsel;
+
+       if (info->get_voltage)
+               return info->get_voltage(info->data);
+
+       vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
                VREG_VOLTAGE_SMPS_4030);
 
        return vsel * 12500 + 600000;
@@ -541,6 +561,32 @@ static struct regulator_ops twl4030smps_ops = {
        .get_voltage    = twl4030smps_get_voltage,
 };
 
+static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV,
+       int max_uV, unsigned *selector)
+{
+       struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+       if (info->set_voltage)
+               return info->set_voltage(info->data, min_uV);
+
+       return -ENODEV;
+}
+
+static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
+{
+       struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+       if (info->get_voltage)
+               return info->get_voltage(info->data);
+
+       return -ENODEV;
+}
+
+static struct regulator_ops twl6030coresmps_ops = {
+       .set_voltage    = twl6030coresmps_set_voltage,
+       .get_voltage    = twl6030coresmps_get_voltage,
+};
+
 static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
@@ -898,6 +944,16 @@ static struct regulator_ops twlsmps_ops = {
                }, \
        }
 
+#define TWL6030_ADJUSTABLE_SMPS(label) { \
+       .desc = { \
+               .name = #label, \
+               .id = TWL6030_REG_##label, \
+               .ops = &twl6030coresmps_ops, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
 #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
        .base = offset, \
        .min_mV = min_mVolts, \
@@ -999,6 +1055,9 @@ static struct twlreg_info twl_regs[] = {
        /* 6030 REG with base as PMC Slave Misc : 0x0030 */
        /* Turnon-delay and remap configuration values for 6030 are not
           verified since the specification is not public */
+       TWL6030_ADJUSTABLE_SMPS(VDD1),
+       TWL6030_ADJUSTABLE_SMPS(VDD2),
+       TWL6030_ADJUSTABLE_SMPS(VDD3),
        TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300),
        TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300),
        TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300),
@@ -1052,6 +1111,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
        struct regulator_init_data      *initdata;
        struct regulation_constraints   *c;
        struct regulator_dev            *rdev;
+       struct twl_regulator_driver_data        *drvdata;
 
        for (i = 0, info = NULL; i < ARRAY_SIZE(twl_regs); i++) {
                if (twl_regs[i].desc.id != pdev->id)
@@ -1066,8 +1126,16 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
        if (!initdata)
                return -EINVAL;
 
-       /* copy the features into regulator data */
-       info->features = (unsigned long)initdata->driver_data;
+       drvdata = initdata->driver_data;
+
+       if (!drvdata)
+               return -EINVAL;
+
+       /* copy the driver data into regulator data */
+       info->features = drvdata->features;
+       info->data = drvdata->data;
+       info->set_voltage = drvdata->set_voltage;
+       info->get_voltage = drvdata->get_voltage;
 
        /* Constrain board-specific capabilities according to what
         * this driver and the chip itself can actually do.
index 565742b..9592b93 100644 (file)
@@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
 
        return err;
 }
@@ -380,18 +384,27 @@ EXPORT_SYMBOL_GPL(rtc_set_alarm);
 int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        int err;
+       struct rtc_time now;
 
        err = rtc_valid_tm(&alarm->time);
        if (err != 0)
                return err;
 
+       err = rtc_read_time(rtc, &now);
+       if (err)
+               return err;
+
        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
 
        rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
        rtc->aie_timer.period = ktime_set(0, 0);
-       if (alarm->enabled) {
+
+       /* Alarm has to be enabled & in the future for us to enqueue it */
+       if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 <
+                        rtc->aie_timer.node.expires.tv64)) {
+
                rtc->aie_timer.enabled = 1;
                timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
        }
@@ -569,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
 void rtc_update_irq(struct rtc_device *rtc,
                unsigned long num, unsigned long events)
 {
+       pm_stay_awake(rtc->dev.parent);
        schedule_work(&rtc->irqwork);
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq);
@@ -831,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work)
 
        mutex_lock(&rtc->ops_lock);
 again:
+       pm_relax(rtc->dev.parent);
        __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
        while ((next = timerqueue_getnext(&rtc->timerqueue))) {
index 65894f0..42986d7 100644 (file)
@@ -1073,7 +1073,7 @@ static const struct file_operations dasd_stats_global_fops = {
 static void dasd_profile_init(struct dasd_profile *profile,
                              struct dentry *base_dentry)
 {
-       mode_t mode;
+       umode_t mode;
        struct dentry *pde;
 
        if (!base_dentry)
@@ -1112,7 +1112,7 @@ static void dasd_statistics_removeroot(void)
 
 static void dasd_statistics_createroot(void)
 {
-       mode_t mode;
+       umode_t mode;
        struct dentry *pde;
 
        dasd_debugfs_root_entry = NULL;
index dee1a09..caca9b7 100644 (file)
@@ -472,7 +472,7 @@ static const struct file_operations bfad_debugfs_op_regwr = {
 
 struct bfad_debugfs_entry {
        const char *name;
-       mode_t  mode;
+       umode_t mode;
        const struct file_operations *fops;
 };
 
index 25cdff3..b3742c4 100644 (file)
@@ -132,4 +132,8 @@ source "drivers/staging/nvec/Kconfig"
 
 source "drivers/staging/media/Kconfig"
 
+source "drivers/staging/omapdrm/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
 endif # STAGING
index a25f3f2..b7426d3 100644 (file)
@@ -57,3 +57,5 @@ obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4)  += ste_rmi4/
 obj-$(CONFIG_DRM_PSB)          += gma500/
 obj-$(CONFIG_INTEL_MEI)                += mei/
 obj-$(CONFIG_MFD_NVEC)         += nvec/
+obj-$(CONFIG_DRM_OMAP)         += omapdrm/
+obj-$(CONFIG_ANDROID)          += android/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644 (file)
index 0000000..db971b9
--- /dev/null
@@ -0,0 +1,86 @@
+menu "Android"
+
+config ANDROID
+       bool "Android Drivers"
+       default n
+       ---help---
+         Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+       tristate "Android Binder IPC Driver"
+       default n
+
+config ASHMEM
+       tristate "Enable the Anonymous Shared Memory Subsystem"
+       default n
+       depends on SHMEM || TINY_SHMEM
+       help
+         The ashmem subsystem is a new shared memory allocator, similar to
+         POSIX SHM but with different behavior and sporting a simpler
+         file-based API.
+
+config ANDROID_LOGGER
+       tristate "Android log driver"
+       default n
+
+config ANDROID_PERSISTENT_RAM
+       bool
+       depends on HAVE_MEMBLOCK
+       select REED_SOLOMON
+       select REED_SOLOMON_ENC8
+       select REED_SOLOMON_DEC8
+
+config ANDROID_RAM_CONSOLE
+       bool "Android RAM buffer console"
+       depends on !S390 && !UML && HAVE_MEMBLOCK
+       select ANDROID_PERSISTENT_RAM
+       default n
+
+config ANDROID_TIMED_OUTPUT
+       bool "Timed output class driver"
+       default y
+
+config ANDROID_TIMED_GPIO
+       tristate "Android timed gpio driver"
+       depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+       default n
+
+config ANDROID_LOW_MEMORY_KILLER
+       tristate "Android Low Memory Killer"
+       default n
+       ---help---
+         Register processes to be killed when memory is low
+
+source "drivers/staging/android/switch/Kconfig"
+
+config ANDROID_INTF_ALARM
+       bool "Android alarm driver"
+       depends on RTC_CLASS
+       default n
+       help
+         Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+         elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+         Also provides an interface to set the wall time which must be used
+         for elapsed realtime to work.
+
+config ANDROID_INTF_ALARM_DEV
+       bool "Android alarm device"
+       depends on ANDROID_INTF_ALARM
+       default y
+       help
+         Exports the alarm interface to user-space.
+
+config ANDROID_ALARM_OLDDRV_COMPAT
+       bool "Android Alarm compatibility with old drivers"
+       depends on ANDROID_INTF_ALARM
+       default n
+       help
+         Provides preprocessor alias to aid compatibility with
+         older out-of-tree drivers that use the Android Alarm
+         in-kernel API. This will be removed eventually.
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644 (file)
index 0000000..9b6c9ed
--- /dev/null
@@ -0,0 +1,11 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o
+obj-$(CONFIG_ASHMEM)                   += ashmem.o
+obj-$(CONFIG_ANDROID_LOGGER)           += logger.o
+obj-$(CONFIG_ANDROID_PERSISTENT_RAM)   += persistent_ram.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE)      += ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT)     += timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO)       += timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)        += lowmemorykiller.o
+obj-$(CONFIG_ANDROID_SWITCH)           += switch/
+obj-$(CONFIG_ANDROID_INTF_ALARM)       += alarm.o
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)   += alarm-dev.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
new file mode 100644 (file)
index 0000000..b15fb0d
--- /dev/null
@@ -0,0 +1,10 @@
+TODO:
+       - checkpatch.pl cleanups
+       - sparse fixes
+       - rename files to be not so "generic"
+       - make sure things build as modules properly
+       - add proper arch dependencies as needed
+       - audit userspace interfaces to make sure they are sane
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
new file mode 100644 (file)
index 0000000..03efb34
--- /dev/null
@@ -0,0 +1,297 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+       int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+       do { \
+               if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+                       pr_info(args); \
+               } \
+       } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+       ANDROID_ALARM_RTC_WAKEUP_MASK | \
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD               _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD      _IOW('a', 3, time_t)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wake_lock alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct android_alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       int rv = 0;
+       unsigned long flags;
+       struct timespec new_alarm_time;
+       struct timespec new_rtc_time;
+       struct timespec tmp_time;
+       enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+       uint32_t alarm_type_mask = 1U << alarm_type;
+
+       if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+               return -EINVAL;
+
+       if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+               if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+                       return -EPERM;
+               if (file->private_data == NULL &&
+                   cmd != ANDROID_ALARM_SET_RTC) {
+                       spin_lock_irqsave(&alarm_slock, flags);
+                       if (alarm_opened) {
+                               spin_unlock_irqrestore(&alarm_slock, flags);
+                               return -EBUSY;
+                       }
+                       alarm_opened = 1;
+                       file->private_data = (void *)1;
+                       spin_unlock_irqrestore(&alarm_slock, flags);
+               }
+       }
+
+       switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+       case ANDROID_ALARM_CLEAR(0):
+               spin_lock_irqsave(&alarm_slock, flags);
+               pr_alarm(IO, "alarm %d clear\n", alarm_type);
+               android_alarm_try_to_cancel(&alarms[alarm_type]);
+               if (alarm_pending) {
+                       alarm_pending &= ~alarm_type_mask;
+                       if (!alarm_pending && !wait_pending)
+                               wake_unlock(&alarm_wake_lock);
+               }
+               alarm_enabled &= ~alarm_type_mask;
+               spin_unlock_irqrestore(&alarm_slock, flags);
+               break;
+
+       case ANDROID_ALARM_SET_OLD:
+       case ANDROID_ALARM_SET_AND_WAIT_OLD:
+               if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+                       rv = -EFAULT;
+                       goto err1;
+               }
+               new_alarm_time.tv_nsec = 0;
+               goto from_old_alarm_set;
+
+       case ANDROID_ALARM_SET_AND_WAIT(0):
+       case ANDROID_ALARM_SET(0):
+               if (copy_from_user(&new_alarm_time, (void __user *)arg,
+                   sizeof(new_alarm_time))) {
+                       rv = -EFAULT;
+                       goto err1;
+               }
+from_old_alarm_set:
+               spin_lock_irqsave(&alarm_slock, flags);
+               pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+                       new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+               alarm_enabled |= alarm_type_mask;
+               android_alarm_start_range(&alarms[alarm_type],
+                       timespec_to_ktime(new_alarm_time),
+                       timespec_to_ktime(new_alarm_time));
+               spin_unlock_irqrestore(&alarm_slock, flags);
+               if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+                   && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+                       break;
+               /* fall though */
+       case ANDROID_ALARM_WAIT:
+               spin_lock_irqsave(&alarm_slock, flags);
+               pr_alarm(IO, "alarm wait\n");
+               if (!alarm_pending && wait_pending) {
+                       wake_unlock(&alarm_wake_lock);
+                       wait_pending = 0;
+               }
+               spin_unlock_irqrestore(&alarm_slock, flags);
+               rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+               if (rv)
+                       goto err1;
+               spin_lock_irqsave(&alarm_slock, flags);
+               rv = alarm_pending;
+               wait_pending = 1;
+               alarm_pending = 0;
+               spin_unlock_irqrestore(&alarm_slock, flags);
+               break;
+       case ANDROID_ALARM_SET_RTC:
+               if (copy_from_user(&new_rtc_time, (void __user *)arg,
+                   sizeof(new_rtc_time))) {
+                       rv = -EFAULT;
+                       goto err1;
+               }
+               rv = android_alarm_set_rtc(new_rtc_time);
+               spin_lock_irqsave(&alarm_slock, flags);
+               alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+               wake_up(&alarm_wait_queue);
+               spin_unlock_irqrestore(&alarm_slock, flags);
+               if (rv < 0)
+                       goto err1;
+               break;
+       case ANDROID_ALARM_GET_TIME(0):
+               switch (alarm_type) {
+               case ANDROID_ALARM_RTC_WAKEUP:
+               case ANDROID_ALARM_RTC:
+                       getnstimeofday(&tmp_time);
+                       break;
+               case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+               case ANDROID_ALARM_ELAPSED_REALTIME:
+                       tmp_time =
+                               ktime_to_timespec(alarm_get_elapsed_realtime());
+                       break;
+               case ANDROID_ALARM_TYPE_COUNT:
+               case ANDROID_ALARM_SYSTEMTIME:
+                       ktime_get_ts(&tmp_time);
+                       break;
+               }
+               if (copy_to_user((void __user *)arg, &tmp_time,
+                   sizeof(tmp_time))) {
+                       rv = -EFAULT;
+                       goto err1;
+               }
+               break;
+
+       default:
+               rv = -EINVAL;
+               goto err1;
+       }
+err1:
+       return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+       file->private_data = NULL;
+       return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+       int i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&alarm_slock, flags);
+       if (file->private_data != 0) {
+               for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+                       uint32_t alarm_type_mask = 1U << i;
+                       if (alarm_enabled & alarm_type_mask) {
+                               pr_alarm(INFO, "alarm_release: clear alarm, "
+                                       "pending %d\n",
+                                       !!(alarm_pending & alarm_type_mask));
+                               alarm_enabled &= ~alarm_type_mask;
+                       }
+                       spin_unlock_irqrestore(&alarm_slock, flags);
+                       android_alarm_cancel(&alarms[i]);
+                       spin_lock_irqsave(&alarm_slock, flags);
+               }
+               if (alarm_pending | wait_pending) {
+                       if (alarm_pending)
+                               pr_alarm(INFO, "alarm_release: clear "
+                                       "pending alarms %x\n", alarm_pending);
+                       wake_unlock(&alarm_wake_lock);
+                       wait_pending = 0;
+                       alarm_pending = 0;
+               }
+               alarm_opened = 0;
+       }
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       return 0;
+}
+
+static void alarm_triggered(struct android_alarm *alarm)
+{
+       unsigned long flags;
+       uint32_t alarm_type_mask = 1U << alarm->type;
+
+       pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+       spin_lock_irqsave(&alarm_slock, flags);
+       if (alarm_enabled & alarm_type_mask) {
+               wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
+               alarm_enabled &= ~alarm_type_mask;
+               alarm_pending |= alarm_type_mask;
+               wake_up(&alarm_wait_queue);
+       }
+       spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = alarm_ioctl,
+       .open = alarm_open,
+       .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "alarm",
+       .fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+       int err;
+       int i;
+
+       err = misc_register(&alarm_device);
+       if (err)
+               return err;
+
+       for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+               android_alarm_init(&alarms[i], i, alarm_triggered);
+       wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
+
+       return 0;
+}
+
+static void  __exit alarm_dev_exit(void)
+{
+       misc_deregister(&alarm_device);
+       wake_lock_destroy(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
diff --git a/drivers/staging/android/alarm.c b/drivers/staging/android/alarm.c
new file mode 100644 (file)
index 0000000..c68950b
--- /dev/null
@@ -0,0 +1,601 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+       int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+                       ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+       do { \
+               if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+                       pr_info(args); \
+               } \
+       } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+       ANDROID_ALARM_RTC_WAKEUP_MASK | \
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD               _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD      _IOW('a', 3, time_t)
+
+struct alarm_queue {
+       struct rb_root alarms;
+       struct rb_node *first;
+       struct hrtimer timer;
+       ktime_t delta;
+       bool stopped;
+       ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct wake_lock alarm_rtc_wake_lock;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+       struct android_alarm *alarm;
+       bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+                       base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+       if (base->stopped) {
+               pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+               return;
+       }
+
+       if (is_wakeup && !suspended && head_removed)
+               wake_unlock(&alarm_rtc_wake_lock);
+
+       if (!base->first)
+               return;
+
+       alarm = container_of(base->first, struct android_alarm, node);
+
+       pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+               alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+       if (is_wakeup && suspended) {
+               pr_alarm(FLOW, "changed alarm while suspened\n");
+               wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+               return;
+       }
+
+       hrtimer_try_to_cancel(&base->timer);
+       base->timer.node.expires = ktime_add(base->delta, alarm->expires);
+       base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+       hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct android_alarm *alarm)
+{
+       struct alarm_queue *base = &alarms[alarm->type];
+       struct rb_node **link = &base->alarms.rb_node;
+       struct rb_node *parent = NULL;
+       struct android_alarm *entry;
+       int leftmost = 1;
+       bool was_first = false;
+
+       pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+               alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+       if (base->first == &alarm->node) {
+               base->first = rb_next(&alarm->node);
+               was_first = true;
+       }
+       if (!RB_EMPTY_NODE(&alarm->node)) {
+               rb_erase(&alarm->node, &base->alarms);
+               RB_CLEAR_NODE(&alarm->node);
+       }
+
+       while (*link) {
+               parent = *link;
+               entry = rb_entry(parent, struct android_alarm, node);
+               /*
+               * We dont care about collisions. Nodes with
+               * the same expiry time stay together.
+               */
+               if (alarm->expires.tv64 < entry->expires.tv64) {
+                       link = &(*link)->rb_left;
+               } else {
+                       link = &(*link)->rb_right;
+                       leftmost = 0;
+               }
+       }
+       if (leftmost)
+               base->first = &alarm->node;
+       if (leftmost || was_first)
+               update_timer_locked(base, was_first);
+
+       rb_link_node(&alarm->node, parent, link);
+       rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * android_alarm_init - initialize an alarm
+ * @alarm:     the alarm to be initialized
+ * @type:      the alarm type to be used
+ * @function:  alarm callback function
+ */
+void android_alarm_init(struct android_alarm *alarm,
+       enum android_alarm_type type, void (*function)(struct android_alarm *))
+{
+       RB_CLEAR_NODE(&alarm->node);
+       alarm->type = type;
+       alarm->function = function;
+
+       pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * android_alarm_start_range - (re)start an alarm
+ * @alarm:     the alarm to be added
+ * @start:     earliest expiry time
+ * @end:       expiry time
+ */
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+                                                               ktime_t end)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&alarm_slock, flags);
+       alarm->softexpires = start;
+       alarm->expires = end;
+       alarm_enqueue_locked(alarm);
+       spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+/**
+ * android_alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm:     alarm to stop
+ *
+ * Returns:
+ *  0 when the alarm was not active
+ *  1 when the alarm was active
+ * -1 when the alarm may currently be executing the callback function and
+ *    cannot be stopped (it may also be inactive)
+ */
+int android_alarm_try_to_cancel(struct android_alarm *alarm)
+{
+       struct alarm_queue *base = &alarms[alarm->type];
+       unsigned long flags;
+       bool first = false;
+       int ret = 0;
+
+       spin_lock_irqsave(&alarm_slock, flags);
+       if (!RB_EMPTY_NODE(&alarm->node)) {
+               pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+                       alarm->type, alarm->function,
+                       ktime_to_ns(alarm->expires));
+               ret = 1;
+               if (base->first == &alarm->node) {
+                       base->first = rb_next(&alarm->node);
+                       first = true;
+               }
+               rb_erase(&alarm->node, &base->alarms);
+               RB_CLEAR_NODE(&alarm->node);
+               if (first)
+                       update_timer_locked(base, true);
+       } else
+               pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+                       alarm->type, alarm->function);
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       if (!ret && hrtimer_callback_running(&base->timer))
+               ret = -1;
+       return ret;
+}
+
+/**
+ * android_alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm:     the alarm to be cancelled
+ *
+ * Returns:
+ *  0 when the alarm was not active
+ *  1 when the alarm was active
+ */
+int android_alarm_cancel(struct android_alarm *alarm)
+{
+       for (;;) {
+               int ret = android_alarm_try_to_cancel(alarm);
+               if (ret >= 0)
+                       return ret;
+               cpu_relax();
+       }
+}
+
+/**
+ * alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time:  timespec value containing the new time
+ *
+ * Returns 0 on success or a negative error from do_settimeofday() /
+ * rtc_set_time().  The elapsed-realtime alarm deltas are adjusted so
+ * elapsed realtime is unaffected by the wall-clock jump.
+ */
+int android_alarm_set_rtc(struct timespec new_time)
+{
+       int i;
+       int ret;
+       unsigned long flags;
+       struct rtc_time rtc_new_rtc_time;
+       struct timespec tmp_time;
+
+       rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+       pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+               new_time.tv_sec, new_time.tv_nsec,
+               rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+               rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+               rtc_new_rtc_time.tm_mday,
+               rtc_new_rtc_time.tm_year + 1900);
+
+       mutex_lock(&alarm_setrtc_mutex);
+       spin_lock_irqsave(&alarm_slock, flags);
+       wake_lock(&alarm_rtc_wake_lock);
+       getnstimeofday(&tmp_time);
+       /* Freeze all wall-clock based queues while the time changes. */
+       for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+               hrtimer_try_to_cancel(&alarms[i].timer);
+               alarms[i].stopped = true;
+               alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+       }
+       /* Fold the wall-clock jump into the elapsed-realtime deltas. */
+       alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+               alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+               ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+                       timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       ret = do_settimeofday(&new_time);
+       spin_lock_irqsave(&alarm_slock, flags);
+       /* Unfreeze and reprogram the queues against the new wall time. */
+       for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+               alarms[i].stopped = false;
+               update_timer_locked(&alarms[i], false);
+       }
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       if (ret < 0) {
+               pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+               goto err;
+       }
+       if (!alarm_rtc_dev) {
+               pr_alarm(ERROR,
+                       "alarm_set_rtc: no RTC, time will be lost on reboot\n");
+               goto err;
+       }
+       ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+       if (ret < 0)
+               pr_alarm(ERROR, "alarm_set_rtc: "
+                       "Failed to set RTC, time will be lost on reboot\n");
+err:
+       wake_unlock(&alarm_rtc_wake_lock);
+       mutex_unlock(&alarm_setrtc_mutex);
+       return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+       ktime_t now;
+       unsigned long flags;
+       struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+       spin_lock_irqsave(&alarm_slock, flags);
+       /* While the queue is frozen (wall time being set), use the time
+        * captured at the freeze instead of the live clock. */
+       now = base->stopped ? base->stopped_time : ktime_get_real();
+       now = ktime_sub(now, base->delta);
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       return now;
+}
+
+/* hrtimer expiry handler: fires every expired alarm on the queue owning
+ * this timer, then reprograms the timer for the next pending alarm. */
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+       struct alarm_queue *base;
+       struct android_alarm *alarm;
+       unsigned long flags;
+       ktime_t now;
+
+       spin_lock_irqsave(&alarm_slock, flags);
+
+       base = container_of(timer, struct alarm_queue, timer);
+       now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+       now = ktime_sub(now, base->delta);
+
+       pr_alarm(INT, "alarm_timer_triggered type %ld at %lld\n",
+               base - alarms, ktime_to_ns(now));
+
+       /* The rb-tree is time ordered; stop at the first unexpired alarm. */
+       while (base->first) {
+               alarm = container_of(base->first, struct android_alarm, node);
+               if (alarm->softexpires.tv64 > now.tv64) {
+                       pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+                               alarm->function, ktime_to_ns(alarm->expires),
+                               ktime_to_ns(alarm->softexpires));
+                       break;
+               }
+               base->first = rb_next(&alarm->node);
+               rb_erase(&alarm->node, &base->alarms);
+               RB_CLEAR_NODE(&alarm->node);
+               pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+                       alarm->type, alarm->function,
+                       ktime_to_ns(alarm->expires),
+                       ktime_to_ns(alarm->softexpires));
+               /* Drop the lock while running the callback; it may re-arm
+                * or cancel alarms itself. */
+               spin_unlock_irqrestore(&alarm_slock, flags);
+               alarm->function(alarm);
+               spin_lock_irqsave(&alarm_slock, flags);
+       }
+       if (!base->first)
+               pr_alarm(FLOW, "no more alarms of type %ld\n", base - alarms);
+       update_timer_locked(base, true);
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       return HRTIMER_NORESTART;
+}
+
+/* RTC irq task callback: reacts only to the alarm flag (RTC_AF) and
+ * holds a wakelock for 1s so userspace gets a chance to run. */
+static void alarm_triggered_func(void *p)
+{
+       struct rtc_device *rtc = alarm_rtc_dev;
+       if (!(rtc->irq_data & RTC_AF))
+               return;
+       pr_alarm(INT, "rtc alarm triggered\n");
+       wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+}
+
+/* Suspend hook: stop the wakeup hrtimers and program the RTC hardware
+ * alarm to fire at the earliest pending wakeup alarm.  Returns -EBUSY
+ * (aborting the suspend) if that alarm is about to expire. */
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       int                 err = 0;
+       unsigned long       flags;
+       struct rtc_wkalrm   rtc_alarm;
+       struct rtc_time     rtc_current_rtc_time;
+       unsigned long       rtc_current_time;
+       unsigned long       rtc_alarm_time;
+       struct timespec     rtc_delta;
+       struct timespec     wall_time;
+       struct alarm_queue *wakeup_queue = NULL;
+       struct alarm_queue *tmp_queue = NULL;
+
+       pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+       spin_lock_irqsave(&alarm_slock, flags);
+       suspended = true;
+       spin_unlock_irqrestore(&alarm_slock, flags);
+
+       hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+       hrtimer_cancel(&alarms[
+                       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+       /* Pick whichever wakeup queue has the earliest expiry. */
+       tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+       if (tmp_queue->first)
+               wakeup_queue = tmp_queue;
+       tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+       if (tmp_queue->first && (!wakeup_queue ||
+                               hrtimer_get_expires(&tmp_queue->timer).tv64 <
+                               hrtimer_get_expires(&wakeup_queue->timer).tv64))
+               wakeup_queue = tmp_queue;
+       if (wakeup_queue) {
+               /* rtc_delta = wall time - rtc time; used to convert the
+                * hrtimer expiry (wall clock) into an RTC alarm time. */
+               rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+               getnstimeofday(&wall_time);
+               rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+               set_normalized_timespec(&rtc_delta,
+                                       wall_time.tv_sec - rtc_current_time,
+                                       wall_time.tv_nsec);
+
+               rtc_alarm_time = timespec_sub(ktime_to_timespec(
+                       hrtimer_get_expires(&wakeup_queue->timer)),
+                       rtc_delta).tv_sec;
+
+               rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+               rtc_alarm.enabled = 1;
+               rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+               rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+               rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+               pr_alarm(SUSPEND,
+                       "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+                       rtc_alarm_time, rtc_current_time,
+                       rtc_delta.tv_sec, rtc_delta.tv_nsec);
+               if (rtc_current_time + 1 >= rtc_alarm_time) {
+                       pr_alarm(SUSPEND, "alarm about to go off\n");
+                       /* Too close to expiry: clear the RTC alarm, restart
+                        * the hrtimer queues and abort the suspend. */
+                       memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+                       rtc_alarm.enabled = 0;
+                       rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+                       spin_lock_irqsave(&alarm_slock, flags);
+                       suspended = false;
+                       wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
+                       update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+                                                                       false);
+                       update_timer_locked(&alarms[
+                               ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+                       err = -EBUSY;
+                       spin_unlock_irqrestore(&alarm_slock, flags);
+               }
+       }
+       return err;
+}
+
+/* Resume hook: clear the RTC hardware alarm and restart the wakeup
+ * alarm queues on their hrtimers. */
+static int alarm_resume(struct platform_device *pdev)
+{
+       struct rtc_wkalrm alarm;
+       unsigned long       flags;
+
+       pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+       memset(&alarm, 0, sizeof(alarm));
+       alarm.enabled = 0;
+       rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+       spin_lock_irqsave(&alarm_slock, flags);
+       suspended = false;
+       update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+       update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+                                                                       false);
+       spin_unlock_irqrestore(&alarm_slock, flags);
+
+       return 0;
+}
+
+/* irq task registered on the alarm RTC in rtc_alarm_add_device(). */
+static struct rtc_task alarm_rtc_task = {
+       .func = alarm_triggered_func
+};
+
+/* Class-interface hook: adopt the first RTC device registered as the
+ * system alarm RTC and create the "alarm" platform device so the
+ * suspend/resume hooks run. */
+static int rtc_alarm_add_device(struct device *dev,
+                               struct class_interface *class_intf)
+{
+       int err;
+       struct rtc_device *rtc = to_rtc_device(dev);
+
+       mutex_lock(&alarm_setrtc_mutex);
+
+       /* Only one RTC is used for alarms; ignore any further devices. */
+       if (alarm_rtc_dev) {
+               err = -EBUSY;
+               goto err1;
+       }
+
+       alarm_platform_dev =
+               platform_device_register_simple("alarm", -1, NULL, 0);
+       if (IS_ERR(alarm_platform_dev)) {
+               err = PTR_ERR(alarm_platform_dev);
+               goto err2;
+       }
+       err = rtc_irq_register(rtc, &alarm_rtc_task);
+       if (err)
+               goto err3;
+       alarm_rtc_dev = rtc;
+       pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+       mutex_unlock(&alarm_setrtc_mutex);
+
+       return 0;
+
+err3:
+       platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+       mutex_unlock(&alarm_setrtc_mutex);
+       return err;
+}
+
+/* Class-interface hook: tear down when our adopted RTC goes away. */
+static void rtc_alarm_remove_device(struct device *dev,
+                                   struct class_interface *class_intf)
+{
+       /* NOTE(review): if no device was ever adopted, alarm_rtc_dev is
+        * NULL and &alarm_rtc_dev->dev computes a NULL-based address;
+        * the compare then simply fails — confirm this is intended. */
+       if (dev == &alarm_rtc_dev->dev) {
+               pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+               rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+               platform_device_unregister(alarm_platform_dev);
+               alarm_rtc_dev = NULL;
+       }
+}
+
+/* Registered against rtc_class in alarm_driver_init(). */
+static struct class_interface rtc_alarm_interface = {
+       .add_dev = &rtc_alarm_add_device,
+       .remove_dev = &rtc_alarm_remove_device,
+};
+
+/* Platform driver exists solely to hook suspend/resume for the alarms. */
+static struct platform_driver alarm_driver = {
+       .suspend = alarm_suspend,
+       .resume = alarm_resume,
+       .driver = {
+               .name = "alarm"
+       }
+};
+
+/* Capture (boot_rtc - boot_systemtime) so elapsed realtime can later be
+ * derived from the wall clock alone. */
+static int __init alarm_late_init(void)
+{
+       unsigned long   flags;
+       struct timespec tmp_time, system_time;
+
+       /* this needs to run after the rtc is read at boot */
+       spin_lock_irqsave(&alarm_slock, flags);
+       /* We read the current rtc and system time so we can later calculate
+        * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+        * (rtc - (boot_rtc - boot_systemtime))
+        */
+       getnstimeofday(&tmp_time);
+       ktime_get_ts(&system_time);
+       alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+               alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+                       timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+       spin_unlock_irqrestore(&alarm_slock, flags);
+       return 0;
+}
+
+/* Module init: one hrtimer per alarm queue (CLOCK_REALTIME for queues
+ * below SYSTEMTIME, CLOCK_MONOTONIC for SYSTEMTIME), then register the
+ * platform driver and the RTC class interface. */
+static int __init alarm_driver_init(void)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+               hrtimer_init(&alarms[i].timer,
+                               CLOCK_REALTIME, HRTIMER_MODE_ABS);
+               alarms[i].timer.function = alarm_timer_triggered;
+       }
+       hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+                    CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+       err = platform_driver_register(&alarm_driver);
+       if (err < 0)
+               goto err1;
+       wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
+       rtc_alarm_interface.class = rtc_class;
+       err = class_interface_register(&rtc_alarm_interface);
+       if (err < 0)
+               goto err2;
+
+       return 0;
+
+err2:
+       wake_lock_destroy(&alarm_rtc_wake_lock);
+       platform_driver_unregister(&alarm_driver);
+err1:
+       return err;
+}
+
+/* Module exit: undo alarm_driver_init() in reverse order. */
+static void  __exit alarm_exit(void)
+{
+       class_interface_unregister(&rtc_alarm_interface);
+       wake_lock_destroy(&alarm_rtc_wake_lock);
+       platform_driver_unregister(&alarm_driver);
+}
+
+/* late_initcall so alarm_late_init runs after the boot-time RTC read. */
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
new file mode 100644 (file)
index 0000000..6eecbde
--- /dev/null
@@ -0,0 +1,121 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+/* Alarm clock types; also used as the ioctl "type" nibble and as bit
+ * numbers for the return-flag masks below. */
+enum android_alarm_type {
+       /* return code bit numbers or set alarm arg */
+       ANDROID_ALARM_RTC_WAKEUP,
+       ANDROID_ALARM_RTC,
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+       ANDROID_ALARM_ELAPSED_REALTIME,
+       ANDROID_ALARM_SYSTEMTIME,
+
+       ANDROID_ALARM_TYPE_COUNT,
+
+       /* return code bit numbers */
+       /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep running while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct android_alarm - the basic alarm structure
+ * @node:      red black tree node for time ordered insertion
+ * @type:      alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires:   the absolute expiry time.
+ * @function:  alarm expiry callback function
+ *
+ * The alarm structure must be initialized by android_alarm_init()
+ *
+ */
+
+struct android_alarm {
+       struct rb_node          node;
+       enum android_alarm_type type;
+       ktime_t                 softexpires;
+       ktime_t                 expires;
+       void                    (*function)(struct android_alarm *);
+};
+
+void android_alarm_init(struct android_alarm *alarm,
+       enum android_alarm_type type, void (*function)(struct android_alarm *));
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+                                                               ktime_t end);
+int android_alarm_try_to_cancel(struct android_alarm *alarm);
+int android_alarm_cancel(struct android_alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+/* NOTE(review): top-level const on a by-value parameter is ignored for
+ * type compatibility, so this matches the non-const definition. */
+int android_alarm_set_rtc(const struct timespec ts);
+
+#ifdef CONFIG_ANDROID_ALARM_OLDDRV_COMPAT
+/*
+ * Some older drivers depend on the old API,
+ * so provide compatibility macros for now.
+ */
+#define alarm android_alarm
+#define alarm_init(x, y, z) android_alarm_init(x, y, z)
+#define alarm_start_range(x, y, z) android_alarm_start_range(x, y, z)
+#define alarm_try_to_cancel(x) android_alarm_try_to_cancel(x)
+#define alarm_cancel(x) android_alarm_cancel(x)
+#define alarm_set_rtc(x) android_alarm_set_rtc(x)
+#endif
+
+
+#endif
+
+/* Bit masks derived from the type enum; this part of the header sits
+ * outside the __KERNEL__ guard and is shared with userspace. */
+enum android_alarm_return_flags {
+       ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+       ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+                               1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+       ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+                               1U << ANDROID_ALARM_ELAPSED_REALTIME,
+       ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+       ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* ioctl encoding: low nibble of _IOC_NR is the command, high nibble the
+ * alarm type (see ANDROID_ALARM_IOCTL_TO_TYPE). */
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT                  _IO('a', 1)
+
+#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
+
+#endif
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
new file mode 100644 (file)
index 0000000..9f1f27e
--- /dev/null
@@ -0,0 +1,752 @@
+/* mm/ashmem.c
+**
+** Anonymous Shared Memory Subsystem, ashmem
+**
+** Copyright (C) 2008 Google, Inc.
+**
+** Robert Love <rlove@google.com>
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+** GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include "ashmem.h"
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/*
+ * ashmem_area - anonymous shared memory area
+ * Lifecycle: From our parent file's open() until its release()
+ * Locking: Protected by `ashmem_mutex'
+ * Big Note: Mappings do NOT pin this structure; it dies on close()
+ */
+struct ashmem_area {
+       char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
+       struct list_head unpinned_list;  /* list of unpinned ranges in area */
+       struct file *file;               /* the shmem-based backing file */
+       size_t size;                     /* size of the mapping, in bytes */
+       unsigned long prot_mask;         /* allowed prot bits, as vm_flags */
+};
+
+/*
+ * ashmem_range - represents an interval of unpinned (evictable) pages
+ * Lifecycle: From unpin to pin
+ * Locking: Protected by `ashmem_mutex'
+ * Page bounds are inclusive on both ends.
+ */
+struct ashmem_range {
+       struct list_head lru;           /* entry in LRU list */
+       struct list_head unpinned;      /* entry in its area's unpinned list */
+       struct ashmem_area *asma;       /* associated area */
+       size_t pgstart;                 /* starting page, inclusive */
+       size_t pgend;                   /* ending page, inclusive */
+       unsigned int purged;            /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/* Count of pages on our LRU list, protected by ashmem_mutex */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+/* slab caches for ashmem_area and ashmem_range objects */
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+/* Range helper macros.  NB: arguments may be evaluated more than once,
+ * so pass only side-effect-free expressions. */
+#define range_size(range) \
+       ((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+       ((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+       (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+       (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+       (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+       (page_in_range(range, start) || page_in_range(range, end) || \
+               page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+       ((range)->pgend < (page))
+
+/* prot bits userspace may request via ASHMEM_SET_PROT_MASK */
+#define PROT_MASK              (PROT_EXEC | PROT_READ | PROT_WRITE)
+
+/* Append a range to the global LRU and account its pages.
+ * Caller must hold ashmem_mutex. */
+static inline void lru_add(struct ashmem_range *range)
+{
+       list_add_tail(&range->lru, &ashmem_lru_list);
+       lru_count += range_size(range);
+}
+
+/* Remove a range from the global LRU and un-account its pages.
+ * Caller must hold ashmem_mutex. */
+static inline void lru_del(struct ashmem_range *range)
+{
+       list_del(&range->lru);
+       lru_count -= range_size(range);
+}
+
+/*
+ * range_alloc - allocate and initialize a new ashmem_range structure
+ *
+ * 'asma' - associated ashmem_area
+ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
+ * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * 'start' - starting page, inclusive
+ * 'end' - ending page, inclusive
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int range_alloc(struct ashmem_area *asma,
+                      struct ashmem_range *prev_range, unsigned int purged,
+                      size_t start, size_t end)
+{
+       struct ashmem_range *range;
+
+       range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+       if (unlikely(!range))
+               return -ENOMEM;
+
+       range->asma = asma;
+       range->pgstart = start;
+       range->pgend = end;
+       range->purged = purged;
+
+       /* keep the per-area unpinned list sorted: insert after prev_range */
+       list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+       if (range_on_lru(range))
+               lru_add(range);
+
+       return 0;
+}
+
+/* Unlink a range from its area (and the LRU if present) and free it.
+ * Caller must hold ashmem_mutex. */
+static void range_del(struct ashmem_range *range)
+{
+       list_del(&range->unpinned);
+       if (range_on_lru(range))
+               lru_del(range);
+       kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/*
+ * range_shrink - shrinks a range
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+                               size_t start, size_t end)
+{
+       size_t pre = range_size(range);
+
+       range->pgstart = start;
+       range->pgend = end;
+
+       /* lru accounting assumes the new bounds never grow the range */
+       if (range_on_lru(range))
+               lru_count -= pre - range_size(range);
+}
+
+/* Open: allocate a zeroed ashmem_area; the shmem backing file is only
+ * created lazily at mmap time. */
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+       struct ashmem_area *asma;
+       int ret;
+
+       ret = generic_file_open(inode, file);
+       if (unlikely(ret))
+               return ret;
+
+       asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+       if (unlikely(!asma))
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&asma->unpinned_list);
+       /* zalloc zeroed the buffer, so the name stays NUL terminated */
+       memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+       asma->prot_mask = PROT_MASK;
+       file->private_data = asma;
+
+       return 0;
+}
+
+/* Release: free every unpinned range and drop the backing shmem file. */
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+       struct ashmem_area *asma = file->private_data;
+       struct ashmem_range *range, *next;
+
+       mutex_lock(&ashmem_mutex);
+       list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+               range_del(range);
+       mutex_unlock(&ashmem_mutex);
+
+       if (asma->file)
+               fput(asma->file);
+       kmem_cache_free(ashmem_area_cachep, asma);
+
+       return 0;
+}
+
+/* Read: delegate to the backing shmem file (EOF until a size is set and
+ * the backing file exists). */
+static ssize_t ashmem_read(struct file *file, char __user *buf,
+                          size_t len, loff_t *pos)
+{
+       struct ashmem_area *asma = file->private_data;
+       int ret = 0;
+
+       mutex_lock(&ashmem_mutex);
+
+       /* If size is not set, or set to 0, always return EOF. */
+       if (asma->size == 0)
+               goto out;
+
+       if (!asma->file) {
+               ret = -EBADF;
+               goto out;
+       }
+
+       ret = asma->file->f_op->read(asma->file, buf, len, pos);
+       if (ret < 0)
+               goto out;
+
+       /** Update backing file pos, since f_ops->read() doesn't */
+       asma->file->f_pos = *pos;
+
+out:
+       mutex_unlock(&ashmem_mutex);
+       return ret;
+}
+
+/* Seek: delegate to the backing shmem file and mirror its f_pos. */
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+       struct ashmem_area *asma = file->private_data;
+       int ret;
+
+       mutex_lock(&ashmem_mutex);
+
+       if (asma->size == 0) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (!asma->file) {
+               ret = -EBADF;
+               goto out;
+       }
+
+       ret = asma->file->f_op->llseek(asma->file, offset, origin);
+       if (ret < 0)
+               goto out;
+
+       /** Copy f_pos from backing file, since f_ops->llseek() sets it */
+       file->f_pos = asma->file->f_pos;
+
+out:
+       mutex_unlock(&ashmem_mutex);
+       return ret;
+}
+
+/* Translate PROT_* bits into the corresponding VM_MAY* vma flags. */
+static inline unsigned long calc_vm_may_flags(unsigned long prot)
+{
+       return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
+              _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+              _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
+}
+
+/* mmap: validate size/prot, lazily create the backing shmem file, and
+ * point the vma at it. */
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct ashmem_area *asma = file->private_data;
+       int ret = 0;
+
+       mutex_lock(&ashmem_mutex);
+
+       /* user needs to SET_SIZE before mapping */
+       if (unlikely(!asma->size)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* requested protection bits must match our allowed protection mask */
+       if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
+                    calc_vm_prot_bits(PROT_MASK))) {
+               ret = -EPERM;
+               goto out;
+       }
+       /* drop VM_MAY* bits for anything outside the allowed prot mask */
+       vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
+
+       if (!asma->file) {
+               char *name = ASHMEM_NAME_DEF;
+               struct file *vmfile;
+
+               if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+                       name = asma->name;
+
+               /* ... and allocate the backing shmem file */
+               vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
+               if (unlikely(IS_ERR(vmfile))) {
+                       ret = PTR_ERR(vmfile);
+                       goto out;
+               }
+               asma->file = vmfile;
+       }
+       get_file(asma->file);
+
+       /*
+        * XXX - Reworked to use shmem_zero_setup() instead of
+        * shmem_set_file while we're in staging. -jstultz
+        */
+       if (vma->vm_flags & VM_SHARED) {
+               ret = shmem_zero_setup(vma);
+               if (ret) {
+                       fput(asma->file);
+                       goto out;
+               }
+       }
+
+       if (vma->vm_file)
+               fput(vma->vm_file);
+       vma->vm_file = asma->file;
+       vma->vm_flags |= VM_CAN_NONLINEAR;
+
+out:
+       mutex_unlock(&ashmem_mutex);
+       return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ *
+ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
+ * many objects (pages) we have in total.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+       struct ashmem_range *range, *next;
+
+       /* We might recurse into filesystem code, so bail out if necessary */
+       if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+               return -1;
+       if (!sc->nr_to_scan)
+               return lru_count;
+
+       mutex_lock(&ashmem_mutex);
+       list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+               struct inode *inode = range->asma->file->f_dentry->d_inode;
+               loff_t start = range->pgstart * PAGE_SIZE;
+               loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+
+               vmtruncate_range(inode, start, end);
+               range->purged = ASHMEM_WAS_PURGED;
+               lru_del(range);
+
+               /* NOTE(review): nr_to_scan is unsigned, so this test only
+                * stops on exact zero; subtracting a larger range_size
+                * wraps rather than going negative — confirm intended. */
+               sc->nr_to_scan -= range_size(range);
+               if (sc->nr_to_scan <= 0)
+                       break;
+       }
+       mutex_unlock(&ashmem_mutex);
+
+       return lru_count;
+}
+
+/* shrinker callback for the LRU of unpinned ashmem ranges */
+static struct shrinker ashmem_shrinker = {
+       .shrink = ashmem_shrink,
+       .seeks = DEFAULT_SEEKS * 4,
+};
+
+/* ASHMEM_SET_PROT_MASK handler: tighten (never widen) the allowed prot
+ * bits for future mappings of this area. */
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+       int ret = 0;
+
+       mutex_lock(&ashmem_mutex);
+
+       /* the user can only remove, not add, protection bits */
+       if (unlikely((asma->prot_mask & prot) != prot)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* does the application expect PROT_READ to imply PROT_EXEC? */
+       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+               prot |= PROT_EXEC;
+
+       asma->prot_mask = prot;
+
+out:
+       mutex_unlock(&ashmem_mutex);
+       return ret;
+}
+
+/* ASHMEM_SET_NAME handler: set the area name, only before the backing
+ * file exists (i.e. before the first mmap). */
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+       int ret = 0;
+
+       mutex_lock(&ashmem_mutex);
+
+       /* cannot change an existing mapping's name */
+       if (unlikely(asma->file)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* copies the full ASHMEM_NAME_LEN buffer, then forces termination */
+       if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
+                                   name, ASHMEM_NAME_LEN)))
+               ret = -EFAULT;
+       asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+
+out:
+       mutex_unlock(&ashmem_mutex);
+
+       return ret;
+}
+
+/* ASHMEM_GET_NAME handler: copy the user-set name (without the
+ * "dev/ashmem/" prefix) or the default name to userspace. */
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+       int ret = 0;
+
+       mutex_lock(&ashmem_mutex);
+       if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+               size_t len;
+
+               /*
+                * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+                * prevents us from revealing one user's stack to another.
+                */
+               len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+               if (unlikely(copy_to_user(name,
+                               asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
+                       ret = -EFAULT;
+       } else {
+               if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
+                                         sizeof(ASHMEM_NAME_DEF))))
+                       ret = -EFAULT;
+       }
+       mutex_unlock(&ashmem_mutex);
+
+       return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+       struct ashmem_range *range, *next;
+       int ret = ASHMEM_NOT_PURGED;
+
+       list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+               /* moved past last applicable page; we can short circuit */
+               if (range_before_page(range, pgstart))
+                       break;
+
+               /*
+                * The user can ask us to pin pages that span multiple ranges,
+                * or to pin pages that aren't even unpinned, so this is messy.
+                *
+                * Four cases:
+                * 1. The requested range subsumes an existing range, so we
+                *    just remove the entire matching range.
+                * 2. The requested range overlaps the start of an existing
+                *    range, so we just update that range.
+                * 3. The requested range overlaps the end of an existing
+                *    range, so we just update that range.
+                * 4. The requested range punches a hole in an existing range,
+                *    so we have to update one side of the range and then
+                *    create a new range for the other side.
+                */
+               if (page_range_in_range(range, pgstart, pgend)) {
+                       /* accumulate purge state across all touched ranges */
+                       ret |= range->purged;
+
+                       /* Case #1: Easy. Just nuke the whole thing. */
+                       if (page_range_subsumes_range(range, pgstart, pgend)) {
+                               range_del(range);
+                               continue;
+                       }
+
+                       /* Case #2: We overlap from the start, so adjust it */
+                       if (range->pgstart >= pgstart) {
+                               range_shrink(range, pgend + 1, range->pgend);
+                               continue;
+                       }
+
+                       /* Case #3: We overlap from the rear, so adjust it */
+                       if (range->pgend <= pgend) {
+                               range_shrink(range, range->pgstart, pgstart-1);
+                               continue;
+                       }
+
+                       /*
+                        * Case #4: We eat a chunk out of the middle. A bit
+                        * more complicated, we allocate a new range for the
+                        * second half and adjust the first chunk's endpoint.
+                        */
+                       range_alloc(asma, range, range->purged,
+                                   pgend + 1, range->pgend);
+                       range_shrink(range, range->pgstart, pgstart - 1);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+       struct ashmem_range *range, *next;
+       unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+       list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+               /* short circuit: this is our insertion point */
+               if (range_before_page(range, pgstart))
+                       break;
+
+               /*
+                * The user can ask us to unpin pages that are already entirely
+                * or partially pinned. We handle those two cases here.
+                */
+               if (page_range_subsumed_by_range(range, pgstart, pgend))
+                       return 0;
+               if (page_range_in_range(range, pgstart, pgend)) {
+                       /*
+                        * Grow the request to cover the overlapping range,
+                        * delete that range, and rescan from the top.
+                        * NOTE(review): the trailing ',' on the next line is
+                        * the comma operator -- it behaves like ';' here, but
+                        * should be a semicolon.
+                        */
+                       pgstart = min_t(size_t, range->pgstart, pgstart),
+                       pgend = max_t(size_t, range->pgend, pgend);
+                       purged |= range->purged;
+                       range_del(range);
+                       goto restart;
+               }
+       }
+
+       /*
+        * Insert the merged range before 'range'.  NOTE(review): if the loop
+        * above ran to completion (or the list was empty), 'range' is the
+        * container_of() of the list head rather than a real entry --
+        * presumably range_alloc() only uses its list linkage; confirm.
+        */
+       return range_alloc(asma, range, purged, pgstart, pgend);
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+                                size_t pgend)
+{
+       struct ashmem_range *range;
+       int ret = ASHMEM_IS_PINNED;
+
+       list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+               /* past this point no later entry can overlap; short circuit */
+               if (range_before_page(range, pgstart))
+                       break;
+               /* any overlap with an unpinned range makes it "unpinned" */
+               if (page_range_in_range(range, pgstart, pgend)) {
+                       ret = ASHMEM_IS_UNPINNED;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Validate a userspace ASHMEM_PIN/UNPIN/GET_PIN_STATUS request and
+ * dispatch it to the matching helper under ashmem_mutex.
+ */
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+                           void __user *p)
+{
+       struct ashmem_pin pin;
+       size_t pgstart, pgend;
+       int ret = -EINVAL;
+
+       /* pinning only makes sense once the region has a backing file */
+       if (unlikely(!asma->file))
+               return -EINVAL;
+
+       if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+               return -EFAULT;
+
+       /* per custom, you can pass zero for len to mean "everything onward" */
+       if (!pin.len)
+               pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+       /* both offset and len must be page-aligned */
+       if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+               return -EINVAL;
+
+       /* reject 32-bit overflow of offset + len */
+       if (unlikely(((__u32) -1) - pin.offset < pin.len))
+               return -EINVAL;
+
+       /*
+        * The request must lie within the page-aligned region size.
+        * NOTE(review): asma->size is read before ashmem_mutex is taken --
+        * presumably racy vs. ASHMEM_SET_SIZE; confirm.
+        */
+       if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+               return -EINVAL;
+
+       /* convert the byte interval into an inclusive page interval */
+       pgstart = pin.offset / PAGE_SIZE;
+       pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+       mutex_lock(&ashmem_mutex);
+
+       switch (cmd) {
+       case ASHMEM_PIN:
+               ret = ashmem_pin(asma, pgstart, pgend);
+               break;
+       case ASHMEM_UNPIN:
+               ret = ashmem_unpin(asma, pgstart, pgend);
+               break;
+       case ASHMEM_GET_PIN_STATUS:
+               ret = ashmem_get_pin_status(asma, pgstart, pgend);
+               break;
+       }
+
+       mutex_unlock(&ashmem_mutex);
+
+       return ret;
+}
+
+/* ioctl entry point for /dev/ashmem; unknown commands return -ENOTTY */
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct ashmem_area *asma = file->private_data;
+       long ret = -ENOTTY;
+
+       switch (cmd) {
+       case ASHMEM_SET_NAME:
+               ret = set_name(asma, (void __user *) arg);
+               break;
+       case ASHMEM_GET_NAME:
+               ret = get_name(asma, (void __user *) arg);
+               break;
+       case ASHMEM_SET_SIZE:
+               /*
+                * The size may only be set before the region is mmapped
+                * (asma->file is set at mmap time).
+                * NOTE(review): asma->size is written without holding
+                * ashmem_mutex -- confirm this is safe vs. pin/unpin.
+                */
+               ret = -EINVAL;
+               if (!asma->file) {
+                       ret = 0;
+                       asma->size = (size_t) arg;
+               }
+               break;
+       case ASHMEM_GET_SIZE:
+               /* the value is returned in the ioctl return code itself */
+               ret = asma->size;
+               break;
+       case ASHMEM_SET_PROT_MASK:
+               ret = set_prot_mask(asma, arg);
+               break;
+       case ASHMEM_GET_PROT_MASK:
+               ret = asma->prot_mask;
+               break;
+       case ASHMEM_PIN:
+       case ASHMEM_UNPIN:
+       case ASHMEM_GET_PIN_STATUS:
+               ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+               break;
+       case ASHMEM_PURGE_ALL_CACHES:
+               ret = -EPERM;
+               if (capable(CAP_SYS_ADMIN)) {
+                       struct shrink_control sc = {
+                               .gfp_mask = GFP_KERNEL,
+                               .nr_to_scan = 0,
+                       };
+                       /* nr_to_scan == 0 asks the shrinker how many objects
+                        * it has; the second call then purges them all */
+                       ret = ashmem_shrink(&ashmem_shrinker, &sc);
+                       sc.nr_to_scan = ret;
+                       ashmem_shrink(&ashmem_shrinker, &sc);
+               }
+               break;
+       }
+
+       return ret;
+}
+
+/* file operations for /dev/ashmem; the same ioctl handler serves both
+ * native and compat (32-bit) callers */
+static const struct file_operations ashmem_fops = {
+       .owner = THIS_MODULE,
+       .open = ashmem_open,
+       .release = ashmem_release,
+       .read = ashmem_read,
+       .llseek = ashmem_llseek,
+       .mmap = ashmem_mmap,
+       .unlocked_ioctl = ashmem_ioctl,
+       .compat_ioctl = ashmem_ioctl,
+};
+
+/* dynamic-minor misc device: shows up as /dev/ashmem */
+static struct miscdevice ashmem_misc = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "ashmem",
+       .fops = &ashmem_fops,
+};
+
+/*
+ * Module init: create the two slab caches, register the misc device and
+ * the shrinker.  Fix vs. original: the original leaked ashmem_area_cachep
+ * when the range cache creation failed, and leaked both caches when
+ * misc_register() failed; use goto-based unwinding so every error path
+ * releases what was already set up.
+ */
+static int __init ashmem_init(void)
+{
+       int ret;
+
+       ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+                                         sizeof(struct ashmem_area),
+                                         0, 0, NULL);
+       if (unlikely(!ashmem_area_cachep)) {
+               printk(KERN_ERR "ashmem: failed to create slab cache\n");
+               return -ENOMEM;
+       }
+
+       ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+                                         sizeof(struct ashmem_range),
+                                         0, 0, NULL);
+       if (unlikely(!ashmem_range_cachep)) {
+               printk(KERN_ERR "ashmem: failed to create slab cache\n");
+               ret = -ENOMEM;
+               goto out_free1;
+       }
+
+       ret = misc_register(&ashmem_misc);
+       if (unlikely(ret)) {
+               printk(KERN_ERR "ashmem: failed to register misc device!\n");
+               goto out_free2;
+       }
+
+       register_shrinker(&ashmem_shrinker);
+
+       printk(KERN_INFO "ashmem: initialized\n");
+
+       return 0;
+
+out_free2:
+       kmem_cache_destroy(ashmem_range_cachep);
+out_free1:
+       kmem_cache_destroy(ashmem_area_cachep);
+       return ret;
+}
+
+/* Module exit: tear down in reverse order of ashmem_init(). */
+static void __exit ashmem_exit(void)
+{
+       int ret;
+
+       /* stop shrinker callbacks before tearing anything down */
+       unregister_shrinker(&ashmem_shrinker);
+
+       ret = misc_deregister(&ashmem_misc);
+       if (unlikely(ret))
+               printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+
+       /* kmem_cache_destroy() requires both caches to be empty by now */
+       kmem_cache_destroy(ashmem_range_cachep);
+       kmem_cache_destroy(ashmem_area_cachep);
+
+       printk(KERN_INFO "ashmem: unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
new file mode 100644 (file)
index 0000000..1976b10
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN                256
+
+#define ASHMEM_NAME_DEF                "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED      0
+#define ASHMEM_WAS_PURGED      1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED     0
+#define ASHMEM_IS_PINNED       1
+
+struct ashmem_pin {
+       __u32 offset;   /* offset into region, in bytes, page-aligned */
+       __u32 len;      /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC            0x77
+
+#define ASHMEM_SET_NAME                _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME                _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE                _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE                _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK   _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK   _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN             _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN           _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS  _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES        _IO(__ASHMEMIOC, 10)
+
+#endif /* _LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
new file mode 100644 (file)
index 0000000..ca2a32d
--- /dev/null
@@ -0,0 +1,3640 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct dentry *binder_debugfs_state;
+static struct dentry *binder_debugfs_stats;
+static struct dentry *binder_debugfs_transactions;
+static struct dentry *binder_debugfs_transaction_log;
+static struct dentry *binder_debugfs_failed_transaction_log;
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+       return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+       .owner = THIS_MODULE, \
+       .open = binder_##name##_open, \
+       .read = seq_read, \
+       .llseek = seq_lseek, \
+       .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K                               0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M                               0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+       BINDER_DEBUG_USER_ERROR             = 1U << 0,
+       BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
+       BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
+       BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
+       BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
+       BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
+       BINDER_DEBUG_READ_WRITE             = 1U << 6,
+       BINDER_DEBUG_USER_REFS              = 1U << 7,
+       BINDER_DEBUG_THREADS                = 1U << 8,
+       BINDER_DEBUG_TRANSACTION            = 1U << 9,
+       BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
+       BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
+       BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
+       BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
+       BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
+       BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+       BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static bool binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+/*
+ * module_param setter for stop_on_user_error: stores the new value and,
+ * when it is below 2, wakes threads parked on binder_user_error_wait
+ * (binder_user_error() sets the flag to 2 to stall them).
+ */
+static int binder_set_stop_on_user_error(const char *val,
+                                        struct kernel_param *kp)
+{
+       int ret;
+       ret = param_set_int(val, kp);
+       if (binder_stop_on_user_error < 2)
+               wake_up(&binder_user_error_wait);
+       return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+       param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+       do { \
+               if (binder_debug_mask & mask) \
+                       printk(KERN_INFO x); \
+       } while (0)
+
+#define binder_user_error(x...) \
+       do { \
+               if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+                       printk(KERN_INFO x); \
+               if (binder_stop_on_user_error) \
+                       binder_stop_on_user_error = 2; \
+       } while (0)
+
+enum binder_stat_types {
+       BINDER_STAT_PROC,
+       BINDER_STAT_THREAD,
+       BINDER_STAT_NODE,
+       BINDER_STAT_REF,
+       BINDER_STAT_DEATH,
+       BINDER_STAT_TRANSACTION,
+       BINDER_STAT_TRANSACTION_COMPLETE,
+       BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+       int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+       int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+       int obj_created[BINDER_STAT_COUNT];
+       int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+/* bump the global deleted-object counter for the given stat type.
+ * NOTE(review): plain non-atomic increment -- presumably serialized by
+ * binder_lock; confirm. */
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+       binder_stats.obj_deleted[type]++;
+}
+
+/* bump the global created-object counter for the given stat type */
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+       binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+       int debug_id;
+       int call_type;
+       int from_proc;
+       int from_thread;
+       int target_handle;
+       int to_proc;
+       int to_thread;
+       int to_node;
+       int data_size;
+       int offsets_size;
+};
+struct binder_transaction_log {
+       int next;
+       int full;
+       struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+/*
+ * Reserve and zero the next slot of a fixed-size (32-entry) transaction
+ * log ring buffer; wraps to slot 0 and sets 'full' after the first wrap.
+ * NOTE(review): no locking here -- presumably callers hold binder_lock;
+ * confirm.
+ */
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+       struct binder_transaction_log *log)
+{
+       struct binder_transaction_log_entry *e;
+       e = &log->entry[log->next];
+       memset(e, 0, sizeof(*e));
+       log->next++;
+       if (log->next == ARRAY_SIZE(log->entry)) {
+               log->next = 0;
+               log->full = 1;
+       }
+       return e;
+}
+
+struct binder_work {
+       struct list_head entry;
+       enum {
+               BINDER_WORK_TRANSACTION = 1,
+               BINDER_WORK_TRANSACTION_COMPLETE,
+               BINDER_WORK_NODE,
+               BINDER_WORK_DEAD_BINDER,
+               BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+               BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+       } type;
+};
+
+struct binder_node {
+       int debug_id;
+       struct binder_work work;
+       union {
+               struct rb_node rb_node;
+               struct hlist_node dead_node;
+       };
+       struct binder_proc *proc;
+       struct hlist_head refs;
+       int internal_strong_refs;
+       int local_weak_refs;
+       int local_strong_refs;
+       void __user *ptr;
+       void __user *cookie;
+       unsigned has_strong_ref:1;
+       unsigned pending_strong_ref:1;
+       unsigned has_weak_ref:1;
+       unsigned pending_weak_ref:1;
+       unsigned has_async_transaction:1;
+       unsigned accept_fds:1;
+       unsigned min_priority:8;
+       struct list_head async_todo;
+};
+
+struct binder_ref_death {
+       struct binder_work work;
+       void __user *cookie;
+};
+
+struct binder_ref {
+       /* Lookups needed: */
+       /*   node + proc => ref (transaction) */
+       /*   desc + proc => ref (transaction, inc/dec ref) */
+       /*   node => refs + procs (proc exit) */
+       int debug_id;
+       struct rb_node rb_node_desc;
+       struct rb_node rb_node_node;
+       struct hlist_node node_entry;
+       struct binder_proc *proc;
+       struct binder_node *node;
+       uint32_t desc;
+       int strong;
+       int weak;
+       struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+       struct list_head entry; /* free and allocated entries by address */
+       struct rb_node rb_node; /* free entry by size or allocated entry */
+                               /* by address */
+       unsigned free:1;
+       unsigned allow_user_free:1;
+       unsigned async_transaction:1;
+       unsigned debug_id:29;
+
+       struct binder_transaction *transaction;
+
+       struct binder_node *target_node;
+       size_t data_size;
+       size_t offsets_size;
+       uint8_t data[0];
+};
+
+enum binder_deferred_state {
+       BINDER_DEFERRED_PUT_FILES    = 0x01,
+       BINDER_DEFERRED_FLUSH        = 0x02,
+       BINDER_DEFERRED_RELEASE      = 0x04,
+};
+
+struct binder_proc {
+       struct hlist_node proc_node;
+       struct rb_root threads;
+       struct rb_root nodes;
+       struct rb_root refs_by_desc;
+       struct rb_root refs_by_node;
+       int pid;
+       struct vm_area_struct *vma;
+       struct mm_struct *vma_vm_mm;
+       struct task_struct *tsk;
+       struct files_struct *files;
+       struct hlist_node deferred_work_node;
+       int deferred_work;
+       void *buffer;
+       ptrdiff_t user_buffer_offset;
+
+       struct list_head buffers;
+       struct rb_root free_buffers;
+       struct rb_root allocated_buffers;
+       size_t free_async_space;
+
+       struct page **pages;
+       size_t buffer_size;
+       uint32_t buffer_free;
+       struct list_head todo;
+       wait_queue_head_t wait;
+       struct binder_stats stats;
+       struct list_head delivered_death;
+       int max_threads;
+       int requested_threads;
+       int requested_threads_started;
+       int ready_threads;
+       long default_priority;
+       struct dentry *debugfs_entry;
+};
+
+enum {
+       BINDER_LOOPER_STATE_REGISTERED  = 0x01,
+       BINDER_LOOPER_STATE_ENTERED     = 0x02,
+       BINDER_LOOPER_STATE_EXITED      = 0x04,
+       BINDER_LOOPER_STATE_INVALID     = 0x08,
+       BINDER_LOOPER_STATE_WAITING     = 0x10,
+       BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+       struct binder_proc *proc;
+       struct rb_node rb_node;
+       int pid;
+       int looper;
+       struct binder_transaction *transaction_stack;
+       struct list_head todo;
+       uint32_t return_error; /* Write failed, return error code in read buf */
+       uint32_t return_error2; /* Write failed, return error code in read */
+               /* buffer. Used when sending a reply to a dead process that */
+               /* we are also waiting on */
+       wait_queue_head_t wait;
+       struct binder_stats stats;
+};
+
+struct binder_transaction {
+       int debug_id;
+       struct binder_work work;
+       struct binder_thread *from;
+       struct binder_transaction *from_parent;
+       struct binder_proc *to_proc;
+       struct binder_thread *to_thread;
+       struct binder_transaction *to_parent;
+       unsigned need_reply:1;
+       /* unsigned is_dead:1; */       /* not used at the moment */
+
+       struct binder_buffer *buffer;
+       unsigned int    code;
+       unsigned int    flags;
+       long    priority;
+       long    saved_priority;
+       uid_t   sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+/*
+ * copied from get_unused_fd_flags
+ */
+/*
+ * Allocate an unused file descriptor in the *target* binder_proc's file
+ * table (not current's).  Returns the fd, or -ESRCH if the target's
+ * files_struct is gone, or -EMFILE on limit/expansion failure.
+ */
+int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+       struct files_struct *files = proc->files;
+       int fd, error;
+       struct fdtable *fdt;
+       unsigned long rlim_cur;
+       unsigned long irqs;
+
+       if (files == NULL)
+               return -ESRCH;
+
+       error = -EMFILE;
+       spin_lock(&files->file_lock);
+
+repeat:
+       fdt = files_fdtable(files);
+       fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
+
+       /*
+        * N.B. For clone tasks sharing a files structure, this test
+        * will limit the total number of files that can be opened.
+        */
+       /* if the sighand lock cannot be taken, rlim_cur stays 0 and the
+        * check below rejects every fd with -EMFILE */
+       rlim_cur = 0;
+       if (lock_task_sighand(proc->tsk, &irqs)) {
+               rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+               unlock_task_sighand(proc->tsk, &irqs);
+       }
+       if (fd >= rlim_cur)
+               goto out;
+
+       /* Do we need to expand the fd array or fd set?  */
+       error = expand_files(files, fd);
+       if (error < 0)
+               goto out;
+
+       if (error) {
+               /*
+                * If we needed to expand the fs array we
+                * might have blocked - try again.
+                */
+               error = -EMFILE;
+               goto repeat;
+       }
+
+       __set_open_fd(fd, fdt);
+       if (flags & O_CLOEXEC)
+               __set_close_on_exec(fd, fdt);
+       else
+               __clear_close_on_exec(fd, fdt);
+       files->next_fd = fd + 1;
+#if 1
+       /* Sanity check */
+       if (fdt->fd[fd] != NULL) {
+               printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+               fdt->fd[fd] = NULL;
+       }
+#endif
+       error = fd;
+
+out:
+       spin_unlock(&files->file_lock);
+       return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+       struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+       struct files_struct *files = proc->files;
+       struct fdtable *fdt;
+
+       /* target's files_struct is gone: silently drop the request.
+        * NOTE(review): presumably the caller still owns the file's
+        * reference in that case -- confirm. */
+       if (files == NULL)
+               return;
+
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       BUG_ON(fdt->fd[fd] != NULL);
+       rcu_assign_pointer(fdt->fd[fd], file);
+       spin_unlock(&files->file_lock);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+/* mark fd free in the table; caller holds files->file_lock */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+       struct fdtable *fdt = files_fdtable(files);
+       __clear_open_fd(fd, fdt);
+       /* let the next allocation start at the lowest free descriptor */
+       if (fd < files->next_fd)
+               files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+/*
+ * Close fd in the target binder_proc's file table.  Returns the
+ * filp_close() result (-ERESTART* mapped to -EINTR), -ESRCH if the
+ * files_struct is gone, or -EBADF for an invalid/unopened fd.
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+       struct file *filp;
+       struct files_struct *files = proc->files;
+       struct fdtable *fdt;
+       int retval;
+
+       if (files == NULL)
+               return -ESRCH;
+
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       if (fd >= fdt->max_fds)
+               goto out_unlock;
+       filp = fdt->fd[fd];
+       if (!filp)
+               goto out_unlock;
+       /* detach the file from the table before closing it */
+       rcu_assign_pointer(fdt->fd[fd], NULL);
+       __clear_close_on_exec(fd, fdt);
+       __put_unused_fd(files, fd);
+       spin_unlock(&files->file_lock);
+       retval = filp_close(filp, files);
+
+       /* can't restart close syscall because file table entry was cleared */
+       if (unlikely(retval == -ERESTARTSYS ||
+                    retval == -ERESTARTNOINTR ||
+                    retval == -ERESTARTNOHAND ||
+                    retval == -ERESTART_RESTARTBLOCK))
+               retval = -EINTR;
+
+       return retval;
+
+out_unlock:
+       spin_unlock(&files->file_lock);
+       return -EBADF;
+}
+
+/*
+ * Set the current task's nice value; if the requested value is not
+ * permitted, clamp to the best value RLIMIT_NICE allows.
+ */
+static void binder_set_nice(long nice)
+{
+       long min_nice;
+       if (can_nice(current, nice)) {
+               set_user_nice(current, nice);
+               return;
+       }
+       /* lowest (most favourable) nice this task may request */
+       min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+       binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+                    "binder: %d: nice value %ld not allowed use "
+                    "%ld instead\n", current->pid, nice, min_nice);
+       set_user_nice(current, min_nice);
+       if (min_nice < 20)
+               return;
+       /* min_nice == 20 means RLIMIT_NICE is 0, i.e. not set at all */
+       binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
+/*
+ * Usable data size of @buffer: the span from its data[] member up to the
+ * next buffer header in the proc's address-ordered list, or up to the
+ * end of the whole buffer pool for the last entry.
+ */
+static size_t binder_buffer_size(struct binder_proc *proc,
+                                struct binder_buffer *buffer)
+{
+       if (list_is_last(&buffer->entry, &proc->buffers))
+               return proc->buffer + proc->buffer_size - (void *)buffer->data;
+       else
+               return (size_t)list_entry(buffer->entry.next,
+                       struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+/*
+ * Insert @new_buffer into the proc's free-buffers rb-tree, which is
+ * keyed by buffer size (duplicates go right), enabling best-fit lookup
+ * by the allocator.
+ */
+static void binder_insert_free_buffer(struct binder_proc *proc,
+                                     struct binder_buffer *new_buffer)
+{
+       struct rb_node **p = &proc->free_buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_buffer *buffer;
+       size_t buffer_size;
+       size_t new_buffer_size;
+
+       BUG_ON(!new_buffer->free);
+
+       new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "binder: %d: add free buffer, size %zd, "
+                    "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+       while (*p) {
+               parent = *p;
+               buffer = rb_entry(parent, struct binder_buffer, rb_node);
+               BUG_ON(!buffer->free);
+
+               buffer_size = binder_buffer_size(proc, buffer);
+
+               if (new_buffer_size < buffer_size)
+                       p = &parent->rb_left;
+               else
+                       p = &parent->rb_right;
+       }
+       rb_link_node(&new_buffer->rb_node, parent, p);
+       rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+/*
+ * Insert @new_buffer into the proc's allocated-buffers rb-tree, which is
+ * keyed by kernel address (unique, hence BUG() on a duplicate), so
+ * binder_buffer_lookup() can translate pointers back to buffers.
+ */
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+                                          struct binder_buffer *new_buffer)
+{
+       struct rb_node **p = &proc->allocated_buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_buffer *buffer;
+
+       BUG_ON(new_buffer->free);
+
+       while (*p) {
+               parent = *p;
+               buffer = rb_entry(parent, struct binder_buffer, rb_node);
+               BUG_ON(buffer->free);
+
+               if (new_buffer < buffer)
+                       p = &parent->rb_left;
+               else if (new_buffer > buffer)
+                       p = &parent->rb_right;
+               else
+                       BUG();
+       }
+       rb_link_node(&new_buffer->rb_node, parent, p);
+       rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+/*
+ * Translate a user-space buffer pointer back into the owning kernel
+ * binder_buffer: subtract the per-proc user/kernel offset and the data[]
+ * header offset, then look the result up in the address-keyed
+ * allocated-buffers rb-tree.  Returns NULL if no allocated buffer
+ * matches.
+ */
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+                                                 void __user *user_ptr)
+{
+       struct rb_node *n = proc->allocated_buffers.rb_node;
+       struct binder_buffer *buffer;
+       struct binder_buffer *kern_ptr;
+
+       kern_ptr = user_ptr - proc->user_buffer_offset
+               - offsetof(struct binder_buffer, data);
+
+       while (n) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               BUG_ON(buffer->free);
+
+               if (kern_ptr < buffer)
+                       n = n->rb_left;
+               else if (kern_ptr > buffer)
+                       n = n->rb_right;
+               else
+                       return buffer;
+       }
+       return NULL;
+}
+
+/*
+ * Allocate (allocate != 0) or free the kernel pages backing [start, end)
+ * of the proc's binder buffer, mapping/unmapping each page into both the
+ * kernel address space and the process's vma.  Returns 0 on success or
+ * -ENOMEM on failure; pages already set up in a failed batch are rolled
+ * back via the interleaved error labels in the free loop below.
+ */
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+                                   void *start, void *end,
+                                   struct vm_area_struct *vma)
+{
+       void *page_addr;
+       unsigned long user_page_addr;
+       struct vm_struct tmp_area;
+       struct page **page;
+       struct mm_struct *mm;
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "binder: %d: %s pages %p-%p\n", proc->pid,
+                    allocate ? "allocate" : "free", start, end);
+
+       if (end <= start)
+               return 0;
+
+       /* a vma is passed in directly only from the mmap path; otherwise
+        * take a reference on the task's mm and use the recorded vma */
+       if (vma)
+               mm = NULL;
+       else
+               mm = get_task_mm(proc->tsk);
+
+       if (mm) {
+               down_write(&mm->mmap_sem);
+               vma = proc->vma;
+               if (vma && mm != proc->vma_vm_mm) {
+                       pr_err("binder: %d: vma mm and task mm mismatch\n",
+                               proc->pid);
+                       vma = NULL;
+               }
+       }
+
+       if (allocate == 0)
+               goto free_range;
+
+       if (vma == NULL) {
+               printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+                      "map pages in userspace, no vma\n", proc->pid);
+               goto err_no_vma;
+       }
+
+       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+               int ret;
+               struct page **page_array_ptr;
+               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+               BUG_ON(*page);
+               *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (*page == NULL) {
+                       printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+                              "for page at %p\n", proc->pid, page_addr);
+                       goto err_alloc_page_failed;
+               }
+               /* map the page into the kernel address space ... */
+               tmp_area.addr = page_addr;
+               tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+               page_array_ptr = page;
+               ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+               if (ret) {
+                       printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+                              "to map page at %p in kernel\n",
+                              proc->pid, page_addr);
+                       goto err_map_kernel_failed;
+               }
+               /* ... and at the corresponding user address in the vma */
+               user_page_addr =
+                       (uintptr_t)page_addr + proc->user_buffer_offset;
+               ret = vm_insert_page(vma, user_page_addr, page[0]);
+               if (ret) {
+                       printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+                              "to map page at %lx in userspace\n",
+                              proc->pid, user_page_addr);
+                       goto err_vm_insert_page_failed;
+               }
+               /* vm_insert_page does not seem to increment the refcount */
+       }
+       if (mm) {
+               up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return 0;
+
+free_range:
+       /*
+        * Free path.  The error labels are interleaved inside this loop on
+        * purpose: a failure mid-page in the allocate loop jumps to the
+        * matching point so only the work completed for that page is
+        * undone, then the loop unwinds all earlier pages.
+        */
+       for (page_addr = end - PAGE_SIZE; page_addr >= start;
+            page_addr -= PAGE_SIZE) {
+               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+               if (vma)
+                       zap_page_range(vma, (uintptr_t)page_addr +
+                               proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+               unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+               __free_page(*page);
+               *page = NULL;
+err_alloc_page_failed:
+               ;
+       }
+err_no_vma:
+       if (mm) {
+               up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return -ENOMEM;
+}
+
+/*
+ * binder_alloc_buf() - allocate a transaction buffer out of proc's
+ * mmap'ed region.  Does a best-fit search over the free_buffers
+ * rb-tree, backs the needed pages via binder_update_page_range(),
+ * and splits any unused tail off as a new free buffer.
+ * Returns NULL on failure: no vma, size overflow, async quota or
+ * address space exhausted, or page allocation failed.
+ */
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+                                             size_t data_size,
+                                             size_t offsets_size, int is_async)
+{
+       struct rb_node *n = proc->free_buffers.rb_node;
+       struct binder_buffer *buffer;
+       size_t buffer_size;
+       struct rb_node *best_fit = NULL;
+       void *has_page_addr;
+       void *end_page_addr;
+       size_t size;
+
+       if (proc->vma == NULL) {
+               printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+                      proc->pid);
+               return NULL;
+       }
+
+       size = ALIGN(data_size, sizeof(void *)) +
+               ALIGN(offsets_size, sizeof(void *));
+
+       /* the aligned sum can wrap; a result smaller than either input
+        * means the caller passed bogus sizes */
+       if (size < data_size || size < offsets_size) {
+               binder_user_error("binder: %d: got transaction with invalid "
+                       "size %zd-%zd\n", proc->pid, data_size, offsets_size);
+               return NULL;
+       }
+
+       if (is_async &&
+           proc->free_async_space < size + sizeof(struct binder_buffer)) {
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "binder: %d: binder_alloc_buf size %zd"
+                            "failed, no async space left\n", proc->pid, size);
+               return NULL;
+       }
+
+       /* best fit: remember the smallest free buffer that still fits */
+       while (n) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               BUG_ON(!buffer->free);
+               buffer_size = binder_buffer_size(proc, buffer);
+
+               if (size < buffer_size) {
+                       best_fit = n;
+                       n = n->rb_left;
+               } else if (size > buffer_size)
+                       n = n->rb_right;
+               else {
+                       best_fit = n;
+                       break;
+               }
+       }
+       if (best_fit == NULL) {
+               printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+                      "no address space\n", proc->pid, size);
+               return NULL;
+       }
+       if (n == NULL) {
+               /* inexact fit: n ran off the tree, reload from best_fit */
+               buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+               buffer_size = binder_buffer_size(proc, buffer);
+       }
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "binder: %d: binder_alloc_buf size %zd got buff"
+                    "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+       has_page_addr =
+               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+       if (n == NULL) {
+               if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+                       buffer_size = size; /* no room for other buffers */
+               else
+                       buffer_size = size + sizeof(struct binder_buffer);
+       }
+       end_page_addr =
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+       /* don't map past the page holding the tail of the free chunk */
+       if (end_page_addr > has_page_addr)
+               end_page_addr = has_page_addr;
+       if (binder_update_page_range(proc, 1,
+           (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+               return NULL;
+
+       rb_erase(best_fit, &proc->free_buffers);
+       buffer->free = 0;
+       binder_insert_allocated_buffer(proc, buffer);
+       if (buffer_size != size) {
+               /* split: the unused tail becomes a new free buffer */
+               struct binder_buffer *new_buffer = (void *)buffer->data + size;
+               list_add(&new_buffer->entry, &buffer->entry);
+               new_buffer->free = 1;
+               binder_insert_free_buffer(proc, new_buffer);
+       }
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "binder: %d: binder_alloc_buf size %zd got "
+                    "%p\n", proc->pid, size, buffer);
+       buffer->data_size = data_size;
+       buffer->offsets_size = offsets_size;
+       buffer->async_transaction = is_async;
+       if (is_async) {
+               proc->free_async_space -= size + sizeof(struct binder_buffer);
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+                            "binder: %d: binder_alloc_buf size %zd "
+                            "async free %zd\n", proc->pid, size,
+                            proc->free_async_space);
+       }
+
+       return buffer;
+}
+
+/* Address of the page containing the first byte of @buffer's header. */
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+       return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+/* Address of the page containing the last byte of @buffer's header. */
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+       return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+/*
+ * binder_delete_free_buffer() - remove a free buffer from the proc's
+ * buffer list and release any whole pages it no longer shares with
+ * its neighbours.  The start (end) page is kept mapped when the
+ * previous (next) buffer's header also lives on it.
+ */
+static void binder_delete_free_buffer(struct binder_proc *proc,
+                                     struct binder_buffer *buffer)
+{
+       struct binder_buffer *prev, *next = NULL;
+       int free_page_end = 1;
+       int free_page_start = 1;
+
+       BUG_ON(proc->buffers.next == &buffer->entry);
+       prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+       BUG_ON(!prev->free);
+       if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+               free_page_start = 0;
+               if (buffer_end_page(prev) == buffer_end_page(buffer))
+                       free_page_end = 0;
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "binder: %d: merge free, buffer %p "
+                            "share page with %p\n", proc->pid, buffer, prev);
+       }
+
+       if (!list_is_last(&buffer->entry, &proc->buffers)) {
+               next = list_entry(buffer->entry.next,
+                                 struct binder_buffer, entry);
+               if (buffer_start_page(next) == buffer_end_page(buffer)) {
+                       free_page_end = 0;
+                       if (buffer_start_page(next) ==
+                           buffer_start_page(buffer))
+                               free_page_start = 0;
+                       /* log the correct neighbour (was mistakenly 'prev') */
+                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                    "binder: %d: merge free, buffer"
+                                    " %p share page with %p\n", proc->pid,
+                                    buffer, next);
+               }
+       }
+       list_del(&buffer->entry);
+       if (free_page_start || free_page_end) {
+               /* fixed doubled word "with with" in the debug format */
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "binder: %d: merge free, buffer %p do "
+                            "not share page%s%s with %p or %p\n",
+                            proc->pid, buffer, free_page_start ? "" : " end",
+                            free_page_end ? "" : " start", prev, next);
+               binder_update_page_range(proc, 0, free_page_start ?
+                       buffer_start_page(buffer) : buffer_end_page(buffer),
+                       (free_page_end ? buffer_end_page(buffer) :
+                       buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+       }
+}
+
+/*
+ * binder_free_buf() - return an allocated buffer to the free pool.
+ * Releases the data pages it covers, marks it free, coalesces with an
+ * adjacent free next/prev buffer, then reinserts the (possibly
+ * merged) buffer into the free_buffers rb-tree.
+ */
+static void binder_free_buf(struct binder_proc *proc,
+                           struct binder_buffer *buffer)
+{
+       size_t size, buffer_size;
+
+       buffer_size = binder_buffer_size(proc, buffer);
+
+       size = ALIGN(buffer->data_size, sizeof(void *)) +
+               ALIGN(buffer->offsets_size, sizeof(void *));
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "binder: %d: binder_free_buf %p size %zd buffer"
+                    "_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+       BUG_ON(buffer->free);
+       BUG_ON(size > buffer_size);
+       BUG_ON(buffer->transaction != NULL);
+       BUG_ON((void *)buffer < proc->buffer);
+       BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+       if (buffer->async_transaction) {
+               /* give the async quota back */
+               proc->free_async_space += size + sizeof(struct binder_buffer);
+
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+                            "binder: %d: binder_free_buf size %zd "
+                            "async free %zd\n", proc->pid, size,
+                            proc->free_async_space);
+       }
+
+       /* unmap only the whole pages fully inside the data area */
+       binder_update_page_range(proc, 0,
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+               NULL);
+       rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+       buffer->free = 1;
+       /* coalesce with a free following buffer: absorb it */
+       if (!list_is_last(&buffer->entry, &proc->buffers)) {
+               struct binder_buffer *next = list_entry(buffer->entry.next,
+                                               struct binder_buffer, entry);
+               if (next->free) {
+                       rb_erase(&next->rb_node, &proc->free_buffers);
+                       binder_delete_free_buffer(proc, next);
+               }
+       }
+       /* coalesce with a free preceding buffer: prev absorbs us */
+       if (proc->buffers.next != &buffer->entry) {
+               struct binder_buffer *prev = list_entry(buffer->entry.prev,
+                                               struct binder_buffer, entry);
+               if (prev->free) {
+                       binder_delete_free_buffer(proc, buffer);
+                       rb_erase(&prev->rb_node, &proc->free_buffers);
+                       buffer = prev;
+               }
+       }
+       binder_insert_free_buffer(proc, buffer);
+}
+
+/*
+ * binder_get_node() - look up the node keyed by the userspace pointer
+ * @ptr in proc's nodes rb-tree; returns NULL if no such node exists.
+ */
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+                                          void __user *ptr)
+{
+       struct rb_node *n = proc->nodes.rb_node;
+       struct binder_node *node;
+
+       while (n) {
+               node = rb_entry(n, struct binder_node, rb_node);
+
+               if (ptr < node->ptr)
+                       n = n->rb_left;
+               else if (ptr > node->ptr)
+                       n = n->rb_right;
+               else
+                       return node;
+       }
+       return NULL;
+}
+
+/*
+ * binder_new_node() - allocate a node for (@ptr, @cookie) and insert
+ * it into proc's nodes rb-tree keyed by @ptr.  Returns NULL if a node
+ * for @ptr already exists or kzalloc() fails.
+ */
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+                                          void __user *ptr,
+                                          void __user *cookie)
+{
+       struct rb_node **p = &proc->nodes.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_node *node;
+
+       while (*p) {
+               parent = *p;
+               node = rb_entry(parent, struct binder_node, rb_node);
+
+               if (ptr < node->ptr)
+                       p = &(*p)->rb_left;
+               else if (ptr > node->ptr)
+                       p = &(*p)->rb_right;
+               else
+                       /* a node for this ptr already exists */
+                       return NULL;
+       }
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (node == NULL)
+               return NULL;
+       binder_stats_created(BINDER_STAT_NODE);
+       rb_link_node(&node->rb_node, parent, p);
+       rb_insert_color(&node->rb_node, &proc->nodes);
+       node->debug_id = ++binder_last_id;
+       node->proc = proc;
+       node->ptr = ptr;
+       node->cookie = cookie;
+       node->work.type = BINDER_WORK_NODE;
+       INIT_LIST_HEAD(&node->work.entry);
+       INIT_LIST_HEAD(&node->async_todo);
+       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                    "binder: %d:%d node %d u%p c%p created\n",
+                    proc->pid, current->pid, node->debug_id,
+                    node->ptr, node->cookie);
+       return node;
+}
+
+/*
+ * binder_inc_node() - take a strong or weak reference on @node.
+ * @internal selects internal_strong_refs over local_strong_refs for
+ * strong refs; weak internal refs are not counted here.  When the
+ * corresponding has_strong_ref/has_weak_ref flag is not yet set, the
+ * node's work item is (re)queued on @target_list.
+ * Returns 0, or -EINVAL on an invalid increment.
+ */
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+                          struct list_head *target_list)
+{
+       if (strong) {
+               if (internal) {
+                       /* first internal strong ref needs a target list to
+                        * notify, except for the context manager node once
+                        * it already has a strong ref */
+                       if (target_list == NULL &&
+                           node->internal_strong_refs == 0 &&
+                           !(node == binder_context_mgr_node &&
+                           node->has_strong_ref)) {
+                               printk(KERN_ERR "binder: invalid inc strong "
+                                       "node for %d\n", node->debug_id);
+                               return -EINVAL;
+                       }
+                       node->internal_strong_refs++;
+               } else
+                       node->local_strong_refs++;
+               if (!node->has_strong_ref && target_list) {
+                       list_del_init(&node->work.entry);
+                       list_add_tail(&node->work.entry, target_list);
+               }
+       } else {
+               if (!internal)
+                       node->local_weak_refs++;
+               if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+                       if (target_list == NULL) {
+                               printk(KERN_ERR "binder: invalid inc weak node "
+                                       "for %d\n", node->debug_id);
+                               return -EINVAL;
+                       }
+                       list_add_tail(&node->work.entry, target_list);
+               }
+       }
+       return 0;
+}
+
+/*
+ * binder_dec_node() - drop a strong or weak reference on @node and
+ * tear the node down once nothing references it.  While userspace
+ * still holds a ref (has_strong_ref/has_weak_ref) the node's work is
+ * queued on its proc's todo list instead; a fully unreferenced node
+ * is unlinked (from the proc's nodes rb-tree, or from the dead-node
+ * hlist if the proc is gone) and freed.  Always returns 0.
+ */
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+       if (strong) {
+               if (internal)
+                       node->internal_strong_refs--;
+               else
+                       node->local_strong_refs--;
+               if (node->local_strong_refs || node->internal_strong_refs)
+                       return 0;
+       } else {
+               if (!internal)
+                       node->local_weak_refs--;
+               if (node->local_weak_refs || !hlist_empty(&node->refs))
+                       return 0;
+       }
+       if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+               if (list_empty(&node->work.entry)) {
+                       list_add_tail(&node->work.entry, &node->proc->todo);
+                       wake_up_interruptible(&node->proc->wait);
+               }
+       } else {
+               if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+                   !node->local_weak_refs) {
+                       list_del_init(&node->work.entry);
+                       if (node->proc) {
+                               rb_erase(&node->rb_node, &node->proc->nodes);
+                               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                            "binder: refless node %d deleted\n",
+                                            node->debug_id);
+                       } else {
+                               hlist_del(&node->dead_node);
+                               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                            "binder: dead node %d deleted\n",
+                                            node->debug_id);
+                       }
+                       kfree(node);
+                       binder_stats_deleted(BINDER_STAT_NODE);
+               }
+       }
+
+       return 0;
+}
+
+
+/*
+ * binder_get_ref() - look up proc's ref with descriptor @desc in the
+ * refs_by_desc rb-tree; returns NULL if not found.
+ */
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+                                        uint32_t desc)
+{
+       struct rb_node *n = proc->refs_by_desc.rb_node;
+       struct binder_ref *ref;
+
+       while (n) {
+               ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+               if (desc < ref->desc)
+                       n = n->rb_left;
+               else if (desc > ref->desc)
+                       n = n->rb_right;
+               else
+                       return ref;
+       }
+       return NULL;
+}
+
+/*
+ * binder_get_ref_for_node() - return proc's ref to @node, creating
+ * one if none exists.  A new ref is assigned the lowest unused
+ * descriptor (descriptor 0 is reserved for the context manager node)
+ * and linked into both the refs_by_node and refs_by_desc rb-trees and
+ * onto the node's refs hlist.  Returns NULL only on allocation
+ * failure.
+ */
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+                                                 struct binder_node *node)
+{
+       struct rb_node *n;
+       struct rb_node **p = &proc->refs_by_node.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_ref *ref, *new_ref;
+
+       while (*p) {
+               parent = *p;
+               ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+               if (node < ref->node)
+                       p = &(*p)->rb_left;
+               else if (node > ref->node)
+                       p = &(*p)->rb_right;
+               else
+                       return ref;
+       }
+       new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+       if (new_ref == NULL)
+               return NULL;
+       binder_stats_created(BINDER_STAT_REF);
+       new_ref->debug_id = ++binder_last_id;
+       new_ref->proc = proc;
+       new_ref->node = node;
+       rb_link_node(&new_ref->rb_node_node, parent, p);
+       rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+       /* pick the lowest free descriptor by scanning refs in desc order */
+       new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+       for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+               ref = rb_entry(n, struct binder_ref, rb_node_desc);
+               if (ref->desc > new_ref->desc)
+                       break;
+               new_ref->desc = ref->desc + 1;
+       }
+
+       p = &proc->refs_by_desc.rb_node;
+       while (*p) {
+               parent = *p;
+               ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+               if (new_ref->desc < ref->desc)
+                       p = &(*p)->rb_left;
+               else if (new_ref->desc > ref->desc)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+       rb_link_node(&new_ref->rb_node_desc, parent, p);
+       rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+       if (node) {
+               hlist_add_head(&new_ref->node_entry, &node->refs);
+
+               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                            "binder: %d new ref %d desc %d for "
+                            "node %d\n", proc->pid, new_ref->debug_id,
+                            new_ref->desc, node->debug_id);
+       } else {
+               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                            "binder: %d new ref %d desc %d for "
+                            "dead node\n", proc->pid, new_ref->debug_id,
+                             new_ref->desc);
+       }
+       return new_ref;
+}
+
+/*
+ * binder_delete_ref() - unlink @ref from both rb-trees and the node's
+ * refs hlist, drop the node references it held (a strong one if
+ * ref->strong, plus the unconditional weak internal ref), cancel any
+ * pending death notification, and free it.
+ */
+static void binder_delete_ref(struct binder_ref *ref)
+{
+       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                    "binder: %d delete ref %d desc %d for "
+                    "node %d\n", ref->proc->pid, ref->debug_id,
+                    ref->desc, ref->node->debug_id);
+
+       rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+       rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+       if (ref->strong)
+               binder_dec_node(ref->node, 1, 1);
+       hlist_del(&ref->node_entry);
+       binder_dec_node(ref->node, 0, 1);
+       if (ref->death) {
+               binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                            "binder: %d delete ref %d desc %d "
+                            "has death notification\n", ref->proc->pid,
+                            ref->debug_id, ref->desc);
+               list_del(&ref->death->work.entry);
+               kfree(ref->death);
+               binder_stats_deleted(BINDER_STAT_DEATH);
+       }
+       kfree(ref);
+       binder_stats_deleted(BINDER_STAT_REF);
+}
+
+/*
+ * binder_inc_ref() - bump @ref's strong or weak count; on the 0->1
+ * transition also take the matching reference on the node (which may
+ * queue node work on @target_list).  Returns 0 or the error from
+ * binder_inc_node().
+ */
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+                         struct list_head *target_list)
+{
+       int ret;
+       if (strong) {
+               if (ref->strong == 0) {
+                       ret = binder_inc_node(ref->node, 1, 1, target_list);
+                       if (ret)
+                               return ret;
+               }
+               ref->strong++;
+       } else {
+               if (ref->weak == 0) {
+                       ret = binder_inc_node(ref->node, 0, 1, target_list);
+                       if (ret)
+                               return ret;
+               }
+               ref->weak++;
+       }
+       return 0;
+}
+
+
+/*
+ * binder_dec_ref() - drop a strong or weak count on @ref.  Underflow
+ * is reported as a user error and returns -EINVAL.  On the 1->0
+ * strong transition the node's strong ref is dropped; once both
+ * counts reach zero the ref itself is deleted.
+ */
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+       if (strong) {
+               if (ref->strong == 0) {
+                       binder_user_error("binder: %d invalid dec strong, "
+                                         "ref %d desc %d s %d w %d\n",
+                                         ref->proc->pid, ref->debug_id,
+                                         ref->desc, ref->strong, ref->weak);
+                       return -EINVAL;
+               }
+               ref->strong--;
+               if (ref->strong == 0) {
+                       int ret;
+                       ret = binder_dec_node(ref->node, strong, 1);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               if (ref->weak == 0) {
+                       binder_user_error("binder: %d invalid dec weak, "
+                                         "ref %d desc %d s %d w %d\n",
+                                         ref->proc->pid, ref->debug_id,
+                                         ref->desc, ref->strong, ref->weak);
+                       return -EINVAL;
+               }
+               ref->weak--;
+       }
+       if (ref->strong == 0 && ref->weak == 0)
+               binder_delete_ref(ref);
+       return 0;
+}
+
+/*
+ * binder_pop_transaction() - unlink @t from the top of
+ * @target_thread's transaction stack (when a thread is given), detach
+ * it from its buffer, and free it.
+ */
+static void binder_pop_transaction(struct binder_thread *target_thread,
+                                  struct binder_transaction *t)
+{
+       if (target_thread) {
+               BUG_ON(target_thread->transaction_stack != t);
+               BUG_ON(target_thread->transaction_stack->from != target_thread);
+               target_thread->transaction_stack =
+                       target_thread->transaction_stack->from_parent;
+               t->from = NULL;
+       }
+       t->need_reply = 0;
+       if (t->buffer)
+               t->buffer->transaction = NULL;
+       kfree(t);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
+/*
+ * binder_send_failed_reply() - report @error_code to whoever is
+ * waiting for a reply to transaction @t.  If the originating thread
+ * is gone, pop @t and walk up from_parent, retrying until a live
+ * waiter is found or the stack root is reached.  Must not be called
+ * for one-way transactions (BUG_ON TF_ONE_WAY).
+ */
+static void binder_send_failed_reply(struct binder_transaction *t,
+                                    uint32_t error_code)
+{
+       struct binder_thread *target_thread;
+       BUG_ON(t->flags & TF_ONE_WAY);
+       while (1) {
+               target_thread = t->from;
+               if (target_thread) {
+                       /* an earlier error is still pending: park it in
+                        * return_error2 so return_error is free for ours */
+                       if (target_thread->return_error != BR_OK &&
+                          target_thread->return_error2 == BR_OK) {
+                               target_thread->return_error2 =
+                                       target_thread->return_error;
+                               target_thread->return_error = BR_OK;
+                       }
+                       if (target_thread->return_error == BR_OK) {
+                               binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                                            "binder: send failed reply for "
+                                            "transaction %d to %d:%d\n",
+                                             t->debug_id, target_thread->proc->pid,
+                                             target_thread->pid);
+
+                               binder_pop_transaction(target_thread, t);
+                               target_thread->return_error = error_code;
+                               wake_up_interruptible(&target_thread->wait);
+                       } else {
+                               printk(KERN_ERR "binder: reply failed, target "
+                                       "thread, %d:%d, has error code %d "
+                                       "already\n", target_thread->proc->pid,
+                                       target_thread->pid,
+                                       target_thread->return_error);
+                       }
+                       return;
+               } else {
+                       struct binder_transaction *next = t->from_parent;
+
+                       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                                    "binder: send failed reply "
+                                    "for transaction %d, target dead\n",
+                                    t->debug_id);
+
+                       binder_pop_transaction(target_thread, t);
+                       if (next == NULL) {
+                               binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                                            "binder: reply failed,"
+                                            " no target thread at root\n");
+                               return;
+                       }
+                       t = next;
+                       binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                                    "binder: reply failed, no target "
+                                    "thread -- retry %d\n", t->debug_id);
+               }
+       }
+}
+
+/*
+ * binder_transaction_buffer_release() - undo the reference effects of
+ * the flat objects carried in @buffer.  Walks the offsets array and
+ * drops node references for (WEAK_)BINDER objects and ref counts for
+ * (WEAK_)HANDLE objects.  FDs are closed only on a failure path
+ * (@failed_at non-NULL); @failed_at also bounds the walk to the
+ * objects processed before the failure.
+ */
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+                                             struct binder_buffer *buffer,
+                                             size_t *failed_at)
+{
+       size_t *offp, *off_end;
+       int debug_id = buffer->debug_id;
+
+       binder_debug(BINDER_DEBUG_TRANSACTION,
+                    "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+                    proc->pid, buffer->debug_id,
+                    buffer->data_size, buffer->offsets_size, failed_at);
+
+       if (buffer->target_node)
+               binder_dec_node(buffer->target_node, 1, 0);
+
+       /* offsets array lives right after the (aligned) data payload */
+       offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+       if (failed_at)
+               off_end = failed_at;
+       else
+               off_end = (void *)offp + buffer->offsets_size;
+       for (; offp < off_end; offp++) {
+               struct flat_binder_object *fp;
+               /* sanity-check the offset before dereferencing into data */
+               if (*offp > buffer->data_size - sizeof(*fp) ||
+                   buffer->data_size < sizeof(*fp) ||
+                   !IS_ALIGNED(*offp, sizeof(void *))) {
+                       printk(KERN_ERR "binder: transaction release %d bad"
+                                       "offset %zd, size %zd\n", debug_id,
+                                       *offp, buffer->data_size);
+                       continue;
+               }
+               fp = (struct flat_binder_object *)(buffer->data + *offp);
+               switch (fp->type) {
+               case BINDER_TYPE_BINDER:
+               case BINDER_TYPE_WEAK_BINDER: {
+                       struct binder_node *node = binder_get_node(proc, fp->binder);
+                       if (node == NULL) {
+                               printk(KERN_ERR "binder: transaction release %d"
+                                      " bad node %p\n", debug_id, fp->binder);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        node %d u%p\n",
+                                    node->debug_id, node->ptr);
+                       binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+               } break;
+               case BINDER_TYPE_HANDLE:
+               case BINDER_TYPE_WEAK_HANDLE: {
+                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+                       if (ref == NULL) {
+                               printk(KERN_ERR "binder: transaction release %d"
+                                      " bad handle %ld\n", debug_id,
+                                      fp->handle);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        ref %d desc %d (node %d)\n",
+                                    ref->debug_id, ref->desc, ref->node->debug_id);
+                       binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+               } break;
+
+               case BINDER_TYPE_FD:
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        fd %ld\n", fp->handle);
+                       if (failed_at)
+                               task_close_fd(proc, fp->handle);
+                       break;
+
+               default:
+                       printk(KERN_ERR "binder: transaction release %d bad "
+                              "object type %lx\n", debug_id, fp->type);
+                       break;
+               }
+       }
+}
+
+static void binder_transaction(struct binder_proc *proc,
+                              struct binder_thread *thread,
+                              struct binder_transaction_data *tr, int reply)
+{
+       struct binder_transaction *t;
+       struct binder_work *tcomplete;
+       size_t *offp, *off_end;
+       struct binder_proc *target_proc;
+       struct binder_thread *target_thread = NULL;
+       struct binder_node *target_node = NULL;
+       struct list_head *target_list;
+       wait_queue_head_t *target_wait;
+       struct binder_transaction *in_reply_to = NULL;
+       struct binder_transaction_log_entry *e;
+       uint32_t return_error;
+
+       e = binder_transaction_log_add(&binder_transaction_log);
+       e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+       e->from_proc = proc->pid;
+       e->from_thread = thread->pid;
+       e->target_handle = tr->target.handle;
+       e->data_size = tr->data_size;
+       e->offsets_size = tr->offsets_size;
+
+       if (reply) {
+               in_reply_to = thread->transaction_stack;
+               if (in_reply_to == NULL) {
+                       binder_user_error("binder: %d:%d got reply transaction "
+                                         "with no transaction stack\n",
+                                         proc->pid, thread->pid);
+                       return_error = BR_FAILED_REPLY;
+                       goto err_empty_call_stack;
+               }
+               binder_set_nice(in_reply_to->saved_priority);
+               if (in_reply_to->to_thread != thread) {
+                       binder_user_error("binder: %d:%d got reply transaction "
+                               "with bad transaction stack,"
+                               " transaction %d has target %d:%d\n",
+                               proc->pid, thread->pid, in_reply_to->debug_id,
+                               in_reply_to->to_proc ?
+                               in_reply_to->to_proc->pid : 0,
+                               in_reply_to->to_thread ?
+                               in_reply_to->to_thread->pid : 0);
+                       return_error = BR_FAILED_REPLY;
+                       in_reply_to = NULL;
+                       goto err_bad_call_stack;
+               }
+               thread->transaction_stack = in_reply_to->to_parent;
+               target_thread = in_reply_to->from;
+               if (target_thread == NULL) {
+                       return_error = BR_DEAD_REPLY;
+                       goto err_dead_binder;
+               }
+               if (target_thread->transaction_stack != in_reply_to) {
+                       binder_user_error("binder: %d:%d got reply transaction "
+                               "with bad target transaction stack %d, "
+                               "expected %d\n",
+                               proc->pid, thread->pid,
+                               target_thread->transaction_stack ?
+                               target_thread->transaction_stack->debug_id : 0,
+                               in_reply_to->debug_id);
+                       return_error = BR_FAILED_REPLY;
+                       in_reply_to = NULL;
+                       target_thread = NULL;
+                       goto err_dead_binder;
+               }
+               target_proc = target_thread->proc;
+       } else {
+               if (tr->target.handle) {
+                       struct binder_ref *ref;
+                       ref = binder_get_ref(proc, tr->target.handle);
+                       if (ref == NULL) {
+                               binder_user_error("binder: %d:%d got "
+                                       "transaction to invalid handle\n",
+                                       proc->pid, thread->pid);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_invalid_target_handle;
+                       }
+                       target_node = ref->node;
+               } else {
+                       target_node = binder_context_mgr_node;
+                       if (target_node == NULL) {
+                               return_error = BR_DEAD_REPLY;
+                               goto err_no_context_mgr_node;
+                       }
+               }
+               e->to_node = target_node->debug_id;
+               target_proc = target_node->proc;
+               if (target_proc == NULL) {
+                       return_error = BR_DEAD_REPLY;
+                       goto err_dead_binder;
+               }
+               if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+                       struct binder_transaction *tmp;
+                       tmp = thread->transaction_stack;
+                       if (tmp->to_thread != thread) {
+                               binder_user_error("binder: %d:%d got new "
+                                       "transaction with bad transaction stack"
+                                       ", transaction %d has target %d:%d\n",
+                                       proc->pid, thread->pid, tmp->debug_id,
+                                       tmp->to_proc ? tmp->to_proc->pid : 0,
+                                       tmp->to_thread ?
+                                       tmp->to_thread->pid : 0);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_bad_call_stack;
+                       }
+                       while (tmp) {
+                               if (tmp->from && tmp->from->proc == target_proc)
+                                       target_thread = tmp->from;
+                               tmp = tmp->from_parent;
+                       }
+               }
+       }
+       if (target_thread) {
+               e->to_thread = target_thread->pid;
+               target_list = &target_thread->todo;
+               target_wait = &target_thread->wait;
+       } else {
+               target_list = &target_proc->todo;
+               target_wait = &target_proc->wait;
+       }
+       e->to_proc = target_proc->pid;
+
+       /* TODO: reuse incoming transaction for reply */
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (t == NULL) {
+               return_error = BR_FAILED_REPLY;
+               goto err_alloc_t_failed;
+       }
+       binder_stats_created(BINDER_STAT_TRANSACTION);
+
+       tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+       if (tcomplete == NULL) {
+               return_error = BR_FAILED_REPLY;
+               goto err_alloc_tcomplete_failed;
+       }
+       binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+       t->debug_id = ++binder_last_id;
+       e->debug_id = t->debug_id;
+
+       if (reply)
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "binder: %d:%d BC_REPLY %d -> %d:%d, "
+                            "data %p-%p size %zd-%zd\n",
+                            proc->pid, thread->pid, t->debug_id,
+                            target_proc->pid, target_thread->pid,
+                            tr->data.ptr.buffer, tr->data.ptr.offsets,
+                            tr->data_size, tr->offsets_size);
+       else
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "binder: %d:%d BC_TRANSACTION %d -> "
+                            "%d - node %d, data %p-%p size %zd-%zd\n",
+                            proc->pid, thread->pid, t->debug_id,
+                            target_proc->pid, target_node->debug_id,
+                            tr->data.ptr.buffer, tr->data.ptr.offsets,
+                            tr->data_size, tr->offsets_size);
+
+       if (!reply && !(tr->flags & TF_ONE_WAY))
+               t->from = thread;
+       else
+               t->from = NULL;
+       t->sender_euid = proc->tsk->cred->euid;
+       t->to_proc = target_proc;
+       t->to_thread = target_thread;
+       t->code = tr->code;
+       t->flags = tr->flags;
+       t->priority = task_nice(current);
+       t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+               tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+       if (t->buffer == NULL) {
+               return_error = BR_FAILED_REPLY;
+               goto err_binder_alloc_buf_failed;
+       }
+       t->buffer->allow_user_free = 0;
+       t->buffer->debug_id = t->debug_id;
+       t->buffer->transaction = t;
+       t->buffer->target_node = target_node;
+       if (target_node)
+               binder_inc_node(target_node, 1, 0, NULL);
+
+       offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+       if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+               binder_user_error("binder: %d:%d got transaction with invalid "
+                       "data ptr\n", proc->pid, thread->pid);
+               return_error = BR_FAILED_REPLY;
+               goto err_copy_data_failed;
+       }
+       if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+               binder_user_error("binder: %d:%d got transaction with invalid "
+                       "offsets ptr\n", proc->pid, thread->pid);
+               return_error = BR_FAILED_REPLY;
+               goto err_copy_data_failed;
+       }
+       if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+               binder_user_error("binder: %d:%d got transaction with "
+                       "invalid offsets size, %zd\n",
+                       proc->pid, thread->pid, tr->offsets_size);
+               return_error = BR_FAILED_REPLY;
+               goto err_bad_offset;
+       }
+       off_end = (void *)offp + tr->offsets_size;
+       for (; offp < off_end; offp++) {
+               struct flat_binder_object *fp;
+               if (*offp > t->buffer->data_size - sizeof(*fp) ||
+                   t->buffer->data_size < sizeof(*fp) ||
+                   !IS_ALIGNED(*offp, sizeof(void *))) {
+                       binder_user_error("binder: %d:%d got transaction with "
+                               "invalid offset, %zd\n",
+                               proc->pid, thread->pid, *offp);
+                       return_error = BR_FAILED_REPLY;
+                       goto err_bad_offset;
+               }
+               fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+               switch (fp->type) {
+               case BINDER_TYPE_BINDER:
+               case BINDER_TYPE_WEAK_BINDER: {
+                       struct binder_ref *ref;
+                       struct binder_node *node = binder_get_node(proc, fp->binder);
+                       if (node == NULL) {
+                               node = binder_new_node(proc, fp->binder, fp->cookie);
+                               if (node == NULL) {
+                                       return_error = BR_FAILED_REPLY;
+                                       goto err_binder_new_node_failed;
+                               }
+                               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+                               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+                       }
+                       if (fp->cookie != node->cookie) {
+                               binder_user_error("binder: %d:%d sending u%p "
+                                       "node %d, cookie mismatch %p != %p\n",
+                                       proc->pid, thread->pid,
+                                       fp->binder, node->debug_id,
+                                       fp->cookie, node->cookie);
+                               goto err_binder_get_ref_for_node_failed;
+                       }
+                       ref = binder_get_ref_for_node(target_proc, node);
+                       if (ref == NULL) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_for_node_failed;
+                       }
+                       if (fp->type == BINDER_TYPE_BINDER)
+                               fp->type = BINDER_TYPE_HANDLE;
+                       else
+                               fp->type = BINDER_TYPE_WEAK_HANDLE;
+                       fp->handle = ref->desc;
+                       binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+                                      &thread->todo);
+
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        node %d u%p -> ref %d desc %d\n",
+                                    node->debug_id, node->ptr, ref->debug_id,
+                                    ref->desc);
+               } break;
+               case BINDER_TYPE_HANDLE:
+               case BINDER_TYPE_WEAK_HANDLE: {
+                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+                       if (ref == NULL) {
+                               binder_user_error("binder: %d:%d got "
+                                       "transaction with invalid "
+                                       "handle, %ld\n", proc->pid,
+                                       thread->pid, fp->handle);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_failed;
+                       }
+                       if (ref->node->proc == target_proc) {
+                               if (fp->type == BINDER_TYPE_HANDLE)
+                                       fp->type = BINDER_TYPE_BINDER;
+                               else
+                                       fp->type = BINDER_TYPE_WEAK_BINDER;
+                               fp->binder = ref->node->ptr;
+                               fp->cookie = ref->node->cookie;
+                               binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+                               binder_debug(BINDER_DEBUG_TRANSACTION,
+                                            "        ref %d desc %d -> node %d u%p\n",
+                                            ref->debug_id, ref->desc, ref->node->debug_id,
+                                            ref->node->ptr);
+                       } else {
+                               struct binder_ref *new_ref;
+                               new_ref = binder_get_ref_for_node(target_proc, ref->node);
+                               if (new_ref == NULL) {
+                                       return_error = BR_FAILED_REPLY;
+                                       goto err_binder_get_ref_for_node_failed;
+                               }
+                               fp->handle = new_ref->desc;
+                               binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+                               binder_debug(BINDER_DEBUG_TRANSACTION,
+                                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+                                            ref->debug_id, ref->desc, new_ref->debug_id,
+                                            new_ref->desc, ref->node->debug_id);
+                       }
+               } break;
+
+               case BINDER_TYPE_FD: {
+                       int target_fd;
+                       struct file *file;
+
+                       if (reply) {
+                               if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+                                       binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+                                               proc->pid, thread->pid, fp->handle);
+                                       return_error = BR_FAILED_REPLY;
+                                       goto err_fd_not_allowed;
+                               }
+                       } else if (!target_node->accept_fds) {
+                               binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+                                       proc->pid, thread->pid, fp->handle);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_fd_not_allowed;
+                       }
+
+                       file = fget(fp->handle);
+                       if (file == NULL) {
+                               binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+                                       proc->pid, thread->pid, fp->handle);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_fget_failed;
+                       }
+                       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+                       if (target_fd < 0) {
+                               fput(file);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_get_unused_fd_failed;
+                       }
+                       task_fd_install(target_proc, target_fd, file);
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        fd %ld -> %d\n", fp->handle, target_fd);
+                       /* TODO: fput? */
+                       fp->handle = target_fd;
+               } break;
+
+               default:
+                       binder_user_error("binder: %d:%d got transactio"
+                               "n with invalid object type, %lx\n",
+                               proc->pid, thread->pid, fp->type);
+                       return_error = BR_FAILED_REPLY;
+                       goto err_bad_object_type;
+               }
+       }
+       if (reply) {
+               BUG_ON(t->buffer->async_transaction != 0);
+               binder_pop_transaction(target_thread, in_reply_to);
+       } else if (!(t->flags & TF_ONE_WAY)) {
+               BUG_ON(t->buffer->async_transaction != 0);
+               t->need_reply = 1;
+               t->from_parent = thread->transaction_stack;
+               thread->transaction_stack = t;
+       } else {
+               BUG_ON(target_node == NULL);
+               BUG_ON(t->buffer->async_transaction != 1);
+               if (target_node->has_async_transaction) {
+                       target_list = &target_node->async_todo;
+                       target_wait = NULL;
+               } else
+                       target_node->has_async_transaction = 1;
+       }
+       t->work.type = BINDER_WORK_TRANSACTION;
+       list_add_tail(&t->work.entry, target_list);
+       tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+       list_add_tail(&tcomplete->entry, &thread->todo);
+       if (target_wait)
+               wake_up_interruptible(target_wait);
+       return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+       binder_transaction_buffer_release(target_proc, t->buffer, offp);
+       t->buffer->transaction = NULL;
+       binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+       kfree(tcomplete);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+       kfree(t);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                    "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+                    proc->pid, thread->pid, return_error,
+                    tr->data_size, tr->offsets_size);
+
+       {
+               struct binder_transaction_log_entry *fe;
+               fe = binder_transaction_log_add(&binder_transaction_log_failed);
+               *fe = *e;
+       }
+
+       BUG_ON(thread->return_error != BR_OK);
+       if (in_reply_to) {
+               thread->return_error = BR_TRANSACTION_COMPLETE;
+               binder_send_failed_reply(in_reply_to, return_error);
+       } else
+               thread->return_error = return_error;
+}
+
+int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+                       void __user *buffer, int size, signed long *consumed)
+{
+       uint32_t cmd;
+       void __user *ptr = buffer + *consumed;
+       void __user *end = buffer + size;
+
+       while (ptr < end && thread->return_error == BR_OK) {
+               if (get_user(cmd, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+               if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+                       binder_stats.bc[_IOC_NR(cmd)]++;
+                       proc->stats.bc[_IOC_NR(cmd)]++;
+                       thread->stats.bc[_IOC_NR(cmd)]++;
+               }
+               switch (cmd) {
+               case BC_INCREFS:
+               case BC_ACQUIRE:
+               case BC_RELEASE:
+               case BC_DECREFS: {
+                       uint32_t target;
+                       struct binder_ref *ref;
+                       const char *debug_string;
+
+                       if (get_user(target, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (target == 0 && binder_context_mgr_node &&
+                           (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+                               ref = binder_get_ref_for_node(proc,
+                                              binder_context_mgr_node);
+                               if (ref->desc != target) {
+                                       binder_user_error("binder: %d:"
+                                               "%d tried to acquire "
+                                               "reference to desc 0, "
+                                               "got %d instead\n",
+                                               proc->pid, thread->pid,
+                                               ref->desc);
+                               }
+                       } else
+                               ref = binder_get_ref(proc, target);
+                       if (ref == NULL) {
+                               binder_user_error("binder: %d:%d refcou"
+                                       "nt change on invalid ref %d\n",
+                                       proc->pid, thread->pid, target);
+                               break;
+                       }
+                       switch (cmd) {
+                       case BC_INCREFS:
+                               debug_string = "IncRefs";
+                               binder_inc_ref(ref, 0, NULL);
+                               break;
+                       case BC_ACQUIRE:
+                               debug_string = "Acquire";
+                               binder_inc_ref(ref, 1, NULL);
+                               break;
+                       case BC_RELEASE:
+                               debug_string = "Release";
+                               binder_dec_ref(ref, 1);
+                               break;
+                       case BC_DECREFS:
+                       default:
+                               debug_string = "DecRefs";
+                               binder_dec_ref(ref, 0);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_USER_REFS,
+                                    "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+                                    proc->pid, thread->pid, debug_string, ref->debug_id,
+                                    ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+                       break;
+               }
+               case BC_INCREFS_DONE:
+               case BC_ACQUIRE_DONE: {
+                       void __user *node_ptr;
+                       void *cookie;
+                       struct binder_node *node;
+
+                       if (get_user(node_ptr, (void * __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(void *);
+                       if (get_user(cookie, (void * __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(void *);
+                       node = binder_get_node(proc, node_ptr);
+                       if (node == NULL) {
+                               binder_user_error("binder: %d:%d "
+                                       "%s u%p no match\n",
+                                       proc->pid, thread->pid,
+                                       cmd == BC_INCREFS_DONE ?
+                                       "BC_INCREFS_DONE" :
+                                       "BC_ACQUIRE_DONE",
+                                       node_ptr);
+                               break;
+                       }
+                       if (cookie != node->cookie) {
+                               binder_user_error("binder: %d:%d %s u%p node %d"
+                                       " cookie mismatch %p != %p\n",
+                                       proc->pid, thread->pid,
+                                       cmd == BC_INCREFS_DONE ?
+                                       "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+                                       node_ptr, node->debug_id,
+                                       cookie, node->cookie);
+                               break;
+                       }
+                       if (cmd == BC_ACQUIRE_DONE) {
+                               if (node->pending_strong_ref == 0) {
+                                       binder_user_error("binder: %d:%d "
+                                               "BC_ACQUIRE_DONE node %d has "
+                                               "no pending acquire request\n",
+                                               proc->pid, thread->pid,
+                                               node->debug_id);
+                                       break;
+                               }
+                               node->pending_strong_ref = 0;
+                       } else {
+                               if (node->pending_weak_ref == 0) {
+                                       binder_user_error("binder: %d:%d "
+                                               "BC_INCREFS_DONE node %d has "
+                                               "no pending increfs request\n",
+                                               proc->pid, thread->pid,
+                                               node->debug_id);
+                                       break;
+                               }
+                               node->pending_weak_ref = 0;
+                       }
+                       binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+                       binder_debug(BINDER_DEBUG_USER_REFS,
+                                    "binder: %d:%d %s node %d ls %d lw %d\n",
+                                    proc->pid, thread->pid,
+                                    cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+                                    node->debug_id, node->local_strong_refs, node->local_weak_refs);
+                       break;
+               }
+               case BC_ATTEMPT_ACQUIRE:
+                       printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+                       return -EINVAL;
+               case BC_ACQUIRE_RESULT:
+                       printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+                       return -EINVAL;
+
+               case BC_FREE_BUFFER: {
+                       void __user *data_ptr;
+                       struct binder_buffer *buffer;
+
+                       if (get_user(data_ptr, (void * __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(void *);
+
+                       buffer = binder_buffer_lookup(proc, data_ptr);
+                       if (buffer == NULL) {
+                               binder_user_error("binder: %d:%d "
+                                       "BC_FREE_BUFFER u%p no match\n",
+                                       proc->pid, thread->pid, data_ptr);
+                               break;
+                       }
+                       if (!buffer->allow_user_free) {
+                               binder_user_error("binder: %d:%d "
+                                       "BC_FREE_BUFFER u%p matched "
+                                       "unreturned buffer\n",
+                                       proc->pid, thread->pid, data_ptr);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_FREE_BUFFER,
+                                    "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+                                    proc->pid, thread->pid, data_ptr, buffer->debug_id,
+                                    buffer->transaction ? "active" : "finished");
+
+                       if (buffer->transaction) {
+                               buffer->transaction->buffer = NULL;
+                               buffer->transaction = NULL;
+                       }
+                       if (buffer->async_transaction && buffer->target_node) {
+                               BUG_ON(!buffer->target_node->has_async_transaction);
+                               if (list_empty(&buffer->target_node->async_todo))
+                                       buffer->target_node->has_async_transaction = 0;
+                               else
+                                       list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+                       }
+                       binder_transaction_buffer_release(proc, buffer, NULL);
+                       binder_free_buf(proc, buffer);
+                       break;
+               }
+
+               case BC_TRANSACTION:
+               case BC_REPLY: {
+                       struct binder_transaction_data tr;
+
+                       if (copy_from_user(&tr, ptr, sizeof(tr)))
+                               return -EFAULT;
+                       ptr += sizeof(tr);
+                       binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+                       break;
+               }
+
+               case BC_REGISTER_LOOPER:
+                       binder_debug(BINDER_DEBUG_THREADS,
+                                    "binder: %d:%d BC_REGISTER_LOOPER\n",
+                                    proc->pid, thread->pid);
+                       if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
+                               binder_user_error("binder: %d:%d ERROR:"
+                                       " BC_REGISTER_LOOPER called "
+                                       "after BC_ENTER_LOOPER\n",
+                                       proc->pid, thread->pid);
+                       } else if (proc->requested_threads == 0) {
+                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
+                               binder_user_error("binder: %d:%d ERROR:"
+                                       " BC_REGISTER_LOOPER called "
+                                       "without request\n",
+                                       proc->pid, thread->pid);
+                       } else {
+                               proc->requested_threads--;
+                               proc->requested_threads_started++;
+                       }
+                       thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+                       break;
+               case BC_ENTER_LOOPER:
+                       binder_debug(BINDER_DEBUG_THREADS,
+                                    "binder: %d:%d BC_ENTER_LOOPER\n",
+                                    proc->pid, thread->pid);
+                       if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
+                               binder_user_error("binder: %d:%d ERROR:"
+                                       " BC_ENTER_LOOPER called after "
+                                       "BC_REGISTER_LOOPER\n",
+                                       proc->pid, thread->pid);
+                       }
+                       thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+                       break;
+               case BC_EXIT_LOOPER:
+                       binder_debug(BINDER_DEBUG_THREADS,
+                                    "binder: %d:%d BC_EXIT_LOOPER\n",
+                                    proc->pid, thread->pid);
+                       thread->looper |= BINDER_LOOPER_STATE_EXITED;
+                       break;
+
+               case BC_REQUEST_DEATH_NOTIFICATION:
+               case BC_CLEAR_DEATH_NOTIFICATION: {
+                       uint32_t target;
+                       void __user *cookie;
+                       struct binder_ref *ref;
+                       struct binder_ref_death *death;
+
+                       if (get_user(target, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (get_user(cookie, (void __user * __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(void *);
+                       ref = binder_get_ref(proc, target);
+                       if (ref == NULL) {
+                               binder_user_error("binder: %d:%d %s "
+                                       "invalid ref %d\n",
+                                       proc->pid, thread->pid,
+                                       cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+                                       "BC_REQUEST_DEATH_NOTIFICATION" :
+                                       "BC_CLEAR_DEATH_NOTIFICATION",
+                                       target);
+                               break;
+                       }
+
+                       binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+                                    "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+                                    proc->pid, thread->pid,
+                                    cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+                                    "BC_REQUEST_DEATH_NOTIFICATION" :
+                                    "BC_CLEAR_DEATH_NOTIFICATION",
+                                    cookie, ref->debug_id, ref->desc,
+                                    ref->strong, ref->weak, ref->node->debug_id);
+
+                       if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+                               if (ref->death) {
+                                       binder_user_error("binder: %d:%"
+                                               "d BC_REQUEST_DEATH_NOTI"
+                                               "FICATION death notific"
+                                               "ation already set\n",
+                                               proc->pid, thread->pid);
+                                       break;
+                               }
+                               death = kzalloc(sizeof(*death), GFP_KERNEL);
+                               if (death == NULL) {
+                                       thread->return_error = BR_ERROR;
+                                       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                                                    "binder: %d:%d "
+                                                    "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+                                                    proc->pid, thread->pid);
+                                       break;
+                               }
+                               binder_stats_created(BINDER_STAT_DEATH);
+                               INIT_LIST_HEAD(&death->work.entry);
+                               death->cookie = cookie;
+                               ref->death = death;
+                               if (ref->node->proc == NULL) {
+                                       ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+                                       if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+                                               list_add_tail(&ref->death->work.entry, &thread->todo);
+                                       } else {
+                                               list_add_tail(&ref->death->work.entry, &proc->todo);
+                                               wake_up_interruptible(&proc->wait);
+                                       }
+                               }
+                       } else {
+                               if (ref->death == NULL) {
+                                       binder_user_error("binder: %d:%"
+                                               "d BC_CLEAR_DEATH_NOTIFI"
+                                               "CATION death notificat"
+                                               "ion not active\n",
+                                               proc->pid, thread->pid);
+                                       break;
+                               }
+                               death = ref->death;
+                               if (death->cookie != cookie) {
+                                       binder_user_error("binder: %d:%"
+                                               "d BC_CLEAR_DEATH_NOTIFI"
+                                               "CATION death notificat"
+                                               "ion cookie mismatch "
+                                               "%p != %p\n",
+                                               proc->pid, thread->pid,
+                                               death->cookie, cookie);
+                                       break;
+                               }
+                               ref->death = NULL;
+                               if (list_empty(&death->work.entry)) {
+                                       death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+                                       if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+                                               list_add_tail(&death->work.entry, &thread->todo);
+                                       } else {
+                                               list_add_tail(&death->work.entry, &proc->todo);
+                                               wake_up_interruptible(&proc->wait);
+                                       }
+                               } else {
+                                       BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+                                       death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+                               }
+                       }
+               } break;
+               case BC_DEAD_BINDER_DONE: {
+                       struct binder_work *w;
+                       void __user *cookie;
+                       struct binder_ref_death *death = NULL;
+                       if (get_user(cookie, (void __user * __user *)ptr))
+                               return -EFAULT;
+
+                       ptr += sizeof(void *);
+                       list_for_each_entry(w, &proc->delivered_death, entry) {
+                               struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+                               if (tmp_death->cookie == cookie) {
+                                       death = tmp_death;
+                                       break;
+                               }
+                       }
+                       binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                                    "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+                                    proc->pid, thread->pid, cookie, death);
+                       if (death == NULL) {
+                               binder_user_error("binder: %d:%d BC_DEAD"
+                                       "_BINDER_DONE %p not found\n",
+                                       proc->pid, thread->pid, cookie);
+                               break;
+                       }
+
+                       list_del_init(&death->work.entry);
+                       if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+                               death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+                               if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+                                       list_add_tail(&death->work.entry, &thread->todo);
+                               } else {
+                                       list_add_tail(&death->work.entry, &proc->todo);
+                                       wake_up_interruptible(&proc->wait);
+                               }
+                       }
+               } break;
+
+               default:
+                       printk(KERN_ERR "binder: %d:%d unknown command %d\n",
+                              proc->pid, thread->pid, cmd);
+                       return -EINVAL;
+               }
+               *consumed = ptr - buffer;
+       }
+       return 0;
+}
+
+/*
+ * Record one BR_* (binder return) command in the global, per-process
+ * and per-thread statistics tables.  Commands whose ioctl number falls
+ * outside the table are silently ignored.
+ */
+void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+                   uint32_t cmd)
+{
+       unsigned int idx = _IOC_NR(cmd);
+
+       if (idx >= ARRAY_SIZE(binder_stats.br))
+               return;
+       binder_stats.br[idx]++;
+       proc->stats.br[idx]++;
+       thread->stats.br[idx]++;
+}
+
+/*
+ * True when there is process-wide work to deliver, or when this thread
+ * has been asked to return to user space (NEED_RETURN).
+ */
+static int binder_has_proc_work(struct binder_proc *proc,
+                               struct binder_thread *thread)
+{
+       if (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)
+               return 1;
+       return !list_empty(&proc->todo);
+}
+
+/*
+ * True when this thread has private work queued, a pending error to
+ * report, or must return to user space (NEED_RETURN).
+ */
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+       if (!list_empty(&thread->todo))
+               return 1;
+       if (thread->return_error != BR_OK)
+               return 1;
+       return (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN) != 0;
+}
+
+/*
+ * binder_thread_read() - fill the user-supplied read buffer with BR_*
+ * commands (errors, ref-count requests, death notifications and
+ * transactions).
+ *
+ * Called with binder_lock held; the lock is dropped around the blocking
+ * wait and re-acquired before work is consumed.  On return *consumed is
+ * the number of bytes written into @buffer.  Returns 0 on success,
+ * -EFAULT on a failed user copy, -EAGAIN for a non-blocking read with
+ * no work, or the (negative) result of an interrupted wait.
+ */
+static int binder_thread_read(struct binder_proc *proc,
+                             struct binder_thread *thread,
+                             void  __user *buffer, int size,
+                             signed long *consumed, int non_block)
+{
+       void __user *ptr = buffer + *consumed;
+       void __user *end = buffer + size;
+
+       int ret = 0;
+       int wait_for_proc_work;
+
+       /* every fresh read starts with a BR_NOOP so user space can parse
+        * the buffer uniformly */
+       if (*consumed == 0) {
+               if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+       }
+
+retry:
+       /* an idle thread (no transaction stack, no private todo) serves
+        * the process-wide todo list instead of its own */
+       wait_for_proc_work = thread->transaction_stack == NULL &&
+                               list_empty(&thread->todo);
+
+       /* pending errors are delivered before any queued work; error2 is
+        * reported first and may fill the buffer on its own */
+       if (thread->return_error != BR_OK && ptr < end) {
+               if (thread->return_error2 != BR_OK) {
+                       if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (ptr == end)
+                               goto done;
+                       thread->return_error2 = BR_OK;
+               }
+               if (put_user(thread->return_error, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+               thread->return_error = BR_OK;
+               goto done;
+       }
+
+
+       thread->looper |= BINDER_LOOPER_STATE_WAITING;
+       if (wait_for_proc_work)
+               proc->ready_threads++;
+       /* binder_lock is dropped for the (possibly blocking) wait */
+       mutex_unlock(&binder_lock);
+       if (wait_for_proc_work) {
+               if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+                                       BINDER_LOOPER_STATE_ENTERED))) {
+                       binder_user_error("binder: %d:%d ERROR: Thread waiting "
+                               "for process work before calling BC_REGISTER_"
+                               "LOOPER or BC_ENTER_LOOPER (state %x)\n",
+                               proc->pid, thread->pid, thread->looper);
+                       wait_event_interruptible(binder_user_error_wait,
+                                                binder_stop_on_user_error < 2);
+               }
+               binder_set_nice(proc->default_priority);
+               if (non_block) {
+                       if (!binder_has_proc_work(proc, thread))
+                               ret = -EAGAIN;
+               } else
+                       /* exclusive wait: wake only one ready thread per
+                        * queued piece of process work */
+                       ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+       } else {
+               if (non_block) {
+                       if (!binder_has_thread_work(thread))
+                               ret = -EAGAIN;
+               } else
+                       ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+       }
+       mutex_lock(&binder_lock);
+       if (wait_for_proc_work)
+               proc->ready_threads--;
+       thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+       if (ret)
+               return ret;
+
+       /* consume work items until the buffer is full or the lists are
+        * empty; at most one transaction is delivered per call */
+       while (1) {
+               uint32_t cmd;
+               struct binder_transaction_data tr;
+               struct binder_work *w;
+               struct binder_transaction *t = NULL;
+
+               if (!list_empty(&thread->todo))
+                       w = list_first_entry(&thread->todo, struct binder_work, entry);
+               else if (!list_empty(&proc->todo) && wait_for_proc_work)
+                       w = list_first_entry(&proc->todo, struct binder_work, entry);
+               else {
+                       if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+                               goto retry;
+                       break;
+               }
+
+               /* NOTE(review): ptr <= end holds here, so the ptrdiff_t
+                * vs sizeof comparison is safe despite the signedness */
+               if (end - ptr < sizeof(tr) + 4)
+                       break;
+
+               switch (w->type) {
+               case BINDER_WORK_TRANSACTION: {
+                       t = container_of(w, struct binder_transaction, work);
+               } break;
+               case BINDER_WORK_TRANSACTION_COMPLETE: {
+                       cmd = BR_TRANSACTION_COMPLETE;
+                       if (put_user(cmd, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+
+                       binder_stat_br(proc, thread, cmd);
+                       binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+                                    "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+                                    proc->pid, thread->pid);
+
+                       list_del(&w->entry);
+                       kfree(w);
+                       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+               } break;
+               case BINDER_WORK_NODE: {
+                       /* translate node ref-count state into at most one
+                        * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS */
+                       struct binder_node *node = container_of(w, struct binder_node, work);
+                       uint32_t cmd = BR_NOOP;
+                       const char *cmd_name;
+                       int strong = node->internal_strong_refs || node->local_strong_refs;
+                       int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+                       if (weak && !node->has_weak_ref) {
+                               cmd = BR_INCREFS;
+                               cmd_name = "BR_INCREFS";
+                               node->has_weak_ref = 1;
+                               node->pending_weak_ref = 1;
+                               node->local_weak_refs++;
+                       } else if (strong && !node->has_strong_ref) {
+                               cmd = BR_ACQUIRE;
+                               cmd_name = "BR_ACQUIRE";
+                               node->has_strong_ref = 1;
+                               node->pending_strong_ref = 1;
+                               node->local_strong_refs++;
+                       } else if (!strong && node->has_strong_ref) {
+                               cmd = BR_RELEASE;
+                               cmd_name = "BR_RELEASE";
+                               node->has_strong_ref = 0;
+                       } else if (!weak && node->has_weak_ref) {
+                               cmd = BR_DECREFS;
+                               cmd_name = "BR_DECREFS";
+                               node->has_weak_ref = 0;
+                       }
+                       if (cmd != BR_NOOP) {
+                               if (put_user(cmd, (uint32_t __user *)ptr))
+                                       return -EFAULT;
+                               ptr += sizeof(uint32_t);
+                               if (put_user(node->ptr, (void * __user *)ptr))
+                                       return -EFAULT;
+                               ptr += sizeof(void *);
+                               if (put_user(node->cookie, (void * __user *)ptr))
+                                       return -EFAULT;
+                               ptr += sizeof(void *);
+
+                               binder_stat_br(proc, thread, cmd);
+                               binder_debug(BINDER_DEBUG_USER_REFS,
+                                            "binder: %d:%d %s %d u%p c%p\n",
+                                            proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+                       } else {
+                               /* nothing to tell user space; a node with
+                                * no refs left is destroyed here */
+                               list_del_init(&w->entry);
+                               if (!weak && !strong) {
+                                       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                                    "binder: %d:%d node %d u%p c%p deleted\n",
+                                                    proc->pid, thread->pid, node->debug_id,
+                                                    node->ptr, node->cookie);
+                                       rb_erase(&node->rb_node, &proc->nodes);
+                                       kfree(node);
+                                       binder_stats_deleted(BINDER_STAT_NODE);
+                               } else {
+                                       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                                    "binder: %d:%d node %d u%p c%p state unchanged\n",
+                                                    proc->pid, thread->pid, node->debug_id, node->ptr,
+                                                    node->cookie);
+                               }
+                       }
+               } break;
+               case BINDER_WORK_DEAD_BINDER:
+               case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+               case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+                       struct binder_ref_death *death;
+                       uint32_t cmd;
+
+                       death = container_of(w, struct binder_ref_death, work);
+                       if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+                               cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+                       else
+                               cmd = BR_DEAD_BINDER;
+                       if (put_user(cmd, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (put_user(death->cookie, (void * __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(void *);
+                       binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+                                    "binder: %d:%d %s %p\n",
+                                     proc->pid, thread->pid,
+                                     cmd == BR_DEAD_BINDER ?
+                                     "BR_DEAD_BINDER" :
+                                     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+                                     death->cookie);
+
+                       if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+                               list_del(&w->entry);
+                               kfree(death);
+                               binder_stats_deleted(BINDER_STAT_DEATH);
+                       } else
+                               /* kept alive until BC_DEAD_BINDER_DONE */
+                               list_move(&w->entry, &proc->delivered_death);
+                       if (cmd == BR_DEAD_BINDER)
+                               goto done; /* DEAD_BINDER notifications can cause transactions */
+               } break;
+               }
+
+               if (!t)
+                       continue;
+
+               /* deliver the transaction as BR_TRANSACTION or BR_REPLY */
+               BUG_ON(t->buffer == NULL);
+               if (t->buffer->target_node) {
+                       struct binder_node *target_node = t->buffer->target_node;
+                       tr.target.ptr = target_node->ptr;
+                       tr.cookie =  target_node->cookie;
+                       t->saved_priority = task_nice(current);
+                       if (t->priority < target_node->min_priority &&
+                           !(t->flags & TF_ONE_WAY))
+                               binder_set_nice(t->priority);
+                       else if (!(t->flags & TF_ONE_WAY) ||
+                                t->saved_priority > target_node->min_priority)
+                               binder_set_nice(target_node->min_priority);
+                       cmd = BR_TRANSACTION;
+               } else {
+                       tr.target.ptr = NULL;
+                       tr.cookie = NULL;
+                       cmd = BR_REPLY;
+               }
+               tr.code = t->code;
+               tr.flags = t->flags;
+               tr.sender_euid = t->sender_euid;
+
+               if (t->from) {
+                       struct task_struct *sender = t->from->proc->tsk;
+                       tr.sender_pid = task_tgid_nr_ns(sender,
+                                                       current->nsproxy->pid_ns);
+               } else {
+                       tr.sender_pid = 0;
+               }
+
+               /* hand out the kernel buffer at its user-space address */
+               tr.data_size = t->buffer->data_size;
+               tr.offsets_size = t->buffer->offsets_size;
+               tr.data.ptr.buffer = (void *)t->buffer->data +
+                                       proc->user_buffer_offset;
+               tr.data.ptr.offsets = tr.data.ptr.buffer +
+                                       ALIGN(t->buffer->data_size,
+                                           sizeof(void *));
+
+               if (put_user(cmd, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+               if (copy_to_user(ptr, &tr, sizeof(tr)))
+                       return -EFAULT;
+               ptr += sizeof(tr);
+
+               binder_stat_br(proc, thread, cmd);
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "binder: %d:%d %s %d %d:%d, cmd %d"
+                            "size %zd-%zd ptr %p-%p\n",
+                            proc->pid, thread->pid,
+                            (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+                            "BR_REPLY",
+                            t->debug_id, t->from ? t->from->proc->pid : 0,
+                            t->from ? t->from->pid : 0, cmd,
+                            t->buffer->data_size, t->buffer->offsets_size,
+                            tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+               list_del(&t->work.entry);
+               t->buffer->allow_user_free = 1;
+               if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+                       /* two-way: keep the transaction on our stack until
+                        * the reply comes back */
+                       t->to_parent = thread->transaction_stack;
+                       t->to_thread = thread;
+                       thread->transaction_stack = t;
+               } else {
+                       t->buffer->transaction = NULL;
+                       kfree(t);
+                       binder_stats_deleted(BINDER_STAT_TRANSACTION);
+               }
+               break;
+       }
+
+done:
+
+       *consumed = ptr - buffer;
+       /* ask user space to spawn another looper thread when the pool
+        * has no spare registered threads left */
+       if (proc->requested_threads + proc->ready_threads == 0 &&
+           proc->requested_threads_started < proc->max_threads &&
+           (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+            BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
+            spawn a new thread if we leave this out */) {
+               proc->requested_threads++;
+               binder_debug(BINDER_DEBUG_THREADS,
+                            "binder: %d:%d BR_SPAWN_LOOPER\n",
+                            proc->pid, thread->pid);
+               /* overwrites the leading BR_NOOP written above */
+               if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+/*
+ * Drain a work list that can no longer be delivered (thread or process
+ * teardown).  Two-way transactions get a BR_DEAD_REPLY sent back to the
+ * sender; everything else owned by the list is freed here.
+ *
+ * Fixes two leaks in the original: undelivered one-way transactions and
+ * replies were never freed, and kzalloc'd binder_ref_death work items
+ * (DEAD_BINDER_AND_CLEAR / CLEAR_DEATH_NOTIFICATION) fell through the
+ * default case and leaked.
+ */
+static void binder_release_work(struct list_head *list)
+{
+       struct binder_work *w;
+       while (!list_empty(list)) {
+               w = list_first_entry(list, struct binder_work, entry);
+               list_del_init(&w->entry);
+               switch (w->type) {
+               case BINDER_WORK_TRANSACTION: {
+                       struct binder_transaction *t;
+
+                       t = container_of(w, struct binder_transaction, work);
+                       if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+                               binder_send_failed_reply(t, BR_DEAD_REPLY);
+                       } else {
+                               /* one-way or reply: nobody is waiting,
+                                * free the transaction now */
+                               t->buffer->transaction = NULL;
+                               kfree(t);
+                               binder_stats_deleted(BINDER_STAT_TRANSACTION);
+                       }
+               } break;
+               case BINDER_WORK_TRANSACTION_COMPLETE: {
+                       kfree(w);
+                       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+               } break;
+               case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+               case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+                       struct binder_ref_death *death;
+
+                       death = container_of(w, struct binder_ref_death, work);
+                       kfree(death);
+                       binder_stats_deleted(BINDER_STAT_DEATH);
+               } break;
+               default:
+                       /* BINDER_WORK_DEAD_BINDER and node work are still
+                        * referenced elsewhere (ref->death / node); they
+                        * must not be freed here */
+                       break;
+               }
+       }
+
+}
+
+/*
+ * Find the binder_thread record for current in proc->threads, creating
+ * and inserting a fresh one on first use.  Returns NULL only when the
+ * allocation of a new record fails.
+ */
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+       struct rb_node **link = &proc->threads.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_thread *thread;
+
+       /* walk the tree keyed by pid */
+       while (*link != NULL) {
+               parent = *link;
+               thread = rb_entry(parent, struct binder_thread, rb_node);
+
+               if (current->pid == thread->pid)
+                       return thread; /* already registered */
+
+               if (current->pid < thread->pid)
+                       link = &parent->rb_left;
+               else
+                       link = &parent->rb_right;
+       }
+
+       /* first call from this thread: create and link a new record */
+       thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+       if (thread == NULL)
+               return NULL;
+       binder_stats_created(BINDER_STAT_THREAD);
+       thread->proc = proc;
+       thread->pid = current->pid;
+       init_waitqueue_head(&thread->wait);
+       INIT_LIST_HEAD(&thread->todo);
+       rb_link_node(&thread->rb_node, parent, link);
+       rb_insert_color(&thread->rb_node, &proc->threads);
+       thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+       thread->return_error = BR_OK;
+       thread->return_error2 = BR_OK;
+       return thread;
+}
+
+/*
+ * Tear down a binder_thread: unlink it from the process, detach every
+ * transaction on its stack, fail a pending incoming transaction with
+ * BR_DEAD_REPLY, and release any undelivered work.  Returns the number
+ * of transactions that were still active on the thread's stack.
+ */
+static int binder_free_thread(struct binder_proc *proc,
+                             struct binder_thread *thread)
+{
+       struct binder_transaction *t;
+       struct binder_transaction *send_reply = NULL;
+       int active_transactions = 0;
+
+       rb_erase(&thread->rb_node, &proc->threads);
+       t = thread->transaction_stack;
+       /* top-of-stack incoming transaction: its sender still waits for
+        * a reply, so remember it for a BR_DEAD_REPLY below */
+       if (t && t->to_thread == thread)
+               send_reply = t;
+       /* walk the stack, severing this thread from each transaction;
+        * "in" entries drop their target, "out" entries drop their
+        * sender, and the walk follows the matching parent link */
+       while (t) {
+               active_transactions++;
+               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                            "binder: release %d:%d transaction %d "
+                            "%s, still active\n", proc->pid, thread->pid,
+                            t->debug_id,
+                            (t->to_thread == thread) ? "in" : "out");
+
+               if (t->to_thread == thread) {
+                       t->to_proc = NULL;
+                       t->to_thread = NULL;
+                       if (t->buffer) {
+                               t->buffer->transaction = NULL;
+                               t->buffer = NULL;
+                       }
+                       t = t->to_parent;
+               } else if (t->from == thread) {
+                       t->from = NULL;
+                       t = t->from_parent;
+               } else
+                       BUG();
+       }
+       if (send_reply)
+               binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+       binder_release_work(&thread->todo);
+       kfree(thread);
+       binder_stats_deleted(BINDER_STAT_THREAD);
+       return active_transactions;
+}
+
+/*
+ * poll() handler: report POLLIN when work is available for this thread
+ * (or for the process, if the thread is idle).
+ *
+ * Fix: binder_get_thread() returns NULL when allocating a new thread
+ * record fails; the original dereferenced the result unconditionally.
+ * Bail out with POLLERR instead (matches the later upstream fix).
+ */
+static unsigned int binder_poll(struct file *filp,
+                               struct poll_table_struct *wait)
+{
+       struct binder_proc *proc = filp->private_data;
+       struct binder_thread *thread = NULL;
+       int wait_for_proc_work;
+
+       mutex_lock(&binder_lock);
+       thread = binder_get_thread(proc);
+       if (thread == NULL) {
+               mutex_unlock(&binder_lock);
+               return POLLERR;
+       }
+
+       /* idle thread: watch the process queue, else our own */
+       wait_for_proc_work = thread->transaction_stack == NULL &&
+               list_empty(&thread->todo) && thread->return_error == BR_OK;
+       mutex_unlock(&binder_lock);
+
+       if (wait_for_proc_work) {
+               if (binder_has_proc_work(proc, thread))
+                       return POLLIN;
+               poll_wait(filp, &proc->wait, wait);
+               if (binder_has_proc_work(proc, thread))
+                       return POLLIN;
+       } else {
+               if (binder_has_thread_work(thread))
+                       return POLLIN;
+               poll_wait(filp, &thread->wait, wait);
+               if (binder_has_thread_work(thread))
+                       return POLLIN;
+       }
+       return 0;
+}
+
+/*
+ * Main ioctl dispatcher for /dev/binder: BINDER_WRITE_READ (the work
+ * horse), thread-pool sizing, context-manager registration, thread exit
+ * and protocol-version query.  All work is done under binder_lock.
+ */
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       int ret;
+       struct binder_proc *proc = filp->private_data;
+       struct binder_thread *thread;
+       unsigned int size = _IOC_SIZE(cmd);
+       void __user *ubuf = (void __user *)arg;
+
+       /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+       /* debugging aid: stall callers while a user error is frozen */
+       ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+       if (ret)
+               return ret;
+
+       mutex_lock(&binder_lock);
+       thread = binder_get_thread(proc);
+       if (thread == NULL) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       switch (cmd) {
+       case BINDER_WRITE_READ: {
+               struct binder_write_read bwr;
+               if (size != sizeof(struct binder_write_read)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+               binder_debug(BINDER_DEBUG_READ_WRITE,
+                            "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+                            proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
+                            bwr.read_size, bwr.read_buffer);
+
+               if (bwr.write_size > 0) {
+                       ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+                       if (ret < 0) {
+                               /* report how much was written, but mark
+                                * the read side as untouched */
+                               bwr.read_consumed = 0;
+                               if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+                                       ret = -EFAULT;
+                               goto err;
+                       }
+               }
+               if (bwr.read_size > 0) {
+                       ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+                       /* the read may have left process work behind;
+                        * wake another waiter to pick it up */
+                       if (!list_empty(&proc->todo))
+                               wake_up_interruptible(&proc->wait);
+                       if (ret < 0) {
+                               if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+                                       ret = -EFAULT;
+                               goto err;
+                       }
+               }
+               binder_debug(BINDER_DEBUG_READ_WRITE,
+                            "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+                            proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
+                            bwr.read_consumed, bwr.read_size);
+               if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+               break;
+       }
+       case BINDER_SET_MAX_THREADS:
+               /* NOTE(review): a failed copy_from_user conventionally
+                * returns -EFAULT, not -EINVAL — confirm before changing,
+                * as user space may depend on it */
+               if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               break;
+       case BINDER_SET_CONTEXT_MGR:
+               /* only one context manager, and only one uid may claim it */
+               if (binder_context_mgr_node != NULL) {
+                       printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+                       ret = -EBUSY;
+                       goto err;
+               }
+               if (binder_context_mgr_uid != -1) {
+                       if (binder_context_mgr_uid != current->cred->euid) {
+                               printk(KERN_ERR "binder: BINDER_SET_"
+                                      "CONTEXT_MGR bad uid %d != %d\n",
+                                      current->cred->euid,
+                                      binder_context_mgr_uid);
+                               ret = -EPERM;
+                               goto err;
+                       }
+               } else
+                       binder_context_mgr_uid = current->cred->euid;
+               binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+               if (binder_context_mgr_node == NULL) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               /* pin the node so it cannot go away while it is handle 0 */
+               binder_context_mgr_node->local_weak_refs++;
+               binder_context_mgr_node->local_strong_refs++;
+               binder_context_mgr_node->has_strong_ref = 1;
+               binder_context_mgr_node->has_weak_ref = 1;
+               break;
+       case BINDER_THREAD_EXIT:
+               binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
+                            proc->pid, thread->pid);
+               binder_free_thread(proc, thread);
+               /* thread is freed; NULL prevents the err path below from
+                * touching it */
+               thread = NULL;
+               break;
+       case BINDER_VERSION:
+               if (size != sizeof(struct binder_version)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               break;
+       default:
+               ret = -EINVAL;
+               goto err;
+       }
+       ret = 0;
+err:
+       /* the ioctl has returned to user space once; NEED_RETURN has
+        * served its purpose for this thread */
+       if (thread)
+               thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+       mutex_unlock(&binder_lock);
+       wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+       if (ret && ret != -ERESTARTSYS)
+               printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+       return ret;
+}
+
+/*
+ * VMA open callback: purely informational — logs the mapping's range,
+ * flags and page protection.  The binder mapping itself is established
+ * once in binder_mmap(); nothing is duplicated here.
+ */
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+       struct binder_proc *proc = vma->vm_private_data;
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+                    proc->pid, vma->vm_start, vma->vm_end,
+                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+                    (unsigned long)pgprot_val(vma->vm_page_prot));
+}
+
+/*
+ * VMA close callback: the userspace mapping is going away, so forget the
+ * vma/mm pointers on the proc and defer dropping the cached files_struct
+ * to the workqueue (we cannot take binder_lock from this context).
+ */
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+       struct binder_proc *proc = vma->vm_private_data;
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+                    proc->pid, vma->vm_start, vma->vm_end,
+                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+                    (unsigned long)pgprot_val(vma->vm_page_prot));
+       /* Clear before deferring so no one maps pages into a dead VMA. */
+       proc->vma = NULL;
+       proc->vma_vm_mm = NULL;
+       binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+/*
+ * VMA callbacks for the binder buffer mapping.  Declared const: the table
+ * is never modified at runtime and vm_area_struct.vm_ops is a pointer to
+ * const, so this lets the table live in read-only data.
+ */
+static const struct vm_operations_struct binder_vm_ops = {
+       .open = binder_vma_open,
+       .close = binder_vma_close,
+};
+
+/*
+ * mmap handler: set up the single per-process binder buffer area.
+ * A kernel vmalloc range of the same size as the userspace VMA is
+ * reserved so every buffer page can be mapped at a fixed user<->kernel
+ * offset (proc->user_buffer_offset).  Only one mapping per proc is
+ * allowed and the size is capped at 4MB.  Returns 0 or negative errno.
+ */
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       int ret;
+       struct vm_struct *area;
+       struct binder_proc *proc = filp->private_data;
+       const char *failure_string;
+       struct binder_buffer *buffer;
+
+       /* Oversized requests are silently clamped, not rejected. */
+       if ((vma->vm_end - vma->vm_start) > SZ_4M)
+               vma->vm_end = vma->vm_start + SZ_4M;
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+                    proc->pid, vma->vm_start, vma->vm_end,
+                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+                    (unsigned long)pgprot_val(vma->vm_page_prot));
+
+       /* FORBIDDEN_MMAP_FLAGS is defined earlier in this file —
+        * presumably rejects writable mappings; confirm at definition. */
+       if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+               ret = -EPERM;
+               failure_string = "bad vm_flags";
+               goto err_bad_arg;
+       }
+       /* Not inherited across fork, and can never be made writable
+        * later via mprotect(). */
+       vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+       mutex_lock(&binder_mmap_lock);
+       if (proc->buffer) {
+               ret = -EBUSY;
+               failure_string = "already mapped";
+               goto err_already_mapped;
+       }
+
+       /* Reserve a matching kernel virtual range; actual pages are
+        * populated lazily by binder_update_page_range(). */
+       area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+       if (area == NULL) {
+               ret = -ENOMEM;
+               failure_string = "get_vm_area";
+               goto err_get_vm_area_failed;
+       }
+       proc->buffer = area->addr;
+       /* Constant delta converting a kernel buffer address into the
+        * corresponding user address inside this VMA. */
+       proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+       mutex_unlock(&binder_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+       /* On VIPT-aliasing caches the user and kernel mappings must share
+        * a cache colour: shrink the VMA from the front one page at a
+        * time until the colours match.
+        * NOTE(review): mutates vma->vm_start inside mmap — assumed
+        * acceptable on the affected ARM parts; confirm. */
+       if (cache_is_vipt_aliasing()) {
+               while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+                       printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+                       vma->vm_start += PAGE_SIZE;
+               }
+       }
+#endif
+       /* One struct page pointer per page of the buffer area. */
+       proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+       if (proc->pages == NULL) {
+               ret = -ENOMEM;
+               failure_string = "alloc page array";
+               goto err_alloc_pages_failed;
+       }
+       proc->buffer_size = vma->vm_end - vma->vm_start;
+
+       vma->vm_ops = &binder_vm_ops;
+       vma->vm_private_data = proc;
+
+       /* Map just the first page now; the rest is mapped on demand. */
+       if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+               ret = -ENOMEM;
+               failure_string = "alloc small buf";
+               goto err_alloc_small_buf_failed;
+       }
+       /* The whole area starts life as one big free buffer. */
+       buffer = proc->buffer;
+       INIT_LIST_HEAD(&proc->buffers);
+       list_add(&buffer->entry, &proc->buffers);
+       buffer->free = 1;
+       binder_insert_free_buffer(proc, buffer);
+       /* Async transactions may consume at most half the space. */
+       proc->free_async_space = proc->buffer_size / 2;
+       barrier();
+       proc->files = get_files_struct(proc->tsk);
+       proc->vma = vma;
+       proc->vma_vm_mm = vma->vm_mm;
+
+       /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
+                proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+       return 0;
+
+/* Unwind in reverse order of acquisition (goto-cleanup pattern). */
+err_alloc_small_buf_failed:
+       kfree(proc->pages);
+       proc->pages = NULL;
+err_alloc_pages_failed:
+       mutex_lock(&binder_mmap_lock);
+       vfree(proc->buffer);
+       proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+       mutex_unlock(&binder_mmap_lock);
+err_bad_arg:
+       printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
+              proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+       return ret;
+}
+
+/*
+ * Device open: allocate a binder_proc for the opening process, pin its
+ * task_struct, register it on the global binder_procs list and expose a
+ * per-pid debugfs file.  Returns 0 or -ENOMEM.
+ */
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+       struct binder_proc *proc;
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+                    current->group_leader->pid, current->pid);
+
+       proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+       if (proc == NULL)
+               return -ENOMEM;
+       /* Hold a task reference for the lifetime of the proc; dropped in
+        * binder_deferred_release(). */
+       get_task_struct(current);
+       proc->tsk = current;
+       INIT_LIST_HEAD(&proc->todo);
+       init_waitqueue_head(&proc->wait);
+       proc->default_priority = task_nice(current);
+       mutex_lock(&binder_lock);
+       binder_stats_created(BINDER_STAT_PROC);
+       hlist_add_head(&proc->proc_node, &binder_procs);
+       /* The proc is keyed by the thread-group leader's pid. */
+       proc->pid = current->group_leader->pid;
+       INIT_LIST_HEAD(&proc->delivered_death);
+       filp->private_data = proc;
+       mutex_unlock(&binder_lock);
+
+       if (binder_debugfs_dir_entry_proc) {
+               /* 10 decimal digits for a 32-bit pid plus NUL. */
+               char strbuf[11];
+               snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+               proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+                       binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+       }
+
+       return 0;
+}
+
+/*
+ * flush() hook: defer waking all of this proc's waiting threads so they
+ * can notice the file is going away.  The real work happens in
+ * binder_deferred_flush() on the workqueue.
+ */
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+       struct binder_proc *proc = filp->private_data;
+
+       binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+       return 0;
+}
+
+/*
+ * Deferred flush: mark every thread of the proc as needing to return to
+ * userspace and wake the ones blocked in read, plus anyone sleeping on
+ * the proc-wide wait queue.  Runs with binder_lock held (workqueue).
+ */
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+       struct rb_node *n;
+       int wake_count = 0;
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+               struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+               /* Forces the thread's next ioctl to bail out early. */
+               thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+               if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+                       wake_up_interruptible(&thread->wait);
+                       wake_count++;
+               }
+       }
+       wake_up_interruptible_all(&proc->wait);
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder_flush: %d woke %d threads\n", proc->pid,
+                    wake_count);
+}
+
+/*
+ * Last close of the fd: remove the per-pid debugfs file now and defer
+ * the heavyweight teardown (binder_deferred_release) to the workqueue.
+ */
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+       struct binder_proc *proc = filp->private_data;
+       debugfs_remove(proc->debugfs_entry);
+       binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+       return 0;
+}
+
+/*
+ * Final teardown of a binder_proc, run from the deferred workqueue with
+ * binder_lock held.  Order matters: threads, then nodes (turning still-
+ * referenced ones into dead nodes and queueing death notifications),
+ * then outgoing refs, pending work, allocated buffers, and finally the
+ * page array, buffer area and the proc itself.  Frees proc.
+ */
+static void binder_deferred_release(struct binder_proc *proc)
+{
+       struct hlist_node *pos;
+       struct binder_transaction *t;
+       struct rb_node *n;
+       int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+       /* The mapping and files must already be gone (vma_close/flush). */
+       BUG_ON(proc->vma);
+       BUG_ON(proc->files);
+
+       hlist_del(&proc->proc_node);
+       /* If this proc was the context manager, drop the global node. */
+       if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+               binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                            "binder_release: %d context_mgr_node gone\n",
+                            proc->pid);
+               binder_context_mgr_node = NULL;
+       }
+
+       /* Phase 1: free all threads, counting transactions they abort. */
+       threads = 0;
+       active_transactions = 0;
+       while ((n = rb_first(&proc->threads))) {
+               struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+               threads++;
+               active_transactions += binder_free_thread(proc, thread);
+       }
+       /* Phase 2: dispose of local nodes.  Unreferenced nodes are freed;
+        * referenced ones become "dead nodes" and every ref holder with a
+        * registered death notification gets BINDER_WORK_DEAD_BINDER. */
+       nodes = 0;
+       incoming_refs = 0;
+       while ((n = rb_first(&proc->nodes))) {
+               struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+               nodes++;
+               rb_erase(&node->rb_node, &proc->nodes);
+               list_del_init(&node->work.entry);
+               if (hlist_empty(&node->refs)) {
+                       kfree(node);
+                       binder_stats_deleted(BINDER_STAT_NODE);
+               } else {
+                       struct binder_ref *ref;
+                       int death = 0;
+
+                       node->proc = NULL;
+                       node->local_strong_refs = 0;
+                       node->local_weak_refs = 0;
+                       hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+                       hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+                               incoming_refs++;
+                               if (ref->death) {
+                                       death++;
+                                       if (list_empty(&ref->death->work.entry)) {
+                                               ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+                                               list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+                                               wake_up_interruptible(&ref->proc->wait);
+                                       } else
+                                               /* A queued death work here would
+                                                * mean corrupted state. */
+                                               BUG();
+                               }
+                       }
+                       binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                                    "binder: node %d now dead, "
+                                    "refs %d, death %d\n", node->debug_id,
+                                    incoming_refs, death);
+               }
+       }
+       /* Phase 3: drop all references this proc held on remote nodes. */
+       outgoing_refs = 0;
+       while ((n = rb_first(&proc->refs_by_desc))) {
+               struct binder_ref *ref = rb_entry(n, struct binder_ref,
+                                                 rb_node_desc);
+               outgoing_refs++;
+               binder_delete_ref(ref);
+       }
+       /* Phase 4: discard undelivered work and free live buffers;
+        * in-flight transactions are orphaned (buffer pointer cleared). */
+       binder_release_work(&proc->todo);
+       buffers = 0;
+
+       while ((n = rb_first(&proc->allocated_buffers))) {
+               struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
+                                                       rb_node);
+               t = buffer->transaction;
+               if (t) {
+                       t->buffer = NULL;
+                       buffer->transaction = NULL;
+                       printk(KERN_ERR "binder: release proc %d, "
+                              "transaction %d, not freed\n",
+                              proc->pid, t->debug_id);
+                       /*BUG();*/
+               }
+               binder_free_buf(proc, buffer);
+               buffers++;
+       }
+
+       binder_stats_deleted(BINDER_STAT_PROC);
+
+       /* Phase 5: unmap and free any buffer pages still resident. */
+       page_count = 0;
+       if (proc->pages) {
+               int i;
+               for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+                       if (proc->pages[i]) {
+                               void *page_addr = proc->buffer + i * PAGE_SIZE;
+                               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                            "binder_release: %d: "
+                                            "page %d at %p not freed\n",
+                                            proc->pid, i,
+                                            page_addr);
+                               unmap_kernel_range((unsigned long)page_addr,
+                                       PAGE_SIZE);
+                               __free_page(proc->pages[i]);
+                               page_count++;
+                       }
+               }
+               kfree(proc->pages);
+               vfree(proc->buffer);
+       }
+
+       /* Drop the task reference taken in binder_open(). */
+       put_task_struct(proc->tsk);
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder_release: %d threads %d, nodes %d (ref %d), "
+                    "refs %d, active transactions %d, buffers %d, "
+                    "pages %d\n",
+                    proc->pid, threads, nodes, incoming_refs, outgoing_refs,
+                    active_transactions, buffers, page_count);
+
+       kfree(proc);
+}
+
+/*
+ * Workqueue handler: drain binder_deferred_list, handling each proc's
+ * pending deferred flags (PUT_FILES / FLUSH / RELEASE) one at a time.
+ * Lock order: binder_lock is taken before binder_deferred_lock, and
+ * put_files_struct() is called only after binder_lock is dropped.
+ */
+static void binder_deferred_func(struct work_struct *work)
+{
+       struct binder_proc *proc;
+       struct files_struct *files;
+
+       int defer;
+       do {
+               mutex_lock(&binder_lock);
+               mutex_lock(&binder_deferred_lock);
+               if (!hlist_empty(&binder_deferred_list)) {
+                       proc = hlist_entry(binder_deferred_list.first,
+                                       struct binder_proc, deferred_work_node);
+                       /* hlist_del_init so binder_defer_work() can requeue
+                        * the proc (it tests hlist_unhashed()). */
+                       hlist_del_init(&proc->deferred_work_node);
+                       defer = proc->deferred_work;
+                       proc->deferred_work = 0;
+               } else {
+                       proc = NULL;
+                       defer = 0;
+               }
+               mutex_unlock(&binder_deferred_lock);
+
+               files = NULL;
+               if (defer & BINDER_DEFERRED_PUT_FILES) {
+                       files = proc->files;
+                       if (files)
+                               proc->files = NULL;
+               }
+
+               if (defer & BINDER_DEFERRED_FLUSH)
+                       binder_deferred_flush(proc);
+
+               if (defer & BINDER_DEFERRED_RELEASE)
+                       binder_deferred_release(proc); /* frees proc */
+
+               mutex_unlock(&binder_lock);
+               /* Dropped outside binder_lock: may sleep / recurse. */
+               if (files)
+                       put_files_struct(files);
+       } while (proc);
+}
+/* Single work item shared by all procs; the list carries the backlog. */
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+/*
+ * Record a deferred action for @proc and kick the workqueue.  Flags
+ * accumulate; the proc is queued at most once (hlist_unhashed check).
+ */
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+       mutex_lock(&binder_deferred_lock);
+       proc->deferred_work |= defer;
+       if (hlist_unhashed(&proc->deferred_work_node)) {
+               hlist_add_head(&proc->deferred_work_node,
+                               &binder_deferred_list);
+               queue_work(binder_deferred_workqueue, &binder_deferred_work);
+       }
+       mutex_unlock(&binder_deferred_lock);
+}
+
+/*
+ * Dump one transaction to the seq_file: ids, endpoints, flags, and —
+ * when its buffer is still attached — the target node and data sizes.
+ */
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+                                    struct binder_transaction *t)
+{
+       seq_printf(m,
+                  "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+                  prefix, t->debug_id, t,
+                  t->from ? t->from->proc->pid : 0,
+                  t->from ? t->from->pid : 0,
+                  t->to_proc ? t->to_proc->pid : 0,
+                  t->to_thread ? t->to_thread->pid : 0,
+                  t->code, t->flags, t->priority, t->need_reply);
+       if (t->buffer == NULL) {
+               seq_puts(m, " buffer free\n");
+               return;
+       }
+       if (t->buffer->target_node)
+               seq_printf(m, " node %d",
+                          t->buffer->target_node->debug_id);
+       seq_printf(m, " size %zd:%zd data %p\n",
+                  t->buffer->data_size, t->buffer->offsets_size,
+                  t->buffer->data);
+}
+
+/* Dump one allocated buffer: id, address, sizes, and whether a
+ * transaction still owns it ("active") or it awaits BC_FREE_BUFFER. */
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+                               struct binder_buffer *buffer)
+{
+       seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+                  prefix, buffer->debug_id, buffer->data,
+                  buffer->data_size, buffer->offsets_size,
+                  buffer->transaction ? "active" : "delivered");
+}
+
+/*
+ * Dump one queued work item.  Transactions are printed in full using
+ * @transaction_prefix; other work types get a one-line description.
+ */
+static void print_binder_work(struct seq_file *m, const char *prefix,
+                             const char *transaction_prefix,
+                             struct binder_work *w)
+{
+       struct binder_node *node;
+       struct binder_transaction *t;
+
+       switch (w->type) {
+       case BINDER_WORK_TRANSACTION:
+               t = container_of(w, struct binder_transaction, work);
+               print_binder_transaction(m, transaction_prefix, t);
+               break;
+       case BINDER_WORK_TRANSACTION_COMPLETE:
+               seq_printf(m, "%stransaction complete\n", prefix);
+               break;
+       case BINDER_WORK_NODE:
+               node = container_of(w, struct binder_node, work);
+               seq_printf(m, "%snode work %d: u%p c%p\n",
+                          prefix, node->debug_id, node->ptr, node->cookie);
+               break;
+       case BINDER_WORK_DEAD_BINDER:
+               seq_printf(m, "%shas dead binder\n", prefix);
+               break;
+       case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+               seq_printf(m, "%shas cleared dead binder\n", prefix);
+               break;
+       case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+               seq_printf(m, "%shas cleared death notification\n", prefix);
+               break;
+       default:
+               seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+               break;
+       }
+}
+
+/*
+ * Dump one thread: its transaction stack (walked from the thread's point
+ * of view) and pending todo items.  If @print_always is false and the
+ * thread produced no output beyond its header, the header is retracted
+ * by rewinding m->count to start_pos.
+ */
+static void print_binder_thread(struct seq_file *m,
+                               struct binder_thread *thread,
+                               int print_always)
+{
+       struct binder_transaction *t;
+       struct binder_work *w;
+       size_t start_pos = m->count;
+       size_t header_pos;
+
+       seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
+       header_pos = m->count;
+       t = thread->transaction_stack;
+       while (t) {
+               if (t->from == thread) {
+                       print_binder_transaction(m,
+                                                "    outgoing transaction", t);
+                       t = t->from_parent;
+               } else if (t->to_thread == thread) {
+                       print_binder_transaction(m,
+                                                "    incoming transaction", t);
+                       t = t->to_parent;
+               } else {
+                       /* Stack entry belongs to neither side: corrupt. */
+                       print_binder_transaction(m, "    bad transaction", t);
+                       t = NULL;
+               }
+       }
+       list_for_each_entry(w, &thread->todo, entry) {
+               print_binder_work(m, "    ", "    pending transaction", w);
+       }
+       /* Suppress threads with nothing interesting to report. */
+       if (!print_always && m->count == header_pos)
+               m->count = start_pos;
+}
+
+/*
+ * Dump one node: its refcounts, the pids of every proc holding a ref,
+ * and any queued async transactions.
+ */
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+       struct binder_ref *ref;
+       struct hlist_node *pos;
+       struct binder_work *w;
+       int count;
+
+       /* Count incoming refs by walking the node's ref list. */
+       count = 0;
+       hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+               count++;
+
+       seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+                  node->debug_id, node->ptr, node->cookie,
+                  node->has_strong_ref, node->has_weak_ref,
+                  node->local_strong_refs, node->local_weak_refs,
+                  node->internal_strong_refs, count);
+       if (count) {
+               seq_puts(m, " proc");
+               hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+                       seq_printf(m, " %d", ref->proc->pid);
+       }
+       seq_puts(m, "\n");
+       list_for_each_entry(w, &node->async_todo, entry)
+               print_binder_work(m, "    ",
+                                 "    pending async transaction", w);
+}
+
+/* Dump one outgoing ref: desc, liveness of the target node, strong/weak
+ * counts, and any registered death notification. */
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+       seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
+                  ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+                  ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+/*
+ * Dump one proc: threads, nodes (all, or only those with async work when
+ * @print_all is 0), refs, buffers, pending todo and delivered deaths.
+ * Uses the same header-retraction trick as print_binder_thread() to
+ * drop procs that produced no output.
+ */
+static void print_binder_proc(struct seq_file *m,
+                             struct binder_proc *proc, int print_all)
+{
+       struct binder_work *w;
+       struct rb_node *n;
+       size_t start_pos = m->count;
+       size_t header_pos;
+
+       seq_printf(m, "proc %d\n", proc->pid);
+       header_pos = m->count;
+
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+               print_binder_thread(m, rb_entry(n, struct binder_thread,
+                                               rb_node), print_all);
+       for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+               struct binder_node *node = rb_entry(n, struct binder_node,
+                                                   rb_node);
+               if (print_all || node->has_async_transaction)
+                       print_binder_node(m, node);
+       }
+       if (print_all) {
+               for (n = rb_first(&proc->refs_by_desc);
+                    n != NULL;
+                    n = rb_next(n))
+                       print_binder_ref(m, rb_entry(n, struct binder_ref,
+                                                    rb_node_desc));
+       }
+       for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+               print_binder_buffer(m, "  buffer",
+                                   rb_entry(n, struct binder_buffer, rb_node));
+       list_for_each_entry(w, &proc->todo, entry)
+               print_binder_work(m, "  ", "  pending transaction", w);
+       /* Only note the existence of delivered deaths, not each one. */
+       list_for_each_entry(w, &proc->delivered_death, entry) {
+               seq_puts(m, "  has delivered dead binder\n");
+               break;
+       }
+       if (!print_all && m->count == header_pos)
+               m->count = start_pos;
+}
+
+/* Names for BR_* return codes, indexed by command ordinal.  Order must
+ * match the BR_* enum; the array size is cross-checked against
+ * binder_stats.br by a BUILD_BUG_ON in print_binder_stats(). */
+static const char *binder_return_strings[] = {
+       "BR_ERROR",
+       "BR_OK",
+       "BR_TRANSACTION",
+       "BR_REPLY",
+       "BR_ACQUIRE_RESULT",
+       "BR_DEAD_REPLY",
+       "BR_TRANSACTION_COMPLETE",
+       "BR_INCREFS",
+       "BR_ACQUIRE",
+       "BR_RELEASE",
+       "BR_DECREFS",
+       "BR_ATTEMPT_ACQUIRE",
+       "BR_NOOP",
+       "BR_SPAWN_LOOPER",
+       "BR_FINISHED",
+       "BR_DEAD_BINDER",
+       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+       "BR_FAILED_REPLY"
+};
+
+/* Names for BC_* command codes, indexed by command ordinal.  Order must
+ * match the BC_* enum; size checked against binder_stats.bc by a
+ * BUILD_BUG_ON in print_binder_stats(). */
+static const char *binder_command_strings[] = {
+       "BC_TRANSACTION",
+       "BC_REPLY",
+       "BC_ACQUIRE_RESULT",
+       "BC_FREE_BUFFER",
+       "BC_INCREFS",
+       "BC_ACQUIRE",
+       "BC_RELEASE",
+       "BC_DECREFS",
+       "BC_INCREFS_DONE",
+       "BC_ACQUIRE_DONE",
+       "BC_ATTEMPT_ACQUIRE",
+       "BC_REGISTER_LOOPER",
+       "BC_ENTER_LOOPER",
+       "BC_EXIT_LOOPER",
+       "BC_REQUEST_DEATH_NOTIFICATION",
+       "BC_CLEAR_DEATH_NOTIFICATION",
+       "BC_DEAD_BINDER_DONE"
+};
+
+/* Names for the BINDER_STAT_* object categories; order must match that
+ * enum (size checked in print_binder_stats()). */
+static const char *binder_objstat_strings[] = {
+       "proc",
+       "thread",
+       "node",
+       "ref",
+       "death",
+       "transaction",
+       "transaction_complete"
+};
+
+/*
+ * Dump non-zero counters from @stats: BC_* commands seen, BR_* returns
+ * delivered, and per-object-type active/total creation counts.  The
+ * BUILD_BUG_ONs pin the string tables to the stat array sizes at
+ * compile time.
+ */
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+                              struct binder_stats *stats)
+{
+       int i;
+
+       BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+                    ARRAY_SIZE(binder_command_strings));
+       for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+               if (stats->bc[i])
+                       seq_printf(m, "%s%s: %d\n", prefix,
+                                  binder_command_strings[i], stats->bc[i]);
+       }
+
+       BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+                    ARRAY_SIZE(binder_return_strings));
+       for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+               if (stats->br[i])
+                       seq_printf(m, "%s%s: %d\n", prefix,
+                                  binder_return_strings[i], stats->br[i]);
+       }
+
+       BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+                    ARRAY_SIZE(binder_objstat_strings));
+       BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+                    ARRAY_SIZE(stats->obj_deleted));
+       for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+               if (stats->obj_created[i] || stats->obj_deleted[i])
+                       /* active = created - deleted; total = created. */
+                       seq_printf(m, "%s%s: active %d total %d\n", prefix,
+                               binder_objstat_strings[i],
+                               stats->obj_created[i] - stats->obj_deleted[i],
+                               stats->obj_created[i]);
+       }
+}
+
+/*
+ * Dump per-proc summary statistics: thread/node/ref/buffer counts,
+ * thread-pool status, free async space, pending transaction count, and
+ * the proc's own command/return/object stats.
+ */
+static void print_binder_proc_stats(struct seq_file *m,
+                                   struct binder_proc *proc)
+{
+       struct binder_work *w;
+       struct rb_node *n;
+       int count, strong, weak;
+
+       seq_printf(m, "proc %d\n", proc->pid);
+       count = 0;
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+               count++;
+       seq_printf(m, "  threads: %d\n", count);
+       seq_printf(m, "  requested threads: %d+%d/%d\n"
+                       "  ready threads %d\n"
+                       "  free async space %zd\n", proc->requested_threads,
+                       proc->requested_threads_started, proc->max_threads,
+                       proc->ready_threads, proc->free_async_space);
+       count = 0;
+       for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+               count++;
+       seq_printf(m, "  nodes: %d\n", count);
+       /* Aggregate strong/weak counts over all outgoing refs. */
+       count = 0;
+       strong = 0;
+       weak = 0;
+       for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+               struct binder_ref *ref = rb_entry(n, struct binder_ref,
+                                                 rb_node_desc);
+               count++;
+               strong += ref->strong;
+               weak += ref->weak;
+       }
+       seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
+
+       count = 0;
+       for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+               count++;
+       seq_printf(m, "  buffers: %d\n", count);
+
+       /* Only BINDER_WORK_TRANSACTION items count as "pending". */
+       count = 0;
+       list_for_each_entry(w, &proc->todo, entry) {
+               switch (w->type) {
+               case BINDER_WORK_TRANSACTION:
+                       count++;
+                       break;
+               default:
+                       break;
+               }
+       }
+       seq_printf(m, "  pending transactions: %d\n", count);
+
+       print_binder_stats(m, "  ", &proc->stats);
+}
+
+
+/*
+ * debugfs "state": dump the dead-node list and the full state of every
+ * proc.  binder_debug_no_lock is a debug knob to dump without taking
+ * binder_lock (racy output, but usable on a wedged system).
+ */
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc;
+       struct hlist_node *pos;
+       struct binder_node *node;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               mutex_lock(&binder_lock);
+
+       seq_puts(m, "binder state:\n");
+
+       if (!hlist_empty(&binder_dead_nodes))
+               seq_puts(m, "dead nodes:\n");
+       hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+               print_binder_node(m, node);
+
+       hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+               print_binder_proc(m, proc, 1);
+       if (do_lock)
+               mutex_unlock(&binder_lock);
+       return 0;
+}
+
+/*
+ * debugfs "stats": global counters followed by per-proc summaries.
+ * Same binder_debug_no_lock escape hatch as binder_state_show().
+ */
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc;
+       struct hlist_node *pos;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               mutex_lock(&binder_lock);
+
+       seq_puts(m, "binder stats:\n");
+
+       print_binder_stats(m, "", &binder_stats);
+
+       hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+               print_binder_proc_stats(m, proc);
+       if (do_lock)
+               mutex_unlock(&binder_lock);
+       return 0;
+}
+
+/*
+ * debugfs "transactions": per-proc dump limited to interesting entries
+ * (print_all = 0 suppresses procs/threads with nothing pending).
+ */
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc;
+       struct hlist_node *pos;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               mutex_lock(&binder_lock);
+
+       seq_puts(m, "binder transactions:\n");
+       hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+               print_binder_proc(m, proc, 0);
+       if (do_lock)
+               mutex_unlock(&binder_lock);
+       return 0;
+}
+
+/*
+ * debugfs "proc/<pid>": full dump of a single proc; m->private carries
+ * the binder_proc set up in binder_open().
+ */
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc = m->private;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               mutex_lock(&binder_lock);
+       seq_puts(m, "binder proc state:\n");
+       print_binder_proc(m, proc, 1);
+       if (do_lock)
+               mutex_unlock(&binder_lock);
+       return 0;
+}
+
+/* Format one transaction-log ring entry; call_type encodes 2 = reply,
+ * 1 = async, anything else = synchronous call. */
+static void print_binder_transaction_log_entry(struct seq_file *m,
+                                       struct binder_transaction_log_entry *e)
+{
+       seq_printf(m,
+                  "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+                  e->debug_id, (e->call_type == 2) ? "reply" :
+                  ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+                  e->from_thread, e->to_proc, e->to_thread, e->to_node,
+                  e->target_handle, e->data_size, e->offsets_size);
+}
+
+/*
+ * debugfs transaction log: the log is a ring buffer with write cursor
+ * log->next.  When it has wrapped (log->full), print the older tail
+ * [next, end) first so output is in chronological order.
+ */
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+       struct binder_transaction_log *log = m->private;
+       int i;
+
+       if (log->full) {
+               for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+                       print_binder_transaction_log_entry(m, &log->entry[i]);
+       }
+       for (i = 0; i < log->next; i++)
+               print_binder_transaction_log_entry(m, &log->entry[i]);
+       return 0;
+}
+
+/* file_operations for /dev/binder.  There is deliberately no read/write:
+ * all data moves through ioctl(BINDER_WRITE_READ) and the mmap buffer. */
+static const struct file_operations binder_fops = {
+       .owner = THIS_MODULE,
+       .poll = binder_poll,
+       .unlocked_ioctl = binder_ioctl,
+       .mmap = binder_mmap,
+       .open = binder_open,
+       .flush = binder_flush,
+       .release = binder_release,
+};
+
+/* Misc character device /dev/binder with a dynamically assigned minor. */
+static struct miscdevice binder_miscdev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "binder",
+       .fops = &binder_fops
+};
+
+/* Instantiate seq_file fops (binder_state_fops etc.) for each *_show
+ * function via the BINDER_DEBUG_ENTRY macro defined earlier in this
+ * file — presumably a standard single-open seq_file wrapper; confirm
+ * at the macro definition. */
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+/*
+ * Module init: create the deferred-work workqueue, the debugfs tree
+ * (binder/ with a proc/ subdirectory and the global dump files), and
+ * register the /dev/binder misc device.  Returns 0 or negative errno.
+ *
+ * Fix vs. original: on misc_register() failure the workqueue and the
+ * debugfs directories were leaked and the debugfs files were still
+ * created; now everything created so far is torn down before returning.
+ */
+static int __init binder_init(void)
+{
+       int ret;
+
+       binder_deferred_workqueue = create_singlethread_workqueue("binder");
+       if (!binder_deferred_workqueue)
+               return -ENOMEM;
+
+       /* debugfs failures are non-fatal; entries stay NULL. */
+       binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+       if (binder_debugfs_dir_entry_root)
+               binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+                                                binder_debugfs_dir_entry_root);
+       ret = misc_register(&binder_miscdev);
+       if (ret) {
+               /* debugfs_remove(NULL) is a no-op, so these are safe even
+                * when debugfs is unavailable. */
+               debugfs_remove(binder_debugfs_dir_entry_proc);
+               debugfs_remove(binder_debugfs_dir_entry_root);
+               destroy_workqueue(binder_deferred_workqueue);
+               return ret;
+       }
+       if (binder_debugfs_dir_entry_root) {
+               binder_debugfs_state = debugfs_create_file(
+                                   "state",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   NULL,
+                                   &binder_state_fops);
+               binder_debugfs_stats = debugfs_create_file(
+                                   "stats",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   NULL,
+                                   &binder_stats_fops);
+               binder_debugfs_transactions = debugfs_create_file(
+                                   "transactions",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   NULL,
+                                   &binder_transactions_fops);
+               binder_debugfs_transaction_log = debugfs_create_file(
+                                   "transaction_log",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   &binder_transaction_log,
+                                   &binder_transaction_log_fops);
+               binder_debugfs_failed_transaction_log = debugfs_create_file(
+                                   "failed_transaction_log",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   &binder_transaction_log_failed,
+                                   &binder_transaction_log_fops);
+       }
+       return 0;
+}
+
+/*
+ * Module exit: tear down the debugfs tree, unregister /dev/binder and
+ * destroy the deferred workqueue (which flushes pending deferred work).
+ */
+static void __exit binder_exit(void)
+{
+       if (binder_debugfs_dir_entry_root) {
+               debugfs_remove(binder_debugfs_dir_entry_proc);
+               debugfs_remove(binder_debugfs_state);
+               debugfs_remove(binder_debugfs_stats);
+               debugfs_remove(binder_debugfs_transactions);
+               debugfs_remove(binder_debugfs_transaction_log);
+               debugfs_remove(binder_debugfs_failed_transaction_log);
+               debugfs_remove(binder_debugfs_dir_entry_root);
+       }
+
+       misc_deregister(&binder_miscdev);
+       destroy_workqueue(binder_deferred_workqueue);
+
+       printk(KERN_INFO "binder: unloaded\n");
+}
+
+/* Module plumbing: entry/exit points and license declaration. */
+module_init(binder_init);
+module_exit(binder_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
new file mode 100644 (file)
index 0000000..25ab6f2
--- /dev/null
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur.  The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+       /* 8 bytes for large_flat_header. */
+       unsigned long           type;
+       unsigned long           flags;
+
+       /* 8 bytes of data. */
+       union {
+               void            *binder;        /* local object */
+               signed long     handle;         /* remote object */
+       };
+
+       /* extra data associated with local object */
+       void                    *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+       signed long     write_size;     /* bytes to write */
+       signed long     write_consumed; /* bytes consumed by driver */
+       unsigned long   write_buffer;
+       signed long     read_size;      /* bytes to read */
+       signed long     read_consumed;  /* bytes consumed by driver */
+       unsigned long   read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+       /* driver protocol version -- increment with incompatible change */
+       signed long     protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
+#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, int64_t)
+#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, size_t)
+#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, int)
+#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, int)
+#define        BINDER_THREAD_EXIT              _IOW('b', 8, int)
+#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
+       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
+       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
+       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+        * identifying the target and contents of the transaction.
+        */
+       union {
+               size_t  handle; /* target descriptor of command transaction */
+               void    *ptr;   /* target descriptor of return transaction */
+       } target;
+       void            *cookie;        /* target object cookie */
+       unsigned int    code;           /* transaction command */
+
+       /* General information about the transaction. */
+       unsigned int    flags;
+       pid_t           sender_pid;
+       uid_t           sender_euid;
+       size_t          data_size;      /* number of bytes of data */
+       size_t          offsets_size;   /* number of bytes of offsets */
+
+       /* If this transaction is inline, the data immediately
+        * follows here; otherwise, it ends with a pointer to
+        * the data buffer.
+        */
+       union {
+               struct {
+                       /* transaction data */
+                       const void      *buffer;
+                       /* offsets from buffer to flat_binder_object structs */
+                       const void      *offsets;
+               } ptr;
+               uint8_t buf[8];
+       } data;
+};
+
+struct binder_ptr_cookie {
+       void *ptr;
+       void *cookie;
+};
+
+struct binder_pri_desc {
+       int priority;
+       int desc;
+};
+
+struct binder_pri_ptr_cookie {
+       int priority;
+       void *ptr;
+       void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+       BR_ERROR = _IOR('r', 0, int),
+       /*
+        * int: error code
+        */
+
+       BR_OK = _IO('r', 1),
+       /* No parameters! */
+
+       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the received command.
+        */
+
+       BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+       /*
+        * not currently supported
+        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+        * Else the remote object has acquired a primary reference.
+        */
+
+       BR_DEAD_REPLY = _IO('r', 5),
+       /*
+        * The target of the last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+        */
+
+       BR_TRANSACTION_COMPLETE = _IO('r', 6),
+       /*
+        * No parameters... always refers to the last transaction requested
+        * (including replies).  Note that this will be sent even for
+        * asynchronous transactions.
+        */
+
+       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+       /*
+        * void *:      ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+       /*
+        * not currently supported
+        * int: priority
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_NOOP = _IO('r', 12),
+       /*
+        * No parameters.  Do nothing and examine the next command.  It exists
+        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+        */
+
+       BR_SPAWN_LOOPER = _IO('r', 13),
+       /*
+        * No parameters.  The driver has determined that a process has no
+        * threads waiting to service incoming transactions.  When a process
+        * receives this command, it must spawn a new service thread and
+        * register it via bcENTER_LOOPER.
+        */
+
+       BR_FINISHED = _IO('r', 14),
+       /*
+        * not currently supported
+        * stop threadpool thread
+        */
+
+       BR_DEAD_BINDER = _IOR('r', 15, void *),
+       /*
+        * void *: cookie
+        */
+       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+       /*
+        * void *: cookie
+        */
+
+       BR_FAILED_REPLY = _IO('r', 17),
+       /*
+        * The last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+        */
+};
+
+enum BinderDriverCommandProtocol {
+       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the sent command.
+        */
+
+       BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+       /*
+        * not currently supported
+        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+        * Else you have acquired a primary reference on the object.
+        */
+
+       BC_FREE_BUFFER = _IOW('c', 3, int),
+       /*
+        * void *: ptr to transaction data received on a read
+        */
+
+       BC_INCREFS = _IOW('c', 4, int),
+       BC_ACQUIRE = _IOW('c', 5, int),
+       BC_RELEASE = _IOW('c', 6, int),
+       BC_DECREFS = _IOW('c', 7, int),
+       /*
+        * int: descriptor
+        */
+
+       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+       /*
+        * not currently supported
+        * int: priority
+        * int: descriptor
+        */
+
+       BC_REGISTER_LOOPER = _IO('c', 11),
+       /*
+        * No parameters.
+        * Register a spawned looper thread with the device.
+        */
+
+       BC_ENTER_LOOPER = _IO('c', 12),
+       BC_EXIT_LOOPER = _IO('c', 13),
+       /*
+        * No parameters.
+        * These two commands are sent as an application-level thread
+        * enters and exits the binder loop, respectively.  They are
+        * used so the binder can have an accurate count of the number
+        * of looping threads it has available.
+        */
+
+       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie
+        */
+
+       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie
+        */
+
+       BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+       /*
+        * void *: cookie
+        */
+};
+
+#endif /* _LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644 (file)
index 0000000..ea69b6a
--- /dev/null
@@ -0,0 +1,648 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+       unsigned char           *buffer;/* the ring buffer itself */
+       struct miscdevice       misc;   /* misc device representing the log */
+       wait_queue_head_t       wq;     /* wait queue for readers */
+       struct list_head        readers; /* this log's readers */
+       struct mutex            mutex;  /* mutex protecting buffer */
+       size_t                  w_off;  /* current write head offset */
+       size_t                  head;   /* new readers start here */
+       size_t                  size;   /* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+       struct logger_log       *log;   /* associated log */
+       struct list_head        list;   /* entry in logger_log's list */
+       size_t                  r_off;  /* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+size_t logger_offset(struct logger_log *log, size_t n)
+{
+       return n & (log->size-1);
+}
+
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ *     1) Need to quickly obtain the associated log during an I/O operation
+ *     2) Readers need to maintain state (logger_reader)
+ *     3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+       if (file->f_mode & FMODE_READ) {
+               struct logger_reader *reader = file->private_data;
+               return reader->log;
+       } else
+               return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length of the payload of the next entry starting
+ * from 'off'.
+ *
+ * An entry length is 2 bytes (16 bits) in host endian order.
+ * In the log, the length does not include the size of the log entry structure.
+ * This function returns the size including the log entry structure.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+       __u16 val;
+
+       /* copy 2 bytes from buffer, in memcpy order, */
+       /* handling possible wrap at end of buffer */
+
+       ((__u8 *)&val)[0] = log->buffer[off];
+       if (likely(off+1 < log->size))
+               ((__u8 *)&val)[1] = log->buffer[off+1];
+       else
+               ((__u8 *)&val)[1] = log->buffer[0];
+
+       return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+                                  struct logger_reader *reader,
+                                  char __user *buf,
+                                  size_t count)
+{
+       size_t len;
+
+       /*
+        * We read from the log in two disjoint operations. First, we read from
+        * the current read head offset up to 'count' bytes or to the end of
+        * the log, whichever comes first.
+        */
+       len = min(count, log->size - reader->r_off);
+       if (copy_to_user(buf, log->buffer + reader->r_off, len))
+               return -EFAULT;
+
+       /*
+        * Second, we read any remaining bytes, starting back at the head of
+        * the log.
+        */
+       if (count != len)
+               if (copy_to_user(buf + len, log->buffer, count - len))
+                       return -EFAULT;
+
+       reader->r_off = logger_offset(log, reader->r_off + count);
+
+       return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ *     - O_NONBLOCK works
+ *     - If there are no log entries to read, blocks until log is written to
+ *     - Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+                          size_t count, loff_t *pos)
+{
+       struct logger_reader *reader = file->private_data;
+       struct logger_log *log = reader->log;
+       ssize_t ret;
+       DEFINE_WAIT(wait);
+
+start:
+       while (1) {
+               mutex_lock(&log->mutex);
+
+               prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+               ret = (log->w_off == reader->r_off);
+               mutex_unlock(&log->mutex);
+               if (!ret)
+                       break;
+
+               if (file->f_flags & O_NONBLOCK) {
+                       ret = -EAGAIN;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
+               schedule();
+       }
+
+       finish_wait(&log->wq, &wait);
+       if (ret)
+               return ret;
+
+       mutex_lock(&log->mutex);
+
+       /* is there still something to read or did we race? */
+       if (unlikely(log->w_off == reader->r_off)) {
+               mutex_unlock(&log->mutex);
+               goto start;
+       }
+
+       /* get the size of the next entry */
+       ret = get_entry_len(log, reader->r_off);
+       if (count < ret) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* get exactly one entry from the log */
+       ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+       mutex_unlock(&log->mutex);
+
+       return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+       size_t count = 0;
+
+       do {
+               size_t nr = get_entry_len(log, off);
+               off = logger_offset(log, off + nr);
+               count += nr;
+       } while (count < len);
+
+       return off;
+}
+
+/*
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ *    positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ *               c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ *    c^
+ *  or                    c^
+ */
+static inline int is_between(size_t a, size_t b, size_t c)
+{
+       if (a < b) {
+               /* is c between a and b? */
+               if (a < c && c <= b)
+                       return 1;
+       } else {
+               /* is c outside of b through a? */
+               if (c <= b || a < c)
+                       return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+       size_t old = log->w_off;
+       size_t new = logger_offset(log, old + len);
+       struct logger_reader *reader;
+
+       if (is_between(old, new, log->head))
+               log->head = get_next_entry(log, log->head, len);
+
+       list_for_each_entry(reader, &log->readers, list)
+               if (is_between(old, new, reader->r_off))
+                       reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+       size_t len;
+
+       len = min(count, log->size - log->w_off);
+       memcpy(log->buffer + log->w_off, buf, len);
+
+       if (count != len)
+               memcpy(log->buffer, buf + len, count - len);
+
+       log->w_off = logger_offset(log, log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+                                     const void __user *buf, size_t count)
+{
+       size_t len;
+
+       len = min(count, log->size - log->w_off);
+       if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+               return -EFAULT;
+
+       if (count != len)
+               if (copy_from_user(log->buffer, buf + len, count - len))
+                       /*
+                        * Note that by not updating w_off, this abandons the
+                        * portion of the new entry that *was* successfully
+                        * copied, just above.  This is intentional to avoid
+                        * message corruption from missing fragments.
+                        */
+                       return -EFAULT;
+
+       log->w_off = logger_offset(log, log->w_off + count);
+
+       return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t ppos)
+{
+       struct logger_log *log = file_get_log(iocb->ki_filp);
+       size_t orig = log->w_off;
+       struct logger_entry header;
+       struct timespec now;
+       ssize_t ret = 0;
+
+       now = current_kernel_time();
+
+       header.pid = current->tgid;
+       header.tid = current->pid;
+       header.sec = now.tv_sec;
+       header.nsec = now.tv_nsec;
+       header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+       /* null writes succeed, return zero */
+       if (unlikely(!header.len))
+               return 0;
+
+       mutex_lock(&log->mutex);
+
+       /*
+        * Fix up any readers, pulling them forward to the first readable
+        * entry after (what will be) the new write offset. We do this now
+        * because if we partially fail, we can end up with clobbered log
+        * entries that encroach on readable buffer.
+        */
+       fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+       do_write_log(log, &header, sizeof(struct logger_entry));
+
+       while (nr_segs-- > 0) {
+               size_t len;
+               ssize_t nr;
+
+               /* figure out how much of this vector we can keep */
+               len = min_t(size_t, iov->iov_len, header.len - ret);
+
+               /* write out this segment's payload */
+               nr = do_write_log_from_user(log, iov->iov_base, len);
+               if (unlikely(nr < 0)) {
+                       log->w_off = orig;
+                       mutex_unlock(&log->mutex);
+                       return nr;
+               }
+
+               iov++;
+               ret += nr;
+       }
+
+       mutex_unlock(&log->mutex);
+
+       /* wake up any blocked readers */
+       wake_up_interruptible(&log->wq);
+
+       return ret;
+}
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+       struct logger_log *log;
+       int ret;
+
+       ret = nonseekable_open(inode, file);
+       if (ret)
+               return ret;
+
+       log = get_log_from_minor(MINOR(inode->i_rdev));
+       if (!log)
+               return -ENODEV;
+
+       if (file->f_mode & FMODE_READ) {
+               struct logger_reader *reader;
+
+               reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+               if (!reader)
+                       return -ENOMEM;
+
+               reader->log = log;
+               INIT_LIST_HEAD(&reader->list);
+
+               mutex_lock(&log->mutex);
+               reader->r_off = log->head;
+               list_add_tail(&reader->list, &log->readers);
+               mutex_unlock(&log->mutex);
+
+               file->private_data = reader;
+       } else
+               file->private_data = log;
+
+       return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+       if (file->f_mode & FMODE_READ) {
+               struct logger_reader *reader = file->private_data;
+               struct logger_log *log = reader->log;
+
+               mutex_lock(&log->mutex);
+               list_del(&reader->list);
+               mutex_unlock(&log->mutex);
+
+               kfree(reader);
+       }
+
+       return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+       struct logger_reader *reader;
+       struct logger_log *log;
+       unsigned int ret = POLLOUT | POLLWRNORM;
+
+       if (!(file->f_mode & FMODE_READ))
+               return ret;
+
+       reader = file->private_data;
+       log = reader->log;
+
+       poll_wait(file, &log->wq, wait);
+
+       mutex_lock(&log->mutex);
+       if (log->w_off != reader->r_off)
+               ret |= POLLIN | POLLRDNORM;
+       mutex_unlock(&log->mutex);
+
+       return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct logger_log *log = file_get_log(file);
+       struct logger_reader *reader;
+       long ret = -ENOTTY;
+
+       mutex_lock(&log->mutex);
+
+       switch (cmd) {
+       case LOGGER_GET_LOG_BUF_SIZE:
+               ret = log->size;
+               break;
+       case LOGGER_GET_LOG_LEN:
+               if (!(file->f_mode & FMODE_READ)) {
+                       ret = -EBADF;
+                       break;
+               }
+               reader = file->private_data;
+               if (log->w_off >= reader->r_off)
+                       ret = log->w_off - reader->r_off;
+               else
+                       ret = (log->size - reader->r_off) + log->w_off;
+               break;
+       case LOGGER_GET_NEXT_ENTRY_LEN:
+               if (!(file->f_mode & FMODE_READ)) {
+                       ret = -EBADF;
+                       break;
+               }
+               reader = file->private_data;
+               if (log->w_off != reader->r_off)
+                       ret = get_entry_len(log, reader->r_off);
+               else
+                       ret = 0;
+               break;
+       case LOGGER_FLUSH_LOG:
+               if (!(file->f_mode & FMODE_WRITE)) {
+                       ret = -EBADF;
+                       break;
+               }
+               list_for_each_entry(reader, &log->readers, list)
+                       reader->r_off = log->w_off;
+               log->head = log->w_off;
+               ret = 0;
+               break;
+       }
+
+       mutex_unlock(&log->mutex);
+
+       return ret;
+}
+
+static const struct file_operations logger_fops = {
+       .owner = THIS_MODULE,
+       .read = logger_read,
+       .aio_write = logger_aio_write,
+       .poll = logger_poll,
+       .unlocked_ioctl = logger_ioctl,
+       .compat_ioctl = logger_ioctl,
+       .open = logger_open,
+       .release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+       .buffer = _buf_ ## VAR, \
+       .misc = { \
+               .minor = MISC_DYNAMIC_MINOR, \
+               .name = NAME, \
+               .fops = &logger_fops, \
+               .parent = NULL, \
+       }, \
+       .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+       .readers = LIST_HEAD_INIT(VAR .readers), \
+       .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+       .w_off = 0, \
+       .head = 0, \
+       .size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
+DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+       if (log_main.misc.minor == minor)
+               return &log_main;
+       if (log_events.misc.minor == minor)
+               return &log_events;
+       if (log_radio.misc.minor == minor)
+               return &log_radio;
+       if (log_system.misc.minor == minor)
+               return &log_system;
+       return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+       int ret;
+
+       ret = misc_register(&log->misc);
+       if (unlikely(ret)) {
+               printk(KERN_ERR "logger: failed to register misc "
+                      "device for log '%s'!\n", log->misc.name);
+               return ret;
+       }
+
+       printk(KERN_INFO "logger: created %luK log '%s'\n",
+              (unsigned long) log->size >> 10, log->misc.name);
+
+       return 0;
+}
+
+static int __init logger_init(void)
+{
+       int ret;
+
+       ret = init_log(&log_main);
+       if (unlikely(ret))
+               goto out;
+
+       ret = init_log(&log_events);
+       if (unlikely(ret))
+               goto out;
+
+       ret = init_log(&log_radio);
+       if (unlikely(ret))
+               goto out;
+
+       ret = init_log(&log_system);
+       if (unlikely(ret))
+               goto out;
+
+out:
+       return ret;
+}
+device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644 (file)
index 0000000..2cb06e9
--- /dev/null
@@ -0,0 +1,49 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Header of each record stored in a logger ring buffer.  Userspace
+ * readers receive these entries verbatim, so the layout is ABI.
+ */
+struct logger_entry {
+       __u16           len;    /* length of the payload */
+       __u16           __pad;  /* no matter what, we get 2 bytes of padding */
+       __s32           pid;    /* generating process's pid */
+       __s32           tid;    /* generating process's tid */
+       __s32           sec;    /* seconds since Epoch */
+       __s32           nsec;   /* nanoseconds */
+       char            msg[0]; /* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO       "log_radio"     /* radio-related messages */
+#define LOGGER_LOG_EVENTS      "log_events"    /* system/hardware events */
+#define LOGGER_LOG_SYSTEM      "log_system"    /* system/framework messages */
+#define LOGGER_LOG_MAIN                "log_main"      /* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN           (4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD       \
+       (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO     0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE                _IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN             _IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN      _IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG               _IO(__LOGGERIO, 4) /* flush log */
+
+#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
new file mode 100644 (file)
index 0000000..b91e4bc
--- /dev/null
@@ -0,0 +1,189 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * The lowmemorykiller driver lets user-space specify a set of memory thresholds
+ * where processes with a range of oom_score_adj values will get killed. Specify
+ * the minimum oom_score_adj values in
+ * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
+ * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
+ * separated list of numbers in ascending order.
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
+ * processes with a oom_score_adj value of 8 or higher when the free memory
+ * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or
+ * higher when the free memory drops below 1024 pages.
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked this can be very inaccurate
+ * and processes may not get killed until the normal oom killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+#include <linux/profile.h>
+#include <linux/notifier.h>
+
+/* Verbosity threshold for lowmem_print(); higher value => more output. */
+static uint32_t lowmem_debug_level = 2;
+/* oom_score_adj cutoffs, ascending, paired entry-for-entry with minfree. */
+static int lowmem_adj[6] = {
+       0,
+       1,
+       6,
+       12,
+};
+static int lowmem_adj_size = 4;
+/* Free/file page thresholds (in pages); size comments assume 4K pages. */
+static int lowmem_minfree[6] = {
+       3 * 512,        /* 6MB */
+       2 * 1024,       /* 8MB */
+       4 * 1024,       /* 16MB */
+       16 * 1024,      /* 64MB */
+};
+static int lowmem_minfree_size = 4;
+
+/* Don't pick a new victim before this jiffy while one is still exiting. */
+static unsigned long lowmem_deathpending_timeout;
+
+/* printk gated on lowmem_debug_level. */
+#define lowmem_print(level, x...)                      \
+       do {                                            \
+               if (lowmem_debug_level >= (level))      \
+                       printk(x);                      \
+       } while (0)
+
+/*
+ * Shrinker callback: when both free and file-backed pages drop below one
+ * of the lowmem_minfree thresholds, SIGKILL the "biggest" task whose
+ * oom_score_adj is at or above the paired lowmem_adj cutoff.  Returns an
+ * estimate of reclaimable pages, per the shrinker contract.
+ */
+static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+       struct task_struct *tsk;
+       struct task_struct *selected = NULL;
+       int rem = 0;
+       int tasksize;
+       int i;
+       int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+       int selected_tasksize = 0;
+       int selected_oom_score_adj;
+       int array_size = ARRAY_SIZE(lowmem_adj);
+       int other_free = global_page_state(NR_FREE_PAGES);
+       int other_file = global_page_state(NR_FILE_PAGES) -
+                                               global_page_state(NR_SHMEM);
+
+       if (lowmem_adj_size < array_size)
+               array_size = lowmem_adj_size;
+       if (lowmem_minfree_size < array_size)
+               array_size = lowmem_minfree_size;
+       /* Pick the lowest adj cutoff whose memory threshold we are under. */
+       for (i = 0; i < array_size; i++) {
+               if (other_free < lowmem_minfree[i] &&
+                   other_file < lowmem_minfree[i]) {
+                       min_score_adj = lowmem_adj[i];
+                       break;
+               }
+       }
+       if (sc->nr_to_scan > 0)
+               lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
+                               sc->nr_to_scan, sc->gfp_mask, other_free,
+                               other_file, min_score_adj);
+       rem = global_page_state(NR_ACTIVE_ANON) +
+               global_page_state(NR_ACTIVE_FILE) +
+               global_page_state(NR_INACTIVE_ANON) +
+               global_page_state(NR_INACTIVE_FILE);
+       /* Query-only call, or no threshold crossed: nothing to kill. */
+       if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+               lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
+                            sc->nr_to_scan, sc->gfp_mask, rem);
+               return rem;
+       }
+       selected_oom_score_adj = min_score_adj;
+
+       rcu_read_lock();
+       for_each_process(tsk) {
+               struct task_struct *p;
+               int oom_score_adj;
+
+               if (tsk->flags & PF_KTHREAD)
+                       continue;
+
+               p = find_lock_task_mm(tsk);
+               if (!p)
+                       continue;
+
+               /* A previous victim is still exiting; back off for now. */
+               if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+                   time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+                       task_unlock(p);
+                       rcu_read_unlock();
+                       return 0;
+               }
+               oom_score_adj = p->signal->oom_score_adj;
+               if (oom_score_adj < min_score_adj) {
+                       task_unlock(p);
+                       continue;
+               }
+               tasksize = get_mm_rss(p->mm);
+               task_unlock(p);
+               if (tasksize <= 0)
+                       continue;
+               /* Prefer higher adj; break ties by larger RSS. */
+               if (selected) {
+                       if (oom_score_adj < selected_oom_score_adj)
+                               continue;
+                       if (oom_score_adj == selected_oom_score_adj &&
+                           tasksize <= selected_tasksize)
+                               continue;
+               }
+               selected = p;
+               selected_tasksize = tasksize;
+               selected_oom_score_adj = oom_score_adj;
+               lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+                            p->pid, p->comm, oom_score_adj, tasksize);
+       }
+       if (selected) {
+               lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+                            selected->pid, selected->comm,
+                            selected_oom_score_adj, selected_tasksize);
+               lowmem_deathpending_timeout = jiffies + HZ;
+               send_sig(SIGKILL, selected, 0);
+               set_tsk_thread_flag(selected, TIF_MEMDIE);
+               rem -= selected_tasksize;
+       }
+       lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+                    sc->nr_to_scan, sc->gfp_mask, rem);
+       rcu_read_unlock();
+       return rem;
+}
+
+/* Registered with reclaim; the "cost" module parameter tunes .seeks. */
+static struct shrinker lowmem_shrinker = {
+       .shrink = lowmem_shrink,
+       .seeks = DEFAULT_SEEKS * 16
+};
+
+/* Hook into memory reclaim so pressure drives lowmem_shrink(). */
+static int __init lowmem_init(void)
+{
+       register_shrinker(&lowmem_shrinker);
+       return 0;
+}
+
+/* Module unload: detach from reclaim. */
+static void __exit lowmem_exit(void)
+{
+       unregister_shrinker(&lowmem_shrinker);
+}
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+                        S_IRUGO | S_IWUSR);
+/*
+ * lowmem_minfree is declared as an int array; register it with the
+ * matching "int" param type so sysfs reads/writes go through ops that
+ * agree with the underlying storage (was mismatched as "uint").
+ */
+module_param_array_named(minfree, lowmem_minfree, int, &lowmem_minfree_size,
+                        S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
new file mode 100644 (file)
index 0000000..86f0837
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "persistent_ram.h"
+
+/*
+ * Header at the start of the reserved RAM region.
+ * @sig:   PERSISTENT_RAM_SIG when the region holds valid data
+ * @start: current write offset into data[] (wraps to 0 when full)
+ * @size:  number of valid bytes in data[] (grows until buffer capacity)
+ */
+struct persistent_ram_buffer {
+       uint32_t    sig;
+       uint32_t    start;
+       uint32_t    size;
+       uint8_t     data[0];
+};
+
+#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
+
+/* All registered persistent_ram regions; only walked during __init. */
+static __initdata LIST_HEAD(persistent_ram_list);
+
+/*
+ * Compute the Reed-Solomon parity for @len bytes of @data into @ecc.
+ * The VLA is bounded by prz->ecc_size (16, set in persistent_ram_init_ecc).
+ */
+static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+       uint8_t *data, size_t len, uint8_t *ecc)
+{
+       int i;
+       uint16_t par[prz->ecc_size];
+
+       /* Initialize the parity buffer */
+       memset(par, 0, sizeof(par));
+       encode_rs8(prz->rs_decoder, data, len, par, 0);
+       for (i = 0; i < prz->ecc_size; i++)
+               ecc[i] = par[i];
+}
+
+/*
+ * Verify/correct @len bytes of @data against the parity in @ecc.
+ * Returns the number of corrected symbols, or < 0 if uncorrectable
+ * (see callers, which count corrected bytes vs. bad blocks).
+ */
+static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
+       void *data, size_t len, uint8_t *ecc)
+{
+       int i;
+       uint16_t par[prz->ecc_size];
+
+       for (i = 0; i < prz->ecc_size; i++)
+               par[i] = ecc[i];
+       return decode_rs8(prz->rs_decoder, data, par, len,
+                               NULL, 0, NULL, 0, NULL);
+}
+
+/*
+ * Re-encode parity for every ECC block overlapping the region just
+ * written, [buffer->start, buffer->start + count).  No-op without ECC.
+ */
+static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
+       unsigned int count)
+{
+       struct persistent_ram_buffer *buffer = prz->buffer;
+       uint8_t *buffer_end = buffer->data + prz->buffer_size;
+       uint8_t *block;
+       uint8_t *par;
+       int ecc_block_size = prz->ecc_block_size;
+       int ecc_size = prz->ecc_size;
+       int size = prz->ecc_block_size;
+
+       if (!prz->ecc)
+               return;
+
+       /* Round start down to its ECC block boundary. */
+       block = buffer->data + (buffer->start & ~(ecc_block_size - 1));
+       par = prz->par_buffer +
+             (buffer->start / ecc_block_size) * prz->ecc_size;
+       do {
+               /* Last block may be short of a full ecc_block_size. */
+               if (block + ecc_block_size > buffer_end)
+                       size = buffer_end - block;
+               persistent_ram_encode_rs8(prz, block, size, par);
+               block += ecc_block_size;
+               par += ecc_size;
+       } while (block < buffer->data + buffer->start + count);
+}
+
+/* Refresh the parity that protects the buffer header itself. */
+static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
+{
+       struct persistent_ram_buffer *buffer = prz->buffer;
+
+       if (!prz->ecc)
+               return;
+
+       persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
+                                 prz->par_header);
+}
+
+/*
+ * Scan the entire resident buffer block by block, correcting what the
+ * RS code can and accumulating corrected_bytes / bad_blocks statistics.
+ */
+static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
+{
+       struct persistent_ram_buffer *buffer = prz->buffer;
+       uint8_t *block;
+       uint8_t *par;
+
+       if (!prz->ecc)
+               return;
+
+       block = buffer->data;
+       par = prz->par_buffer;
+       while (block < buffer->data + buffer->size) {
+               int numerr;
+               int size = prz->ecc_block_size;
+               if (block + size > buffer->data + prz->buffer_size)
+                       size = buffer->data + prz->buffer_size - block;
+               numerr = persistent_ram_decode_rs8(prz, block, size, par);
+               if (numerr > 0) {
+                       pr_devel("persistent_ram: error in block %p, %d\n",
+                              block, numerr);
+                       prz->corrected_bytes += numerr;
+               } else if (numerr < 0) {
+                       pr_devel("persistent_ram: uncorrectable error in block %p\n",
+                               block);
+                       prz->bad_blocks++;
+               }
+               block += prz->ecc_block_size;
+               par += prz->ecc_size;
+       }
+}
+
+/*
+ * Carve the ECC parity area out of the tail of the data region and set
+ * up the RS(8) decoder.  Shrinks prz->buffer_size to the usable data
+ * portion; parity for data blocks is followed by parity for the header.
+ * Also decodes/corrects the header of any pre-existing contents.
+ */
+static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+       size_t buffer_size)
+{
+       int numerr;
+       struct persistent_ram_buffer *buffer = prz->buffer;
+       int ecc_blocks;
+
+       if (!prz->ecc)
+               return 0;
+
+       prz->ecc_block_size = 128;
+       prz->ecc_size = 16;
+       prz->ecc_symsize = 8;
+       prz->ecc_poly = 0x11d;
+
+       ecc_blocks = DIV_ROUND_UP(prz->buffer_size, prz->ecc_block_size);
+       prz->buffer_size -= (ecc_blocks + 1) * prz->ecc_size;
+
+       /*
+        * If the region is too small the unsigned subtraction above wraps,
+        * which this comparison catches.
+        */
+       if (prz->buffer_size > buffer_size) {
+               pr_err("persistent_ram: invalid size %zu, non-ecc datasize %zu\n",
+                      buffer_size, prz->buffer_size);
+               return -EINVAL;
+       }
+
+       prz->par_buffer = buffer->data + prz->buffer_size;
+       prz->par_header = prz->par_buffer + ecc_blocks * prz->ecc_size;
+
+       /*
+        * first consecutive root is 0
+        * primitive element to generate roots = 1
+        */
+       prz->rs_decoder = init_rs(prz->ecc_symsize, prz->ecc_poly, 0, 1,
+                                 prz->ecc_size);
+       if (prz->rs_decoder == NULL) {
+               pr_info("persistent_ram: init_rs failed\n");
+               return -EINVAL;
+       }
+
+       prz->corrected_bytes = 0;
+       prz->bad_blocks = 0;
+
+       numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
+                                          prz->par_header);
+       if (numerr > 0) {
+               pr_info("persistent_ram: error in header, %d\n", numerr);
+               prz->corrected_bytes += numerr;
+       } else if (numerr < 0) {
+               pr_info("persistent_ram: uncorrectable error in header\n");
+               prz->bad_blocks++;
+       }
+
+       return 0;
+}
+
+/*
+ * Format a human-readable ECC summary into @str.  With str == NULL and
+ * len == 0 this returns the length that would be written — excluding
+ * the trailing NUL, per snprintf() — so callers sizing a buffer from
+ * the return value must allocate ret + 1 bytes.
+ */
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+       char *str, size_t len)
+{
+       ssize_t ret;
+
+       if (prz->corrected_bytes || prz->bad_blocks)
+               ret = snprintf(str, len, ""
+                       "\n%d Corrected bytes, %d unrecoverable blocks\n",
+                       prz->corrected_bytes, prz->bad_blocks);
+       else
+               ret = snprintf(str, len, "\nNo errors detected\n");
+
+       return ret;
+}
+
+/*
+ * Copy @count bytes at the current write offset and refresh the ECC.
+ * Caller (persistent_ram_write) guarantees the copy does not run past
+ * the end of the data area.
+ */
+static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
+       const void *s, unsigned int count)
+{
+       struct persistent_ram_buffer *buffer = prz->buffer;
+       memcpy(buffer->data + buffer->start, s, count);
+       persistent_ram_update_ecc(prz, count);
+}
+
+/*
+ * Snapshot the previous boot's log into a kmalloc'd buffer, unwrapping
+ * the ring so old_log holds the bytes oldest-first.  On allocation
+ * failure the old log is simply dropped.
+ */
+static void __init
+persistent_ram_save_old(struct persistent_ram_zone *prz)
+{
+       struct persistent_ram_buffer *buffer = prz->buffer;
+       size_t old_log_size = buffer->size;
+       char *dest;
+
+       persistent_ram_ecc_old(prz);
+
+       dest = kmalloc(old_log_size, GFP_KERNEL);
+       if (dest == NULL) {
+               pr_err("persistent_ram: failed to allocate buffer\n");
+               return;
+       }
+
+       prz->old_log = dest;
+       prz->old_log_size = old_log_size;
+       /* Tail of the ring (oldest data) first, then the wrapped head. */
+       memcpy(prz->old_log,
+              &buffer->data[buffer->start], buffer->size - buffer->start);
+       memcpy(prz->old_log + buffer->size - buffer->start,
+              &buffer->data[0], buffer->start);
+}
+
+/*
+ * Append @count bytes to the ring.  If @count exceeds the buffer
+ * capacity, only the last buffer_size bytes are kept.  Always returns
+ * the original @count.
+ */
+int notrace persistent_ram_write(struct persistent_ram_zone *prz,
+       const void *s, unsigned int count)
+{
+       int rem;
+       int c = count;
+       struct persistent_ram_buffer *buffer = prz->buffer;
+
+       if (c > prz->buffer_size) {
+               s += c - prz->buffer_size;
+               c = prz->buffer_size;
+       }
+       /* Write would pass the end: emit the tail, then wrap to offset 0. */
+       rem = prz->buffer_size - buffer->start;
+       if (rem < c) {
+               persistent_ram_update(prz, s, rem);
+               s += rem;
+               c -= rem;
+               buffer->start = 0;
+               buffer->size = prz->buffer_size;
+       }
+       persistent_ram_update(prz, s, c);
+
+       buffer->start += c;
+       if (buffer->size < prz->buffer_size)
+               buffer->size += c;
+       persistent_ram_update_header_ecc(prz);
+
+       return count;
+}
+
+/* Size of the saved previous-boot log (0 if none was captured). */
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
+{
+       return prz->old_log_size;
+}
+
+/* Pointer to the saved previous-boot log (NULL if none). */
+void *persistent_ram_old(struct persistent_ram_zone *prz)
+{
+       return prz->old_log;
+}
+
+/* Release the saved previous-boot log and reset the bookkeeping. */
+void persistent_ram_free_old(struct persistent_ram_zone *prz)
+{
+       kfree(prz->old_log);
+       prz->old_log = NULL;
+       prz->old_log_size = 0;
+}
+
+/*
+ * Map the reserved physical range [start, start + size) into the kernel
+ * with vmap() using a noncached protection, and point prz->buffer at it.
+ * prz->buffer_size becomes the data capacity after the header.
+ */
+static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+               struct persistent_ram_zone *prz)
+{
+       struct page **pages;
+       phys_addr_t page_start;
+       unsigned int page_count;
+       pgprot_t prot;
+       unsigned int i;
+
+       /* Round down to a page boundary and cover the tail page too. */
+       page_start = start - offset_in_page(start);
+       page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+
+       prot = pgprot_noncached(PAGE_KERNEL);
+
+       pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
+       if (!pages) {
+               pr_err("%s: Failed to allocate array for %u pages\n", __func__,
+                       page_count);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < page_count; i++) {
+               phys_addr_t addr = page_start + i * PAGE_SIZE;
+               pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+       }
+       prz->vaddr = vmap(pages, page_count, VM_MAP, prot);
+       kfree(pages);
+       if (!prz->vaddr) {
+               pr_err("%s: Failed to map %u pages\n", __func__, page_count);
+               return -ENOMEM;
+       }
+
+       prz->buffer = prz->vaddr + offset_in_page(start);
+       prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
+
+       return 0;
+}
+
+/*
+ * Look up @name among the descriptors registered by
+ * persistent_ram_early_init() and map its sub-range of the region.
+ * Returns -EINVAL if no descriptor matches.
+ */
+static int __init persistent_ram_buffer_init(const char *name,
+               struct persistent_ram_zone *prz)
+{
+       int i;
+       struct persistent_ram *ram;
+       struct persistent_ram_descriptor *desc;
+       phys_addr_t start;
+
+       list_for_each_entry(ram, &persistent_ram_list, node) {
+               start = ram->start;
+               for (i = 0; i < ram->num_descs; i++) {
+                       desc = &ram->descs[i];
+                       if (!strcmp(desc->name, name))
+                               return persistent_ram_buffer_map(start,
+                                               desc->size, prz);
+                       start += desc->size;
+               }
+       }
+
+       return -EINVAL;
+}
+
+/*
+ * Allocate and initialize a zone bound to the region whose descriptor
+ * name matches dev_name(dev).  Valid previous contents are preserved
+ * via persistent_ram_save_old() before the buffer is reset for reuse.
+ * NOTE(review): the err path only kfree()s prz; a successfully vmap()ed
+ * region (and an initialized rs_decoder) would leak if a later step
+ * fails -- verify this is acceptable for __init-only usage.
+ */
+static  __init
+struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
+{
+       struct persistent_ram_zone *prz;
+       int ret = -ENOMEM;
+
+       prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
+       if (!prz) {
+               pr_err("persistent_ram: failed to allocate persistent ram zone\n");
+               goto err;
+       }
+
+       INIT_LIST_HEAD(&prz->node);
+
+       ret = persistent_ram_buffer_init(dev_name(dev), prz);
+       if (ret) {
+               pr_err("persistent_ram: failed to initialize buffer\n");
+               goto err;
+       }
+
+       prz->ecc = ecc;
+       ret = persistent_ram_init_ecc(prz, prz->buffer_size);
+       if (ret)
+               goto err;
+
+       if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
+               if (prz->buffer->size > prz->buffer_size
+                   || prz->buffer->start > prz->buffer->size)
+                       pr_info("persistent_ram: found existing invalid buffer, size %d, start %d\n",
+                              prz->buffer->size, prz->buffer->start);
+               else {
+                       pr_info("persistent_ram: found existing buffer, size %d, start %d\n",
+                              prz->buffer->size, prz->buffer->start);
+                       persistent_ram_save_old(prz);
+               }
+       } else {
+               pr_info("persistent_ram: no valid data in buffer (sig = 0x%08x)\n",
+                       prz->buffer->sig);
+       }
+
+       prz->buffer->sig = PERSISTENT_RAM_SIG;
+       prz->buffer->start = 0;
+       prz->buffer->size = 0;
+
+       return prz;
+err:
+       kfree(prz);
+       return ERR_PTR(ret);
+}
+
+/* Public __init wrapper around __persistent_ram_init(). */
+struct persistent_ram_zone * __init
+persistent_ram_init_ringbuffer(struct device *dev, bool ecc)
+{
+       return __persistent_ram_init(dev, ecc);
+}
+
+/*
+ * Reserve the region's physical range via memblock and queue the
+ * descriptor so later zone initialization can find it by name.
+ * Must run early, before the page allocator claims the memory.
+ */
+int __init persistent_ram_early_init(struct persistent_ram *ram)
+{
+       int ret;
+
+       ret = memblock_reserve(ram->start, ram->size);
+       if (ret) {
+               pr_err("Failed to reserve persistent memory from %08lx-%08lx\n",
+                       (long)ram->start, (long)(ram->start + ram->size - 1));
+               return ret;
+       }
+
+       list_add_tail(&ram->node, &persistent_ram_list);
+
+       pr_info("Initialized persistent memory from %08lx-%08lx\n",
+               (long)ram->start, (long)(ram->start + ram->size - 1));
+
+       return 0;
+}
diff --git a/drivers/staging/android/persistent_ram.h b/drivers/staging/android/persistent_ram.h
new file mode 100644 (file)
index 0000000..f41e208
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_PERSISTENT_RAM_H__
+#define __LINUX_PERSISTENT_RAM_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct persistent_ram_buffer;
+
+struct persistent_ram_descriptor {
+       const char      *name;
+       phys_addr_t     size;
+};
+
+struct persistent_ram {
+       phys_addr_t     start;
+       phys_addr_t     size;
+
+       int                                     num_descs;
+       struct persistent_ram_descriptor        *descs;
+
+       struct list_head node;
+};
+
+struct persistent_ram_zone {
+       struct list_head node;
+       void *vaddr;
+       struct persistent_ram_buffer *buffer;
+       size_t buffer_size;
+
+       /* ECC correction */
+       bool ecc;
+       char *par_buffer;
+       char *par_header;
+       struct rs_control *rs_decoder;
+       int corrected_bytes;
+       int bad_blocks;
+       int ecc_block_size;
+       int ecc_size;
+       int ecc_symsize;
+       int ecc_poly;
+
+       char *old_log;
+       size_t old_log_size;
+       size_t old_log_footer_size;
+       bool early;
+};
+
+int persistent_ram_early_init(struct persistent_ram *ram);
+
+struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
+               bool ecc);
+
+int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
+       unsigned int count);
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
+void *persistent_ram_old(struct persistent_ram_zone *prz);
+void persistent_ram_free_old(struct persistent_ram_zone *prz);
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+       char *str, size_t len);
+
+#endif
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
new file mode 100644 (file)
index 0000000..2c5d35b
--- /dev/null
@@ -0,0 +1,179 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include "persistent_ram.h"
+#include "ram_console.h"
+
+/* Zone backing both the "ram" console and /proc/last_kmsg. */
+static struct persistent_ram_zone *ram_console_zone;
+/* Optional platform-provided text appended after the old log. */
+static const char *bootinfo;
+static size_t bootinfo_size;
+
+/* Console hook: mirror every console write into the persistent zone. */
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+       persistent_ram_write(console->data, s, count);
+}
+
+/* Console definition; .data is filled with the zone in probe. */
+static struct console ram_console = {
+       .name   = "ram",
+       .write  = ram_console_write,
+       .flags  = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
+       .index  = -1,
+};
+
+/* Toggle the CON_ENABLED flag on the "ram" console at runtime. */
+void ram_console_enable_console(int enabled)
+{
+       ram_console.flags = enabled ? (ram_console.flags | CON_ENABLED)
+                                   : (ram_console.flags & ~CON_ENABLED);
+}
+
+/*
+ * Bind the "ram" console to the persistent RAM zone named after the
+ * platform device and start mirroring console output into it.
+ */
+static int __init ram_console_probe(struct platform_device *pdev)
+{
+       struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+       struct persistent_ram_zone *prz;
+
+       prz = persistent_ram_init_ringbuffer(&pdev->dev, true);
+       if (IS_ERR(prz))
+               return PTR_ERR(prz);
+
+
+       if (pdata) {
+               /* kstrdup() failure just drops the optional boot info. */
+               bootinfo = kstrdup(pdata->bootinfo, GFP_KERNEL);
+               if (bootinfo)
+                       bootinfo_size = strlen(bootinfo);
+       }
+
+       ram_console_zone = prz;
+       ram_console.data = prz;
+
+       register_console(&ram_console);
+
+       return 0;
+}
+
+/* Matches the "ram_console" platform device; probe is __init-only. */
+static struct platform_driver ram_console_driver = {
+       .driver         = {
+               .name   = "ram_console",
+       },
+};
+
+/* One-shot probe at init time; the driver has no remove path. */
+static int __init ram_console_module_init(void)
+{
+       return platform_driver_probe(&ram_console_driver, ram_console_probe);
+}
+
+#ifndef CONFIG_PRINTK
+#define dmesg_restrict 0
+#endif
+
+/*
+ * Read handler for /proc/last_kmsg: presents, in order, the previous
+ * boot's console log, an ECC status line, and optional platform boot
+ * info, honoring dmesg_restrict.
+ */
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+                                   size_t len, loff_t *offset)
+{
+       loff_t pos = *offset;
+       ssize_t count;
+       struct persistent_ram_zone *prz = ram_console_zone;
+       size_t old_log_size = persistent_ram_old_size(prz);
+       const char *old_log = persistent_ram_old(prz);
+       char *str;
+       int ret;
+
+       if (dmesg_restrict && !capable(CAP_SYSLOG))
+               return -EPERM;
+
+       /* Main last_kmsg log */
+       if (pos < old_log_size) {
+               count = min(len, (size_t)(old_log_size - pos));
+               if (copy_to_user(buf, old_log + pos, count))
+                       return -EFAULT;
+               goto out;
+       }
+
+       /* ECC correction notice */
+       pos -= old_log_size;
+       count = persistent_ram_ecc_string(prz, NULL, 0);
+       if (pos < count) {
+               /*
+                * persistent_ram_ecc_string() returns the snprintf length,
+                * which excludes the terminating NUL, so the buffer must
+                * hold count + 1 bytes or the snprintf below overflows it
+                * by one byte.
+                */
+               str = kmalloc(count + 1, GFP_KERNEL);
+               if (!str)
+                       return -ENOMEM;
+               persistent_ram_ecc_string(prz, str, count + 1);
+               count = min(len, (size_t)(count - pos));
+               ret = copy_to_user(buf, str + pos, count);
+               kfree(str);
+               if (ret)
+                       return -EFAULT;
+               goto out;
+       }
+
+       /* Boot info passed through pdata */
+       pos -= count;
+       if (pos < bootinfo_size) {
+               count = min(len, (size_t)(bootinfo_size - pos));
+               if (copy_to_user(buf, bootinfo + pos, count))
+                       return -EFAULT;
+               goto out;
+       }
+
+       /* EOF */
+       return 0;
+
+out:
+       *offset += count;
+       return count;
+}
+
+/* /proc/last_kmsg file operations (read-only). */
+static const struct file_operations ram_console_file_ops = {
+       .owner = THIS_MODULE,
+       .read = ram_console_read_old,
+};
+
+/*
+ * If the zone captured a log from the previous boot, expose it (plus
+ * the ECC summary and boot info) as /proc/last_kmsg.  Always returns 0:
+ * failure to create the proc entry is not fatal, the saved log is
+ * simply freed.
+ */
+static int __init ram_console_late_init(void)
+{
+       struct proc_dir_entry *entry;
+       struct persistent_ram_zone *prz = ram_console_zone;
+
+       if (!prz)
+               return 0;
+
+       if (persistent_ram_old_size(prz) == 0)
+               return 0;
+
+       entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+       if (!entry) {
+               printk(KERN_ERR "ram_console: failed to create proc entry\n");
+               persistent_ram_free_old(prz);
+               return 0;
+       }
+
+       entry->proc_fops = &ram_console_file_ops;
+       entry->size = persistent_ram_old_size(prz) +
+               persistent_ram_ecc_string(prz, NULL, 0) +
+               bootinfo_size;
+
+       return 0;
+}
+
+late_initcall(ram_console_late_init);
+module_init(ram_console_module_init);
similarity index 65%
rename from arch/arm/mach-tegra/include/mach/vmalloc.h
rename to drivers/staging/android/ram_console.h
index fd6aa65..9f1125c 100644 (file)
@@ -1,12 +1,6 @@
 /*
- * arch/arm/mach-tegra/include/mach/vmalloc.h
- *
  * Copyright (C) 2010 Google, Inc.
  *
- * Author:
- *     Colin Cross <ccross@google.com>
- *     Erik Gilling <konkers@google.com>
- *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  * may be copied, distributed, and modified under those terms.
  *
  */
 
-#ifndef __MACH_TEGRA_VMALLOC_H
-#define __MACH_TEGRA_VMALLOC_H
-
-#include <asm/sizes.h>
+#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
 
-#define VMALLOC_END        0xFE000000UL
+struct ram_console_platform_data {
+       const char *bootinfo;
+};
 
-#endif
+#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */
diff --git a/drivers/staging/android/switch/Kconfig b/drivers/staging/android/switch/Kconfig
new file mode 100644 (file)
index 0000000..36846f6
--- /dev/null
@@ -0,0 +1,11 @@
+menuconfig ANDROID_SWITCH
+       tristate "Android Switch class support"
+       help
+         Say Y here to enable Android switch class support. This allows
+         monitoring switches by userspace via sysfs and uevent.
+
+config ANDROID_SWITCH_GPIO
+       tristate "Android GPIO Switch support"
+       depends on GENERIC_GPIO && ANDROID_SWITCH
+       help
+         Say Y here to enable GPIO based switch support.
diff --git a/drivers/staging/android/switch/Makefile b/drivers/staging/android/switch/Makefile
new file mode 100644 (file)
index 0000000..d76bfdc
--- /dev/null
@@ -0,0 +1,4 @@
+# Android Switch Class Driver
+obj-$(CONFIG_ANDROID_SWITCH)           += switch_class.o
+obj-$(CONFIG_ANDROID_SWITCH_GPIO)      += switch_gpio.o
+
diff --git a/drivers/staging/android/switch/switch.h b/drivers/staging/android/switch/switch.h
new file mode 100644 (file)
index 0000000..4fcb310
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ *  Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+       const char      *name;
+       struct device   *dev;
+       int             index;
+       int             state;
+
+       ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+       ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+       const char *name;
+       unsigned        gpio;
+
+       /* if NULL, switch_dev.name will be printed */
+       const char *name_on;
+       const char *name_off;
+       /* if NULL, "0" or "1" will be printed */
+       const char *state_on;
+       const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+       return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
diff --git a/drivers/staging/android/switch/switch_class.c b/drivers/staging/android/switch/switch_class.c
new file mode 100644 (file)
index 0000000..7468044
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include "switch.h"
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct switch_dev *sdev = (struct switch_dev *)
+               dev_get_drvdata(dev);
+
+       if (sdev->print_state) {
+               int ret = sdev->print_state(sdev, buf);
+               if (ret >= 0)
+                       return ret;
+       }
+       return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct switch_dev *sdev = (struct switch_dev *)
+               dev_get_drvdata(dev);
+
+       if (sdev->print_name) {
+               int ret = sdev->print_name(sdev, buf);
+               if (ret >= 0)
+                       return ret;
+       }
+       return sprintf(buf, "%s\n", sdev->name);
+}
+
+/* Read-only attributes: no store op is provided, so advertising S_IWUSR
+ * would expose files that always fail writes with -EIO. */
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+       char name_buf[120];
+       char state_buf[120];
+       char *prop_buf;
+       char *envp[3];
+       int env_offset = 0;
+       int length;
+
+       if (sdev->state != state) {
+               sdev->state = state;
+
+               prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+               if (prop_buf) {
+                       length = name_show(sdev->dev, NULL, prop_buf);
+                       if (length > 0) {
+                               if (prop_buf[length - 1] == '\n')
+                                       prop_buf[length - 1] = 0;
+                               snprintf(name_buf, sizeof(name_buf),
+                                       "SWITCH_NAME=%s", prop_buf);
+                               envp[env_offset++] = name_buf;
+                       }
+                       length = state_show(sdev->dev, NULL, prop_buf);
+                       if (length > 0) {
+                               if (prop_buf[length - 1] == '\n')
+                                       prop_buf[length - 1] = 0;
+                               snprintf(state_buf, sizeof(state_buf),
+                                       "SWITCH_STATE=%s", prop_buf);
+                               envp[env_offset++] = state_buf;
+                       }
+                       envp[env_offset] = NULL;
+                       kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+                       free_page((unsigned long)prop_buf);
+               } else {
+                       printk(KERN_ERR "out of memory in switch_set_state\n");
+                       kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+       if (!switch_class) {
+               switch_class = class_create(THIS_MODULE, "switch");
+               if (IS_ERR(switch_class))
+                       return PTR_ERR(switch_class);
+               atomic_set(&device_count, 0);
+       }
+
+       return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+       int ret;
+
+       if (!switch_class) {
+               ret = create_switch_class();
+               if (ret < 0)
+                       return ret;
+       }
+
+       sdev->index = atomic_inc_return(&device_count);
+       sdev->dev = device_create(switch_class, NULL,
+               MKDEV(0, sdev->index), NULL, sdev->name);
+       if (IS_ERR(sdev->dev))
+               return PTR_ERR(sdev->dev);
+
+       ret = device_create_file(sdev->dev, &dev_attr_state);
+       if (ret < 0)
+               goto err_create_file_1;
+       ret = device_create_file(sdev->dev, &dev_attr_name);
+       if (ret < 0)
+               goto err_create_file_2;
+
+       dev_set_drvdata(sdev->dev, sdev);
+       sdev->state = 0;
+       return 0;
+
+err_create_file_2:
+       device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+       device_destroy(switch_class, MKDEV(0, sdev->index));
+       printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+       device_remove_file(sdev->dev, &dev_attr_name);
+       device_remove_file(sdev->dev, &dev_attr_state);
+       device_destroy(switch_class, MKDEV(0, sdev->index));
+       dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+       return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+       class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/switch/switch_gpio.c b/drivers/staging/android/switch/switch_gpio.c
new file mode 100644 (file)
index 0000000..38b2c2f
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include "switch.h"
+
+struct gpio_switch_data {
+       struct switch_dev sdev;
+       unsigned gpio;
+       const char *name_on;
+       const char *name_off;
+       const char *state_on;
+       const char *state_off;
+       int irq;
+       struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+       int state;
+       struct gpio_switch_data *data =
+               container_of(work, struct gpio_switch_data, work);
+
+       state = gpio_get_value(data->gpio);
+       switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+       struct gpio_switch_data *switch_data =
+           (struct gpio_switch_data *)dev_id;
+
+       schedule_work(&switch_data->work);
+       return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+       struct gpio_switch_data *switch_data =
+               container_of(sdev, struct gpio_switch_data, sdev);
+       const char *state;
+       if (switch_get_state(sdev))
+               state = switch_data->state_on;
+       else
+               state = switch_data->state_off;
+
+       if (state)
+               return sprintf(buf, "%s\n", state);
+       return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+       struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+       struct gpio_switch_data *switch_data;
+       int ret = 0;
+
+       if (!pdata)
+               return -EBUSY;
+
+       switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+       if (!switch_data)
+               return -ENOMEM;
+
+       switch_data->sdev.name = pdata->name;
+       switch_data->gpio = pdata->gpio;
+       switch_data->name_on = pdata->name_on;
+       switch_data->name_off = pdata->name_off;
+       switch_data->state_on = pdata->state_on;
+       switch_data->state_off = pdata->state_off;
+       switch_data->sdev.print_state = switch_gpio_print_state;
+
+       ret = switch_dev_register(&switch_data->sdev);
+       if (ret < 0)
+               goto err_switch_dev_register;
+
+       ret = gpio_request(switch_data->gpio, pdev->name);
+       if (ret < 0)
+               goto err_request_gpio;
+
+       ret = gpio_direction_input(switch_data->gpio);
+       if (ret < 0)
+               goto err_set_gpio_input;
+
+       INIT_WORK(&switch_data->work, gpio_switch_work);
+
+       switch_data->irq = gpio_to_irq(switch_data->gpio);
+       if (switch_data->irq < 0) {
+               ret = switch_data->irq;
+               goto err_detect_irq_num_failed;
+       }
+
+       /* NOTE(review): level-low trigger can re-fire while the line stays
+        * low; confirm edge vs level semantics on target boards. */
+       ret = request_irq(switch_data->irq, gpio_irq_handler,
+                         IRQF_TRIGGER_LOW, pdev->name, switch_data);
+       if (ret < 0)
+               goto err_request_irq;
+
+       /* remove() retrieves this via platform_get_drvdata() */
+       platform_set_drvdata(pdev, switch_data);
+
+       /* Perform initial detection */
+       gpio_switch_work(&switch_data->work);
+
+       return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+       gpio_free(switch_data->gpio);
+err_request_gpio:
+       switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+       kfree(switch_data);
+
+       return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+       struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+       cancel_work_sync(&switch_data->work);
+       gpio_free(switch_data->gpio);
+       switch_dev_unregister(&switch_data->sdev);
+       kfree(switch_data);
+
+       return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+       .probe          = gpio_switch_probe,
+       .remove         = __devexit_p(gpio_switch_remove),
+       .driver         = {
+               .name   = "switch-gpio",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init gpio_switch_init(void)
+{
+       return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+       platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644 (file)
index 0000000..45c522c
--- /dev/null
@@ -0,0 +1,179 @@
+/* drivers/misc/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+       struct timed_output_dev dev;
+       struct hrtimer timer;
+       spinlock_t lock;
+       unsigned gpio;
+       int max_timeout;
+       u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+       struct timed_gpio_data *data =
+               container_of(timer, struct timed_gpio_data, timer);
+
+       gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+       return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+       struct timed_gpio_data  *data =
+               container_of(dev, struct timed_gpio_data, dev);
+
+       if (hrtimer_active(&data->timer)) {
+               ktime_t r = hrtimer_get_remaining(&data->timer);
+               struct timeval t = ktime_to_timeval(r);
+               return t.tv_sec * 1000 + t.tv_usec / 1000;
+       } else
+               return 0;
+}
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+       struct timed_gpio_data  *data =
+               container_of(dev, struct timed_gpio_data, dev);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&data->lock, flags);
+
+       /* cancel previous timer and set GPIO according to value */
+       hrtimer_cancel(&data->timer);
+       gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+       if (value > 0) {
+               if (value > data->max_timeout)
+                       value = data->max_timeout;
+
+               hrtimer_start(&data->timer,
+                       ktime_set(value / 1000, (value % 1000) * 1000000),
+                       HRTIMER_MODE_REL);
+       }
+
+       spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+       struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+       struct timed_gpio *cur_gpio;
+       struct timed_gpio_data *gpio_data, *gpio_dat;
+       int i, ret;
+
+       if (!pdata)
+               return -EBUSY;
+
+       /* kcalloc checks the num_gpios * element-size multiply for overflow */
+       gpio_data = kcalloc(pdata->num_gpios, sizeof(struct timed_gpio_data),
+                       GFP_KERNEL);
+       if (!gpio_data)
+               return -ENOMEM;
+
+       for (i = 0; i < pdata->num_gpios; i++) {
+               cur_gpio = &pdata->gpios[i];
+               gpio_dat = &gpio_data[i];
+
+               hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+                               HRTIMER_MODE_REL);
+               gpio_dat->timer.function = gpio_timer_func;
+               spin_lock_init(&gpio_dat->lock);
+
+               gpio_dat->dev.name = cur_gpio->name;
+               gpio_dat->dev.get_time = gpio_get_time;
+               gpio_dat->dev.enable = gpio_enable;
+               ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+               if (ret < 0)
+                       goto err_out;
+               ret = timed_output_dev_register(&gpio_dat->dev);
+               if (ret < 0) {
+                       gpio_free(cur_gpio->gpio);
+                       goto err_out;
+               }
+
+               gpio_dat->gpio = cur_gpio->gpio;
+               gpio_dat->max_timeout = cur_gpio->max_timeout;
+               gpio_dat->active_low = cur_gpio->active_low;
+               gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+       }
+
+       platform_set_drvdata(pdev, gpio_data);
+
+       return 0;
+
+err_out:
+       while (--i >= 0) {
+               timed_output_dev_unregister(&gpio_data[i].dev);
+               gpio_free(gpio_data[i].gpio);
+       }
+       kfree(gpio_data);
+
+       return ret;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+       struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+       struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < pdata->num_gpios; i++) {
+               timed_output_dev_unregister(&gpio_data[i].dev);
+               gpio_free(gpio_data[i].gpio);
+       }
+
+       kfree(gpio_data);
+
+       return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+       .probe          = timed_gpio_probe,
+       .remove         = timed_gpio_remove,
+       .driver         = {
+               .name           = TIMED_GPIO_NAME,
+               .owner          = THIS_MODULE,
+       },
+};
+
+static int __init timed_gpio_init(void)
+{
+       return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+       platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
similarity index 58%
rename from arch/arm/mach-msm/include/mach/vmalloc.h
rename to drivers/staging/android/timed_gpio.h
index d138448..d29e169 100644 (file)
@@ -1,6 +1,6 @@
-/* arch/arm/mach-msm/include/mach/vmalloc.h
+/* include/linux/timed_gpio.h
  *
- * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2008 Google, Inc.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- */
+*/
 
-#ifndef __ASM_ARCH_MSM_VMALLOC_H
-#define __ASM_ARCH_MSM_VMALLOC_H
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
 
-#define VMALLOC_END      0xd0000000UL
+#define TIMED_GPIO_NAME "timed-gpio"
 
-#endif
+struct timed_gpio {
+       const char *name;
+       unsigned        gpio;
+       int             max_timeout;
+       u8              active_low;
+};
+
+struct timed_gpio_platform_data {
+       int             num_gpios;
+       struct timed_gpio *gpios;
+};
 
+#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644 (file)
index 0000000..f373422
--- /dev/null
@@ -0,0 +1,123 @@
+/* drivers/misc/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct timed_output_dev *tdev = dev_get_drvdata(dev);
+       int remaining = tdev->get_time(tdev);
+
+       return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(
+               struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t size)
+{
+       struct timed_output_dev *tdev = dev_get_drvdata(dev);
+       int value;
+
+       /* kstrtoint rejects trailing garbage and out-of-range values,
+        * unlike sscanf("%d") which silently accepts both */
+       if (kstrtoint(buf, 0, &value))
+               return -EINVAL;
+
+       tdev->enable(tdev, value);
+
+       return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+
+static int create_timed_output_class(void)
+{
+       if (!timed_output_class) {
+               timed_output_class = class_create(THIS_MODULE, "timed_output");
+               if (IS_ERR(timed_output_class))
+                       return PTR_ERR(timed_output_class);
+               atomic_set(&device_count, 0);
+       }
+
+       return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+       int ret;
+
+       if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+               return -EINVAL;
+
+       ret = create_timed_output_class();
+       if (ret < 0)
+               return ret;
+
+       tdev->index = atomic_inc_return(&device_count);
+       tdev->dev = device_create(timed_output_class, NULL,
+               MKDEV(0, tdev->index), NULL, tdev->name);
+       if (IS_ERR(tdev->dev))
+               return PTR_ERR(tdev->dev);
+
+       ret = device_create_file(tdev->dev, &dev_attr_enable);
+       if (ret < 0)
+               goto err_create_file;
+
+       dev_set_drvdata(tdev->dev, tdev);
+       tdev->state = 0;
+       return 0;
+
+err_create_file:
+       device_destroy(timed_output_class, MKDEV(0, tdev->index));
+       printk(KERN_ERR "timed_output: Failed to register driver %s\n",
+                       tdev->name);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+       device_remove_file(tdev->dev, &dev_attr_enable);
+       device_destroy(timed_output_class, MKDEV(0, tdev->index));
+       dev_set_drvdata(tdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+       return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+       class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644 (file)
index 0000000..ec907ab
--- /dev/null
@@ -0,0 +1,37 @@
+/* include/linux/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+       const char      *name;
+
+       /* enable the output and set the timer */
+       void    (*enable)(struct timed_output_dev *sdev, int timeout);
+
+       /* returns the current number of milliseconds remaining on the timer */
+       int             (*get_time)(struct timed_output_dev *sdev);
+
+       /* private data */
+       struct device   *dev;
+       int             index;
+       int             state;
+};
+
+extern int timed_output_dev_register(struct timed_output_dev *dev);
+extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
index 264be2d..bb64201 100644 (file)
@@ -268,6 +268,8 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
                }
 
                urb->transfer_buffer = dev->stream + (i * AS102_USB_BUF_SIZE);
+               urb->transfer_dma = dev->dma_addr + (i * AS102_USB_BUF_SIZE);
+               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
                urb->transfer_buffer_length = AS102_USB_BUF_SIZE;
 
                dev->stream_urb[i] = urb;
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig
new file mode 100644 (file)
index 0000000..81a7cba
--- /dev/null
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+       tristate "OMAP DRM"
+       depends on DRM && !FB_OMAP2
+       depends on ARCH_OMAP2PLUS
+       select DRM_KMS_HELPER
+       select OMAP2_DSS
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select FB_SYS_FOPS
+       default n
+       help
+         DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+       int "Number of CRTCs"
+       range 1 10
+       default 1  if ARCH_OMAP2 || ARCH_OMAP3
+       default 2  if ARCH_OMAP4
+       depends on DRM_OMAP
+       help
+         Select the number of video overlays which can be used as framebuffers.
+         The remaining overlays are reserved for video.
+
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
new file mode 100644 (file)
index 0000000..4aa9a2f
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o omap_crtc.o omap_encoder.o omap_connector.o omap_fb.o omap_fbdev.o omap_gem.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
new file mode 100644 (file)
index 0000000..17781c9
--- /dev/null
@@ -0,0 +1,32 @@
+TODO
+. check error handling/cleanup paths
+. add drm_plane / overlay support
+. add video decode/encode support (via syslink3 + codec-engine)
+. still some rough edges with flipping.. event back to userspace should
+  really come after VSYNC interrupt
+. where should we do eviction (detach_pages())?  We aren't necessarily
+  accessing the pages via a GART, so maybe we need some other threshold
+  to put a cap on the # of pages that can be pin'd.  (It is mostly only
+  of interest in case you have a swap partition/file.. which a lot of
+  these devices do not.. but it doesn't hurt for the driver to do the
+  right thing anyways.)
+  . Use mm_shrinker to trigger unpinning pages.  Need to figure out how
+    to handle next issue first (I think?)
+  . Note TTM already has some mm_shrinker stuff..  maybe an argument to
+    move to TTM?  Or maybe something that could be factored out in common?
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+  etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+  already.  Possibly this could be refactored out and made more common?
+  There should be some way to do this with less wheel-reinvention.
+. Review DSS vs KMS mismatches.  The omap_dss_device is sort of part encoder,
+  part connector.  Which results in a bit of duct tape to fwd calls from
+  encoder to connector.  Possibly this could be done a bit better.
+
+Userspace:
+. git://github.com/robclark/xf86-video-omap.git
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
new file mode 100644 (file)
index 0000000..5e2856c
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ * drivers/staging/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+       struct drm_connector base;
+       struct omap_dss_device *dssdev;
+};
+
+static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+               struct omap_video_timings *timings)
+{
+       mode->clock = timings->pixel_clock;
+
+       mode->hdisplay = timings->x_res;
+       mode->hsync_start = mode->hdisplay + timings->hfp;
+       mode->hsync_end = mode->hsync_start + timings->hsw;
+       mode->htotal = mode->hsync_end + timings->hbp;
+
+       mode->vdisplay = timings->y_res;
+       mode->vsync_start = mode->vdisplay + timings->vfp;
+       mode->vsync_end = mode->vsync_start + timings->vsw;
+       mode->vtotal = mode->vsync_end + timings->vbp;
+
+       /* note: whether or not it is interlaced, +/- h/vsync, etc,
+        * which should be set in the mode flags, is not exposed in
+        * the omap_video_timings struct.. but hdmi driver tracks
+        * those separately so all we have to do to set the mode
+        * is the way to recover these timings values, and the
+        * omap_dss_driver would do the rest.
+        */
+}
+
+static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+               struct drm_display_mode *mode)
+{
+       timings->pixel_clock = mode->clock;
+
+       timings->x_res = mode->hdisplay;
+       timings->hfp = mode->hsync_start - mode->hdisplay;
+       timings->hsw = mode->hsync_end - mode->hsync_start;
+       timings->hbp = mode->htotal - mode->hsync_end;
+
+       timings->y_res = mode->vdisplay;
+       timings->vfp = mode->vsync_start - mode->vdisplay;
+       timings->vsw = mode->vsync_end - mode->vsync_start;
+       timings->vbp = mode->vtotal - mode->vsync_end;
+}
+
+static void omap_connector_dpms(struct drm_connector *connector, int mode)
+{
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       int old_dpms;
+
+       DBG("%s: %d", dssdev->name, mode);
+
+       old_dpms = connector->dpms;
+
+       /* from off to on, do from crtc to connector */
+       if (mode < old_dpms)
+               drm_helper_connector_dpms(connector, mode);
+
+       if (mode == DRM_MODE_DPMS_ON) {
+               /* store resume info for suspended displays */
+               switch (dssdev->state) {
+               case OMAP_DSS_DISPLAY_SUSPENDED:
+                       dssdev->activate_after_resume = true;
+                       break;
+               case OMAP_DSS_DISPLAY_DISABLED: {
+                       int ret = dssdev->driver->enable(dssdev);
+                       if (ret) {
+                               DBG("%s: failed to enable: %d",
+                                               dssdev->name, ret);
+                               dssdev->driver->disable(dssdev);
+                       }
+                       break;
+               }
+               default:
+                       break;
+               }
+       } else {
+               /* TODO */
+       }
+
+       /* from on to off, do from connector to crtc */
+       if (mode > old_dpms)
+               drm_helper_connector_dpms(connector, mode);
+}
+
+enum drm_connector_status omap_connector_detect(
+               struct drm_connector *connector, bool force)
+{
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       enum drm_connector_status ret;
+
+       if (dssdrv->detect) {
+               if (dssdrv->detect(dssdev)) {
+                       ret = connector_status_connected;
+               } else {
+                       ret = connector_status_disconnected;
+               }
+       } else {
+               ret = connector_status_unknown;
+       }
+
+       VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+       return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+       dssdev->driver->disable(dssdev);
+
+       DBG("%s", omap_connector->dssdev->name);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(omap_connector);
+
+       omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID  512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       struct drm_device *dev = connector->dev;
+       int n = 0;
+
+       DBG("%s", omap_connector->dssdev->name);
+
+       /* if display exposes EDID, then we parse that in the normal way to
+        * build table of supported modes.. otherwise (ie. fixed resolution
+        * LCD panels) we just return a single mode corresponding to the
+        * currently configured timings:
+        */
+       if (dssdrv->read_edid) {
+               void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+               /* kzalloc can fail: only read/parse EDID when buffer valid */
+               if (edid && (dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+                               drm_edid_is_valid(edid)) {
+                       drm_mode_connector_update_edid_property(
+                                       connector, edid);
+                       n = drm_add_edid_modes(connector, edid);
+                       kfree(connector->display_info.raw_edid);
+                       connector->display_info.raw_edid = edid;
+               } else {
+                       drm_mode_connector_update_edid_property(
+                                       connector, NULL);
+                       connector->display_info.raw_edid = NULL;
+                       kfree(edid);
+               }
+       } else {
+               struct drm_display_mode *mode = drm_mode_create(dev);
+               struct omap_video_timings timings;
+
+               dssdrv->get_timings(dssdev, &timings);
+
+               copy_timings_omap_to_drm(mode, &timings);
+
+               mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+               drm_mode_set_name(mode);
+               drm_mode_probed_add(connector, mode);
+
+               n = 1;
+       }
+
+       return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       struct omap_video_timings timings = {0};
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *new_mode;
+       int ret = MODE_BAD;
+
+       copy_timings_drm_to_omap(&timings, mode);
+       mode->vrefresh = drm_mode_vrefresh(mode);
+
+       /* check if vrefresh is still valid; drm_mode_duplicate can OOM */
+       if (!dssdrv->check_timings(dssdev, &timings) &&
+                       (new_mode = drm_mode_duplicate(dev, mode))) {
+               new_mode->clock = timings.pixel_clock;
+               new_mode->vrefresh = 0;
+               if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+                       ret = MODE_OK;
+               drm_mode_destroy(dev, new_mode);
+       }
+
+       DBG("connector: mode %s: "
+                       "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       (ret == MODE_OK) ? "valid" : "invalid",
+                       mode->base.id, mode->name, mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+       return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+               struct drm_connector *connector)
+{
+       int i;
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               struct drm_mode_object *obj;
+
+               if (connector->encoder_ids[i] == 0)
+                       break;
+
+               obj = drm_mode_object_find(connector->dev,
+                               connector->encoder_ids[i],
+                               DRM_MODE_OBJECT_ENCODER);
+
+               if (obj) {
+                       struct drm_encoder *encoder = obj_to_encoder(obj);
+                       struct omap_overlay_manager *mgr =
+                                       omap_encoder_get_manager(encoder);
+                       DBG("%s: found %s", omap_connector->dssdev->name,
+                                       mgr->name);
+                       return encoder;
+               }
+       }
+
+       DBG("%s: no encoder", omap_connector->dssdev->name);
+
+       return NULL;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+       .dpms = omap_connector_dpms,
+       .detect = omap_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+       .get_modes = omap_connector_get_modes,
+       .mode_valid = omap_connector_mode_valid,
+       .best_encoder = omap_connector_attached_encoder,
+};
+
+/* called from encoder when mode is set, to propagate settings to the dssdev */
+void omap_connector_mode_set(struct drm_connector *connector,
+               struct drm_display_mode *mode)
+{
+       struct drm_device *dev = connector->dev;
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+       struct omap_dss_device *dssdev = omap_connector->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       struct omap_video_timings timings;
+
+       copy_timings_drm_to_omap(&timings, mode);
+
+       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       omap_connector->dssdev->name,
+                       mode->base.id, mode->name, mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+       if (dssdrv->check_timings(dssdev, &timings)) {
+               dev_err(dev->dev, "could not set timings\n");
+               return;
+       }
+
+       dssdrv->set_timings(dssdev, &timings);
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+               int x, int y, int w, int h)
+{
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+
+       /* TODO: enable when supported in dss */
+       VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+               int connector_type, struct omap_dss_device *dssdev)
+{
+       struct drm_connector *connector = NULL;
+       struct omap_connector *omap_connector;
+
+       DBG("%s", dssdev->name);
+
+       omap_dss_get_device(dssdev);
+
+       omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+       if (!omap_connector) {
+               dev_err(dev->dev, "could not allocate connector\n");
+               goto fail;
+       }
+
+       omap_connector->dssdev = dssdev;
+       connector = &omap_connector->base;
+
+       drm_connector_init(dev, connector, &omap_connector_funcs,
+                               connector_type);
+       drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+       if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+               connector->polled = 0;
+       else
+#endif
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                               DRM_CONNECTOR_POLL_DISCONNECT;
+
+       connector->interlace_allowed = 1;
+       connector->doublescan_allowed = 0;
+
+       drm_sysfs_connector_add(connector);
+
+       return connector;
+
+fail:
+       if (connector) {
+               omap_connector_destroy(connector);
+       }
+
+       return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
new file mode 100644 (file)
index 0000000..fd09bcf
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * drivers/staging/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_mode.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+       struct drm_crtc base;
+       struct omap_overlay *ovl;
+       struct omap_overlay_info info;
+       int id;
+
+       /* if there is a pending flip, this will be non-null: */
+       struct drm_pending_vblank_event *event;
+};
+
+/* push changes down to dss2 */
+static int commit(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct omap_overlay *ovl = omap_crtc->ovl;
+       struct omap_overlay_info *info = &omap_crtc->info;
+       int ret;
+
+       DBG("%s", omap_crtc->ovl->name);
+       DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
+                       info->out_height, info->screen_width);
+       DBG("%d,%d %08x", info->pos_x, info->pos_y, info->paddr);
+
+       /* NOTE: do we want to do this at all here, or just wait
+        * for dpms(ON) since other CRTC's may not have their mode
+        * set yet, so fb dimensions may still change..
+        */
+       ret = ovl->set_overlay_info(ovl, info);
+       if (ret) {
+               dev_err(dev->dev, "could not set overlay info\n");
+               return ret;
+       }
+
+       /* our encoder doesn't necessarily get a commit() after this, in
+        * particular in the dpms() and mode_set_base() cases, so force the
+        * manager to update:
+        *
+        * could this be in the encoder somehow?
+        */
+       if (ovl->manager) {
+               ret = ovl->manager->apply(ovl->manager);
+               if (ret) {
+                       dev_err(dev->dev, "could not apply settings\n");
+                       return ret;
+               }
+       }
+
+       if (info->enabled) {
+               omap_framebuffer_flush(crtc->fb, crtc->x, crtc->y,
+                               crtc->fb->width, crtc->fb->height);
+       }
+
+       return 0;
+}
+
+/* update parameters that are dependent on the framebuffer dimensions and
+ * position within the fb that this crtc scans out from. This is called
+ * when framebuffer dimensions or x,y base may have changed, either due
+ * to our mode, or a change in another crtc that is scanning out of the
+ * same fb.
+ */
+static void update_scanout(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       dma_addr_t paddr;
+       unsigned int screen_width;
+
+       omap_framebuffer_get_buffer(crtc->fb, crtc->x, crtc->y,
+                       NULL, &paddr, &screen_width);
+
+       DBG("%s: %d,%d: %08x (%d)", omap_crtc->ovl->name,
+                       crtc->x, crtc->y, (u32)paddr, screen_width);
+
+       omap_crtc->info.paddr = paddr;
+       omap_crtc->info.screen_width = screen_width;
+}
+
+static void omap_crtc_gamma_set(struct drm_crtc *crtc,
+               u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->ovl->name);
+}
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->ovl->name);
+       drm_crtc_cleanup(crtc);
+       kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       DBG("%s: %d", omap_crtc->ovl->name, mode);
+
+       if (mode == DRM_MODE_DPMS_ON) {
+               update_scanout(crtc);
+               omap_crtc->info.enabled = true;
+       } else {
+               omap_crtc->info.enabled = false;
+       }
+
+       WARN_ON(commit(crtc));
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->ovl->name);
+       return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       DBG("%s: %d,%d: %dx%d", omap_crtc->ovl->name, x, y,
+                       mode->hdisplay, mode->vdisplay);
+
+       /* just use adjusted mode */
+       mode = adjusted_mode;
+
+       omap_crtc->info.width = mode->hdisplay;
+       omap_crtc->info.height = mode->vdisplay;
+       omap_crtc->info.out_width = mode->hdisplay;
+       omap_crtc->info.out_height = mode->vdisplay;
+       omap_crtc->info.color_mode = OMAP_DSS_COLOR_RGB24U;
+       omap_crtc->info.rotation_type = OMAP_DSS_ROT_DMA;
+       omap_crtc->info.rotation = OMAP_DSS_ROT_0;
+       omap_crtc->info.global_alpha = 0xff;
+       omap_crtc->info.mirror = 0;
+       omap_crtc->info.mirror = 0;
+       omap_crtc->info.pos_x = 0;
+       omap_crtc->info.pos_y = 0;
+#if 0 /* re-enable when these are available in DSS2 driver */
+       omap_crtc->info.zorder = 3;        /* GUI in the front, video behind */
+       omap_crtc->info.min_x_decim = 1;
+       omap_crtc->info.max_x_decim = 1;
+       omap_crtc->info.min_y_decim = 1;
+       omap_crtc->info.max_y_decim = 1;
+#endif
+
+       update_scanout(crtc);
+
+       return 0;
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct omap_overlay *ovl = omap_crtc->ovl;
+
+       DBG("%s", omap_crtc->ovl->name);
+
+       ovl->get_overlay_info(ovl, &omap_crtc->info);
+
+       omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->ovl->name);
+       omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                   struct drm_framebuffer *old_fb)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       DBG("%s %d,%d: fb=%p", omap_crtc->ovl->name, x, y, old_fb);
+
+       update_scanout(crtc);
+
+       return commit(crtc);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       DBG("%s", omap_crtc->ovl->name);
+}
+
+static void page_flip_cb(void *arg)
+{
+       struct drm_crtc *crtc = arg;
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct drm_pending_vblank_event *event = omap_crtc->event;
+       struct timeval now;
+       unsigned long flags;
+
+       WARN_ON(!event);
+
+       omap_crtc->event = NULL;
+
+       update_scanout(crtc);
+       WARN_ON(commit(crtc));
+
+       /* wakeup userspace */
+       /* TODO: this should happen *after* flip in vsync IRQ handler */
+       if (event) {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               event->event.sequence = drm_vblank_count_and_time(
+                               dev, omap_crtc->id, &now);
+               event->event.tv_sec = now.tv_sec;
+               event->event.tv_usec = now.tv_usec;
+               list_add_tail(&event->base.link,
+                               &event->base.file_priv->event_list);
+               wake_up_interruptible(&event->base.file_priv->event_wait);
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+}
+
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
+                struct drm_pending_vblank_event *event)
+{
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+
+       if (omap_crtc->event) {
+               dev_err(dev->dev, "already a pending flip\n");
+               return -EINVAL;
+       }
+
+       crtc->fb = fb;
+       omap_crtc->event = event;
+
+       omap_gem_op_async(omap_framebuffer_bo(fb), OMAP_GEM_READ,
+                       page_flip_cb, crtc);
+
+       return 0;
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+       .gamma_set = omap_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = omap_crtc_destroy,
+       .page_flip = omap_crtc_page_flip_locked,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+       .dpms = omap_crtc_dpms,
+       .mode_fixup = omap_crtc_mode_fixup,
+       .mode_set = omap_crtc_mode_set,
+       .prepare = omap_crtc_prepare,
+       .commit = omap_crtc_commit,
+       .mode_set_base = omap_crtc_mode_set_base,
+       .load_lut = omap_crtc_load_lut,
+};
+
+struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return omap_crtc->ovl;
+}
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+               struct omap_overlay *ovl, int id)
+{
+       struct drm_crtc *crtc = NULL;
+       struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+
+       DBG("%s", ovl->name);
+
+       if (!omap_crtc) {
+               dev_err(dev->dev, "could not allocate CRTC\n");
+               goto fail;
+       }
+
+       omap_crtc->ovl = ovl;
+       omap_crtc->id = id;
+       crtc = &omap_crtc->base;
+       drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+       drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+       return crtc;
+
+fail:
+       if (crtc) {
+               drm_crtc_cleanup(crtc);
+               kfree(omap_crtc);
+       }
+       return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_drm.h b/drivers/staging/omapdrm/omap_drm.h
new file mode 100644 (file)
index 0000000..40167dd
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * include/drm/omap_drm.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRM_H__
+#define __OMAP_DRM_H__
+
+#include "drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#define OMAP_PARAM_CHIPSET_ID  1       /* ie. 0x3430, 0x4430, etc */
+
+struct drm_omap_param {
+       uint64_t param;                 /* in */
+       uint64_t value;                 /* in (set_param), out (get_param) */
+};
+
+#define OMAP_BO_SCANOUT                0x00000001      /* scanout capable (phys contiguous) */
+#define OMAP_BO_CACHE_MASK     0x00000006      /* cache type mask, see cache modes */
+#define OMAP_BO_TILED_MASK     0x00000f00      /* tiled mapping mask, see tiled modes */
+
+/* cache modes */
+#define OMAP_BO_CACHED         0x00000000      /* default */
+#define OMAP_BO_WC             0x00000002      /* write-combine */
+#define OMAP_BO_UNCACHED       0x00000004      /* strongly-ordered (uncached) */
+
+/* tiled modes */
+#define OMAP_BO_TILED_8                0x00000100
+#define OMAP_BO_TILED_16       0x00000200
+#define OMAP_BO_TILED_32       0x00000300
+#define OMAP_BO_TILED          (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+
+union omap_gem_size {
+       uint32_t bytes;         /* (for non-tiled formats) */
+       struct {
+               uint16_t width;
+               uint16_t height;
+       } tiled;                /* (for tiled formats) */
+};
+
+struct drm_omap_gem_new {
+       union omap_gem_size size;       /* in */
+       uint32_t flags;                 /* in */
+       uint32_t handle;                /* out */
+       uint32_t __pad;
+};
+
+/* mask of operations: */
+enum omap_gem_op {
+       OMAP_GEM_READ = 0x01,
+       OMAP_GEM_WRITE = 0x02,
+};
+
+struct drm_omap_gem_cpu_prep {
+       uint32_t handle;                /* buffer handle (in) */
+       uint32_t op;                    /* mask of omap_gem_op (in) */
+};
+
+struct drm_omap_gem_cpu_fini {
+       uint32_t handle;                /* buffer handle (in) */
+       uint32_t op;                    /* mask of omap_gem_op (in) */
+       /* TODO maybe here we pass down info about what regions are touched
+        * by sw so we can be clever about cache ops?  For now a placeholder,
+        * set to zero and we just do full buffer flush..
+        */
+       uint32_t nregions;
+       uint32_t __pad;
+};
+
+struct drm_omap_gem_info {
+       uint32_t handle;                /* buffer handle (in) */
+       uint32_t pad;
+       uint64_t offset;                /* mmap offset (out) */
+       /* note: in case of tiled buffers, the user virtual size can be
+        * different from the physical size (ie. how many pages are needed
+        * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
+        * This size here is the one that should be used if you want to
+        * mmap() the buffer:
+        */
+       uint32_t size;                  /* virtual size for mmap'ing (out) */
+       uint32_t __pad;
+};
+
+#define DRM_OMAP_GET_PARAM             0x00
+#define DRM_OMAP_SET_PARAM             0x01
+/* placeholder for plugin-api
+#define DRM_OMAP_GET_BASE              0x02
+*/
+#define DRM_OMAP_GEM_NEW               0x03
+#define DRM_OMAP_GEM_CPU_PREP          0x04
+#define DRM_OMAP_GEM_CPU_FINI          0x05
+#define DRM_OMAP_GEM_INFO              0x06
+#define DRM_OMAP_NUM_IOCTLS            0x07
+
+#define DRM_IOCTL_OMAP_GET_PARAM       DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
+#define DRM_IOCTL_OMAP_SET_PARAM       DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
+/* placeholder for plugin-api
+#define DRM_IOCTL_OMAP_GET_BASE                DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
+*/
+#define DRM_IOCTL_OMAP_GEM_NEW         DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
+#define DRM_IOCTL_OMAP_GEM_CPU_PREP    DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
+#define DRM_IOCTL_OMAP_GEM_CPU_FINI    DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
+#define DRM_IOCTL_OMAP_GEM_INFO                DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
+
+#endif /* __OMAP_DRM_H__ */
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
new file mode 100644 (file)
index 0000000..cee0050
--- /dev/null
@@ -0,0 +1,810 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+
+/* Driver identity/version strings reported to userspace via drm_version. */
+#define DRIVER_NAME            MODULE_NAME
+#define DRIVER_DESC            "OMAP DRM"
+#define DRIVER_DATE            "20110917"
+#define DRIVER_MAJOR           1
+#define DRIVER_MINOR           0
+#define DRIVER_PATCHLEVEL      0
+
+/* NOTE(review): non-static file-scope global; it is only assigned in
+ * dev_load() and read by the compiled-out hotplug notifier below, so it
+ * could be made static (or dropped) once dss2 hotplug support lands.
+ */
+struct drm_device *drm_device;
+
+/* number of DSS overlays to expose as CRTCs; tunable via module param */
+static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+
+MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+module_param(num_crtc, int, 0600);
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ *    CRTC:        overlay
+ *    encoder:     manager.. with some extension to allow one primary CRTC
+ *                 and zero or more video CRTC's to be mapped to one encoder?
+ *    connector:   dssdev.. manager can be attached/detached from different
+ *                 devices
+ */
+
+/* Output-poll callback: forward a detected connector change to the fbdev
+ * helper (if one was created) so the console framebuffer can reconfigure.
+ */
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       DBG("dev=%p", dev);
+       if (priv->fbdev) {
+               drm_fb_helper_hotplug_event(priv->fbdev);
+       }
+}
+
+/* Mode-config vtable: framebuffer creation + hotplug-poll notification. */
+static struct drm_mode_config_funcs omap_mode_config_funcs = {
+       .fb_create = omap_framebuffer_create,
+       .output_poll_changed = omap_fb_output_poll_changed,
+};
+
+/* Map a DSS display type to the closest DRM connector type.  Only HDMI
+ * and the DPI device literally named "dvi" get a specific type; everything
+ * else (including non-dvi DPI, via the fallthrough) reports Unknown.
+ */
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+       switch (dssdev->type) {
+       case OMAP_DISPLAY_TYPE_HDMI:
+               return DRM_MODE_CONNECTOR_HDMIA;
+       case OMAP_DISPLAY_TYPE_DPI:
+               if (!strcmp(dssdev->name, "dvi"))
+                       return DRM_MODE_CONNECTOR_DVID;
+               /* fallthrough */
+       default:
+               return DRM_MODE_CONNECTOR_Unknown;
+       }
+}
+
+#if 0 /* enable when dss2 supports hotplug */
+/* DSS notifier: translate size-change/hotplug events into a DRM sysfs
+ * hotplug uevent.  Compiled out until dss2 grows hotplug support.
+ * NOTE(review): "evt=%d" prints an unsigned long with %d — fix the format
+ * specifier if/when this block is enabled.
+ */
+static int omap_drm_notifier(struct notifier_block *nb,
+               unsigned long evt, void *arg)
+{
+       switch (evt) {
+       case OMAP_DSS_SIZE_CHANGE:
+       case OMAP_DSS_HOTPLUG_CONNECT:
+       case OMAP_DSS_HOTPLUG_DISCONNECT: {
+               struct drm_device *dev = drm_device;
+               DBG("hotplug event: evt=%d, dev=%p", evt, dev);
+               if (dev) {
+                       drm_sysfs_hotplug_event(dev);
+               }
+               return NOTIFY_OK;
+       }
+       default:  /* don't care about other events for now */
+               return NOTIFY_DONE;
+       }
+}
+#endif
+
+/* Debug helper: log every overlay -> manager -> device chain currently
+ * configured in DSS.  Output only appears when DRM debugging is enabled.
+ */
+static void dump_video_chains(void)
+{
+       int i;
+
+       DBG("dumping video chains: ");
+       for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+               struct omap_overlay *ovl = omap_dss_get_overlay(i);
+               struct omap_overlay_manager *mgr = ovl->manager;
+               struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
+               /* print as much of the chain as is actually connected */
+               if (dssdev) {
+                       DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
+                                               dssdev->name);
+               } else if (mgr) {
+                       DBG("%d: %s -> %s", i, ovl->name, mgr->name);
+               } else {
+                       DBG("%d: %s", i, ovl->name);
+               }
+       }
+}
+
+/* create encoders for each manager */
+/* Wrap one DSS overlay manager in a DRM encoder and record it in
+ * priv->encoders.  Returns 0 on success, -ENOMEM if the encoder could
+ * not be created.  BUG_ON guards the fixed-size encoders[] array.
+ */
+static int create_encoder(struct drm_device *dev,
+               struct omap_overlay_manager *mgr)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
+
+       if (!encoder) {
+               dev_err(dev->dev, "could not create encoder: %s\n",
+                               mgr->name);
+               return -ENOMEM;
+       }
+
+       BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+
+       priv->encoders[priv->num_encoders++] = encoder;
+
+       return 0;
+}
+
+/* create connectors for each display device */
+/* Wrap one DSS device in a DRM connector, record it in priv->connectors,
+ * and attach it to any already-created encoder whose manager drives this
+ * device.  Devices without a driver, or whose driver provides neither
+ * get_timings nor read_edid, are skipped (returns 0, not an error).
+ * NOTE(review): `notifier` is declared static, so it would be shared
+ * across all connectors once the #if 0 hotplug path is enabled; the
+ * kzalloc there is also unchecked — both need fixing before enabling.
+ */
+static int create_connector(struct drm_device *dev,
+               struct omap_dss_device *dssdev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       static struct notifier_block *notifier;
+       struct drm_connector *connector;
+       int j;
+
+       if (!dssdev->driver) {
+               dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+                               dssdev->name);
+               return 0;
+       }
+
+       if (!(dssdev->driver->get_timings ||
+                               dssdev->driver->read_edid)) {
+               dev_warn(dev->dev, "%s driver does not support "
+                       "get_timings or read_edid.. skipping it!\n",
+                       dssdev->name);
+               return 0;
+       }
+
+       connector = omap_connector_init(dev,
+                       get_connector_type(dssdev), dssdev);
+
+       if (!connector) {
+               dev_err(dev->dev, "could not create connector: %s\n",
+                               dssdev->name);
+               return -ENOMEM;
+       }
+
+       BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+
+       priv->connectors[priv->num_connectors++] = connector;
+
+#if 0 /* enable when dss2 supports hotplug */
+       notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+       notifier->notifier_call = omap_drm_notifier;
+       omap_dss_add_notify(dssdev, notifier);
+#else
+       notifier = NULL;
+#endif
+
+       /* attach to every encoder whose manager currently drives dssdev */
+       for (j = 0; j < priv->num_encoders; j++) {
+               struct omap_overlay_manager *mgr =
+                       omap_encoder_get_manager(priv->encoders[j]);
+               if (mgr->device == dssdev) {
+                       drm_mode_connector_attach_encoder(connector,
+                                       priv->encoders[j]);
+               }
+       }
+
+       return 0;
+}
+
+/* create up to max_overlays CRTCs mapping to overlays.. by default,
+ * connect the overlays to different managers/encoders, giving priority
+ * to encoders connected to connectors with a detected connection
+ *
+ * @j is a cursor shared across successive calls: it walks the connector
+ * list once for connected connectors (0..num_connectors-1), then once
+ * more for unconnected ones (num_connectors..2*num_connectors-1), so
+ * each CRTC picks a different manager.  connected_connectors is the
+ * bitmask produced by detect_connectors().
+ */
+static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
+               int *j, unsigned int connected_connectors)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_overlay_manager *mgr = NULL;
+       struct drm_crtc *crtc;
+
+       /* detach the overlay so it can be re-bound to our chosen manager */
+       if (ovl->manager) {
+               DBG("disconnecting %s from %s", ovl->name,
+                                       ovl->manager->name);
+               ovl->unset_manager(ovl);
+       }
+
+       /* find next best connector, ones with detected connection first
+        */
+       while (*j < priv->num_connectors && !mgr) {
+               if (connected_connectors & (1 << *j)) {
+                       struct drm_encoder *encoder =
+                               omap_connector_attached_encoder(
+                                               priv->connectors[*j]);
+                       if (encoder) {
+                               mgr = omap_encoder_get_manager(encoder);
+                       }
+               }
+               (*j)++;
+       }
+
+       /* if we couldn't find another connected connector, lets start
+        * looking at the unconnected connectors:
+        *
+        * note: it might not be immediately apparent, but thanks to
+        * the !mgr check in both this loop and the one above, the only
+        * way to enter this loop is with *j == priv->num_connectors,
+        * so idx can never go negative.
+        */
+       while (*j < 2 * priv->num_connectors && !mgr) {
+               int idx = *j - priv->num_connectors;
+               if (!(connected_connectors & (1 << idx))) {
+                       struct drm_encoder *encoder =
+                               omap_connector_attached_encoder(
+                                               priv->connectors[idx]);
+                       if (encoder) {
+                               mgr = omap_encoder_get_manager(encoder);
+                       }
+               }
+               (*j)++;
+       }
+
+       /* mgr may still be NULL here; the CRTC is created regardless and
+        * the overlay simply stays unconnected for now.
+        */
+       if (mgr) {
+               DBG("connecting %s to %s", ovl->name, mgr->name);
+               ovl->set_manager(ovl, mgr);
+       }
+
+       crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
+
+       if (!crtc) {
+               dev_err(dev->dev, "could not create CRTC: %s\n",
+                               ovl->name);
+               return -ENOMEM;
+       }
+
+       BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+
+       priv->crtcs[priv->num_crtcs++] = crtc;
+
+       return 0;
+}
+
+/* omap_dss_find_device() match callback: true if dssdev->name == data. */
+static int match_dev_name(struct omap_dss_device *dssdev, void *data)
+{
+       return !strcmp(dssdev->name, data);
+}
+
+/* Probe each connector (force=true) and return a bitmask with bit i set
+ * when connectors[i] reports a detected connection.  Fine for the fixed
+ * 8-entry connector array; a bitmask in unsigned int caps this at 32.
+ */
+static unsigned int detect_connectors(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned int connected_connectors = 0;
+       int i;
+
+       for (i = 0; i < priv->num_connectors; i++) {
+               struct drm_connector *connector = priv->connectors[i];
+               if (omap_connector_detect(connector, true) ==
+                               connector_status_connected) {
+                       connected_connectors |= (1 << i);
+               }
+       }
+
+       return connected_connectors;
+}
+
+/* Build the KMS topology: encoders from overlay managers, connectors
+ * from DSS devices, CRTCs from overlays.  If the board file supplied
+ * platform data, it dictates exactly which managers/devices/overlays we
+ * claim; otherwise we grab everything, capped at num_crtc overlays.
+ * Returns 0 (currently unconditionally — NOTE(review): the create_*()
+ * return values are ignored, so partial failures are silently tolerated).
+ */
+static int omap_modeset_init(struct drm_device *dev)
+{
+       const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_dss_device *dssdev = NULL;
+       int i, j;
+       unsigned int connected_connectors = 0;
+
+       drm_mode_config_init(dev);
+
+       if (pdata) {
+               /* if platform data is provided by the board file, use it to
+                * control which overlays, managers, and devices we own.
+                */
+               for (i = 0; i < pdata->mgr_cnt; i++) {
+                       struct omap_overlay_manager *mgr =
+                                       omap_dss_get_overlay_manager(pdata->mgr_ids[i]);
+                       create_encoder(dev, mgr);
+               }
+
+               for (i = 0; i < pdata->dev_cnt; i++) {
+                       struct omap_dss_device *dssdev =
+                               omap_dss_find_device(
+                                       (void *)pdata->dev_names[i], match_dev_name);
+                       if (!dssdev) {
+                               dev_warn(dev->dev, "no such dssdev: %s\n",
+                                               pdata->dev_names[i]);
+                               continue;
+                       }
+                       create_connector(dev, dssdev);
+               }
+
+               connected_connectors = detect_connectors(dev);
+
+               /* j is the connector cursor shared by create_crtc() calls */
+               j = 0;
+               for (i = 0; i < pdata->ovl_cnt; i++) {
+                       struct omap_overlay *ovl =
+                                       omap_dss_get_overlay(pdata->ovl_ids[i]);
+                       create_crtc(dev, ovl, &j, connected_connectors);
+               }
+       } else {
+               /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
+                * to make educated guesses about everything else
+                */
+               int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+
+               for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+                       create_encoder(dev, omap_dss_get_overlay_manager(i));
+               }
+
+               for_each_dss_dev(dssdev) {
+                       create_connector(dev, dssdev);
+               }
+
+               connected_connectors = detect_connectors(dev);
+
+               j = 0;
+               for (i = 0; i < max_overlays; i++) {
+                       create_crtc(dev, omap_dss_get_overlay(i),
+                                       &j, connected_connectors);
+               }
+       }
+
+       /* for now keep the mapping of CRTCs and encoders static.. */
+       for (i = 0; i < priv->num_encoders; i++) {
+               struct drm_encoder *encoder = priv->encoders[i];
+               struct omap_overlay_manager *mgr =
+                               omap_encoder_get_manager(encoder);
+
+               encoder->possible_crtcs = 0;
+
+               /* a CRTC is possible for this encoder iff its overlay is
+                * currently bound to the encoder's manager
+                */
+               for (j = 0; j < priv->num_crtcs; j++) {
+                       struct omap_overlay *ovl =
+                                       omap_crtc_get_overlay(priv->crtcs[j]);
+                       if (ovl->manager == mgr) {
+                               encoder->possible_crtcs |= (1 << j);
+                       }
+               }
+
+               DBG("%s: possible_crtcs=%08x", mgr->name,
+                                       encoder->possible_crtcs);
+       }
+
+       dump_video_chains();
+
+       dev->mode_config.min_width = 256;
+       dev->mode_config.min_height = 256;
+
+       /* note: eventually will need some cpu_is_omapXYZ() type stuff here
+        * to fill in these limits properly on different OMAP generations..
+        */
+       dev->mode_config.max_width = 2048;
+       dev->mode_config.max_height = 2048;
+
+       dev->mode_config.funcs = &omap_mode_config_funcs;
+
+       return 0;
+}
+
+/* Tear down everything omap_modeset_init() registered with the core. */
+static void omap_modeset_free(struct drm_device *dev)
+{
+       drm_mode_config_cleanup(dev);
+}
+
+/*
+ * drm ioctl funcs
+ */
+
+
+/* DRM_OMAP_GET_PARAM: read one driver parameter.  Currently only
+ * OMAP_PARAM_CHIPSET_ID is supported; unknown params yield -EINVAL.
+ */
+static int ioctl_get_param(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_omap_param *args = data;
+
+       DBG("%p: param=%llu", dev, args->param);
+
+       switch (args->param) {
+       case OMAP_PARAM_CHIPSET_ID:
+               args->value = GET_OMAP_TYPE;
+               break;
+       default:
+               DBG("unknown parameter %lld", args->param);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* DRM_OMAP_SET_PARAM: write one driver parameter.  No writable params
+ * exist yet, so every request returns -EINVAL; the trailing return 0 is
+ * unreachable until a real case is added.
+ */
+static int ioctl_set_param(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_omap_param *args = data;
+
+       switch (args->param) {
+       default:
+               DBG("unknown parameter %lld", args->param);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* DRM_OMAP_GEM_NEW: allocate a GEM buffer of the requested size/flags
+ * and return its handle to userspace.
+ */
+static int ioctl_gem_new(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_omap_gem_new *args = data;
+       DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+                       args->size.bytes, args->flags);
+       return omap_gem_new_handle(dev, file_priv, args->size,
+                       args->flags, &args->handle);
+}
+
+/* DRM_OMAP_GEM_CPU_PREP: prepare a buffer for CPU access — wait for any
+ * conflicting in-flight operation to finish, then mark the new CPU op as
+ * started.  Returns -ENOENT for a bad handle, else the sync/start result.
+ */
+static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_omap_gem_cpu_prep *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (!obj) {
+               return -ENOENT;
+       }
+
+       ret = omap_gem_op_sync(obj, args->op);
+
+       if (!ret) {
+               ret = omap_gem_op_start(obj, args->op);
+       }
+
+       /* lookup took a reference; drop it on every path */
+       drm_gem_object_unreference_unlocked(obj);
+
+       return ret;
+}
+
+/* DRM_OMAP_GEM_CPU_FINI: end a CPU access window opened by CPU_PREP.
+ * Cache flushing is still a TODO (see XXX), so today this only marks the
+ * op finished.  Returns -ENOENT for a bad handle.
+ */
+static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_omap_gem_cpu_fini *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (!obj) {
+               return -ENOENT;
+       }
+
+       /* XXX flushy, flushy */
+       ret = 0;
+
+       if (!ret) {
+               ret = omap_gem_op_finish(obj, args->op);
+       }
+
+       drm_gem_object_unreference_unlocked(obj);
+
+       return ret;
+}
+
+/* DRM_OMAP_GEM_INFO: report a buffer's mmap offset and virtual size.
+ * NOTE(review): args->size is uint32_t while obj->size is size_t, so very
+ * large objects would truncate; fine for the sizes this hw supports.
+ */
+static int ioctl_gem_info(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_omap_gem_info *args = data;
+       struct drm_gem_object *obj;
+       int ret = 0;
+
+       DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (!obj) {
+               return -ENOENT;
+       }
+
+       args->size = obj->size;  /* for now */
+       args->offset = omap_gem_mmap_offset(obj);
+
+       drm_gem_object_unreference_unlocked(obj);
+
+       return ret;
+}
+
+/* Driver-private ioctl dispatch table.  DRM_IOCTL_DEF_DRV places each
+ * entry at its ioctl-number index, so slot 0x02 (the plugin-api
+ * placeholder) simply stays zeroed.  SET_PARAM additionally requires
+ * master + root since it will mutate driver-global state.
+ */
+struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+       DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ *
+ * Returns 0 on success, -ENOMEM if priv allocation fails, or the
+ * omap_modeset_init() error.  fbdev and vblank setup failures are
+ * non-fatal (warn and continue).
+ * NOTE(review): if drm_vblank_init() fails, the fbdev/poll/modeset state
+ * already created is not torn down here — unload still cleans it up.
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+       struct omap_drm_private *priv;
+       int ret;
+
+       DBG("load: dev=%p", dev);
+
+       /* stash for the (currently compiled-out) hotplug notifier */
+       drm_device = dev;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               dev_err(dev->dev, "could not allocate priv\n");
+               return -ENOMEM;
+       }
+
+       dev->dev_private = priv;
+
+       ret = omap_modeset_init(dev);
+       if (ret) {
+               dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+               dev->dev_private = NULL;
+               kfree(priv);
+               return ret;
+       }
+
+       priv->fbdev = omap_fbdev_init(dev);
+       if (!priv->fbdev) {
+               dev_warn(dev->dev, "omap_fbdev_init failed\n");
+               /* well, limp along without an fbdev.. maybe X11 will work? */
+       }
+
+       drm_kms_helper_poll_init(dev);
+
+       ret = drm_vblank_init(dev, priv->num_crtcs);
+       if (ret) {
+               dev_warn(dev->dev, "could not init vblank\n");
+       }
+
+       return 0;
+}
+
+/* Driver unload: undo dev_load() in reverse order — vblank, poll helper,
+ * fbdev, modeset state, then the private struct itself.
+ */
+static int dev_unload(struct drm_device *dev)
+{
+       DBG("unload: dev=%p", dev);
+
+       drm_vblank_cleanup(dev);
+       drm_kms_helper_poll_fini(dev);
+
+       omap_fbdev_free(dev);
+
+       omap_modeset_free(dev);
+
+       kfree(dev->dev_private);
+       dev->dev_private = NULL;
+
+       return 0;
+}
+
+/* Per-open hook: no per-file state is needed yet, just clear the slot. */
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+       file->driver_priv = NULL;
+
+       DBG("open: dev=%p, file=%p", dev, file);
+
+       return 0;
+}
+
+/* Called on the first open after load/lastclose; nothing to do yet. */
+static int dev_firstopen(struct drm_device *dev)
+{
+       DBG("firstopen: dev=%p", dev);
+       return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ */
+static void dev_lastclose(struct drm_device *dev)
+{
+       /* we don't support vga-switcheroo.. so just make sure the fbdev
+        * mode is active
+        */
+       struct omap_drm_private *priv = dev->dev_private;
+       int ret;
+
+       DBG("lastclose: dev=%p", dev);
+
+       /* failure is only logged — fbdev may legitimately be absent */
+       ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+       if (ret)
+               DBG("failed to restore crtc mode");
+}
+
+/* Per-file pre-close hook; stub (trace only). */
+static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+{
+       DBG("preclose: dev=%p", dev);
+}
+
+/* Per-file post-close hook; stub (trace only). */
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       DBG("postclose: dev=%p, file=%p", dev, file);
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+static int dev_enable_vblank(struct drm_device *dev, int crtc)
+{
+       /* stub: vblank irq wiring not implemented yet */
+       DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
+       return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Disable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+static void dev_disable_vblank(struct drm_device *dev, int crtc)
+{
+       /* stub: vblank irq wiring not implemented yet */
+       DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
+}
+
+/* Stub irq handler: claims every interrupt without doing any work. */
+static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
+{
+       return IRQ_HANDLED;
+}
+
+/* irq pre-install hook; stub (trace only). */
+static void dev_irq_preinstall(struct drm_device *dev)
+{
+       DBG("irq_preinstall: dev=%p", dev);
+}
+
+/* irq post-install hook; stub (trace only, always succeeds). */
+static int dev_irq_postinstall(struct drm_device *dev)
+{
+       DBG("irq_postinstall: dev=%p", dev);
+       return 0;
+}
+
+/* irq uninstall hook; stub (trace only). */
+static void dev_irq_uninstall(struct drm_device *dev)
+{
+       DBG("irq_uninstall: dev=%p", dev);
+}
+
+/* vm_ops for GEM-backed mmaps: driver fault handler, generic GEM
+ * open/close refcounting.
+ */
+static struct vm_operations_struct omap_gem_vm_ops = {
+       .fault = omap_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+/* Top-level DRM driver description: modesetting + GEM, with irq, vblank,
+ * dumb-buffer and driver-private ioctl support wired to the hooks above.
+ */
+static struct drm_driver omap_drm_driver = {
+               .driver_features =
+                               DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
+               .load = dev_load,
+               .unload = dev_unload,
+               .open = dev_open,
+               .firstopen = dev_firstopen,
+               .lastclose = dev_lastclose,
+               .preclose = dev_preclose,
+               .postclose = dev_postclose,
+               .get_vblank_counter = drm_vblank_count,
+               .enable_vblank = dev_enable_vblank,
+               .disable_vblank = dev_disable_vblank,
+               .irq_preinstall = dev_irq_preinstall,
+               .irq_postinstall = dev_irq_postinstall,
+               .irq_uninstall = dev_irq_uninstall,
+               .irq_handler = dev_irq_handler,
+               .reclaim_buffers = drm_core_reclaim_buffers,
+               .gem_init_object = omap_gem_init_object,
+               .gem_free_object = omap_gem_free_object,
+               .gem_vm_ops = &omap_gem_vm_ops,
+               .dumb_create = omap_gem_dumb_create,
+               .dumb_map_offset = omap_gem_dumb_map_offset,
+               .dumb_destroy = omap_gem_dumb_destroy,
+               .ioctls = ioctls,
+               .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+               /* file ops: GEM mmap is the only driver-specific entry */
+               .fops = {
+                               .owner = THIS_MODULE,
+                               .open = drm_open,
+                               .unlocked_ioctl = drm_ioctl,
+                               .release = drm_release,
+                               .mmap = omap_gem_mmap,
+                               .poll = drm_poll,
+                               .fasync = drm_fasync,
+                               .read = drm_read,
+                               .llseek = noop_llseek,
+               },
+               .name = DRIVER_NAME,
+               .desc = DRIVER_DESC,
+               .date = DRIVER_DATE,
+               .major = DRIVER_MAJOR,
+               .minor = DRIVER_MINOR,
+               .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+/* Platform-device suspend; stub (no PM handling yet). */
+static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+{
+       DBG("");
+       return 0;
+}
+
+/* Platform-device resume; stub (no PM handling yet). */
+static int pdev_resume(struct platform_device *device)
+{
+       DBG("");
+       return 0;
+}
+
+/* Platform-device shutdown; stub. */
+static void pdev_shutdown(struct platform_device *device)
+{
+       DBG("");
+}
+
+/* Bind the DRM driver to the matched platform device. */
+static int pdev_probe(struct platform_device *device)
+{
+       DBG("%s", device->name);
+       return drm_platform_init(&omap_drm_driver, device);
+}
+
+/* Unbind: tear down the DRM device registered in pdev_probe(). */
+static int pdev_remove(struct platform_device *device)
+{
+       DBG("");
+       drm_platform_exit(&omap_drm_driver, device);
+       return 0;
+}
+
+/* Platform driver glue.  NOTE(review): the name `pdev` is conventionally
+ * used for a platform_device pointer, not a platform_driver — slightly
+ * misleading but harmless.
+ */
+struct platform_driver pdev = {
+               .driver = {
+                       .name = DRIVER_NAME,
+                       .owner = THIS_MODULE,
+               },
+               .probe = pdev_probe,
+               .remove = pdev_remove,
+               .suspend = pdev_suspend,
+               .resume = pdev_resume,
+               .shutdown = pdev_shutdown,
+};
+
+/* Module init: register the platform driver (run via late_initcall so
+ * the DSS drivers are already loaded — see comment at the initcall).
+ */
+static int __init omap_drm_init(void)
+{
+       DBG("init");
+       return platform_driver_register(&pdev);
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit omap_drm_fini(void)
+{
+       DBG("fini");
+       platform_driver_unregister(&pdev);
+}
+
+/* need late_initcall() so we load after dss_driver's are loaded */
+late_initcall(omap_drm_init);
+module_exit(omap_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
new file mode 100644 (file)
index 0000000..504f354
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRV_H__
+#define __OMAP_DRV_H__
+
+#include <video/omapdss.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/platform_data/omap_drm.h>
+#include "omap_drm.h"
+
+/* Debug print helpers built on DRM_DEBUG.
+ * NOTE(review): VERB expands to a bare `if (0) ...` with no do/while(0)
+ * wrapper, so using it as the body of an un-braced if/else would bind the
+ * else to the wrong if — wrap it before enabling verbose output widely.
+ */
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
+
+#define MODULE_NAME     "omapdrm"
+
+/* max # of mapper-id's that can be assigned.. todo, come up with a better
+ * (but still inexpensive) way to store/access per-buffer mapper private
+ * data..
+ */
+#define MAX_MAPPERS 2
+
+/* Per-device driver state, hung off drm_device->dev_private.
+ * The CRTC/encoder/connector arrays are fixed at 8 entries; the create_*
+ * helpers BUG_ON overflow, so the counts never exceed the arrays.
+ */
+struct omap_drm_private {
+       unsigned int num_crtcs;
+       struct drm_crtc *crtcs[8];
+       unsigned int num_encoders;
+       struct drm_encoder *encoders[8];
+       unsigned int num_connectors;
+       struct drm_connector *connectors[8];
+
+       /* fbdev emulation state; NULL if omap_fbdev_init() failed */
+       struct drm_fb_helper *fbdev;
+};
+
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+               struct omap_overlay *ovl, int id);
+struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc);
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+               struct omap_overlay_manager *mgr);
+struct omap_overlay_manager *omap_encoder_get_manager(
+               struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+               struct drm_connector *connector);
+enum drm_connector_status omap_connector_detect(
+               struct drm_connector *connector, bool force);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+               int connector_type, struct omap_dss_device *dssdev);
+void omap_connector_mode_set(struct drm_connector *connector,
+               struct drm_display_mode *mode);
+void omap_connector_flush(struct drm_connector *connector,
+               int x, int y, int w, int h);
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+               struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+               struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo);
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb);
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+               void **vaddr, dma_addr_t *paddr, unsigned int *screen_width);
+struct drm_connector *omap_framebuffer_get_next_connector(
+               struct drm_framebuffer *fb, struct drm_connector *from);
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+               int x, int y, int w, int h);
+
+
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+               union omap_gem_size gsize, uint32_t flags);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+               union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+int omap_gem_init_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+               uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+               uint32_t handle);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+               struct drm_mode_create_dumb *args);
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+               void (*fxn)(void *arg), void *arg);
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+               dma_addr_t *paddr, bool remap);
+int omap_gem_put_paddr(struct drm_gem_object *obj);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+
+/* Compute a usable scanline pitch (in bytes) for a buffer of `width`
+ * pixels at `bpp` bits per pixel: clamp the caller's requested pitch up
+ * to at least width*bytespp, then round up to an 8-pixel multiple (the
+ * PVR GPU's stride requirement — currently the strictest consumer).
+ */
+static inline int align_pitch(int pitch, int width, int bpp)
+{
+       int bytespp = (bpp + 7) / 8;
+       /* in case someone tries to feed us a completely bogus stride: */
+       pitch = max(pitch, width * bytespp);
+       /* PVR needs alignment to 8 pixels.. right now that is the most
+        * restrictive stride requirement..
+        */
+       return ALIGN(pitch, 8 * bytespp);
+}
+
+#endif /* __OMAP_DRV_H__ */
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
new file mode 100644 (file)
index 0000000..83bacd8
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * drivers/staging/omapdrm/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+struct omap_encoder {
+       struct drm_encoder base;
+       struct omap_overlay_manager *mgr;
+};
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       DBG("%s", omap_encoder->mgr->name);
+       drm_encoder_cleanup(encoder);
+       kfree(omap_encoder);
+}
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       DBG("%s: %d", omap_encoder->mgr->name, mode);
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       DBG("%s", omap_encoder->mgr->name);
+       return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       struct omap_drm_private *priv = dev->dev_private;
+       int i;
+
+       mode = adjusted_mode;
+
+       DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
+                       mode->hdisplay, mode->vdisplay);
+
+       for (i = 0; i < priv->num_connectors; i++) {
+               struct drm_connector *connector = priv->connectors[i];
+               if (connector->encoder == encoder) {
+                       omap_connector_mode_set(connector, mode);
+               }
+       }
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct drm_encoder_helper_funcs *encoder_funcs =
+                               encoder->helper_private;
+       DBG("%s", omap_encoder->mgr->name);
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct drm_encoder_helper_funcs *encoder_funcs =
+                               encoder->helper_private;
+       DBG("%s", omap_encoder->mgr->name);
+       omap_encoder->mgr->apply(omap_encoder->mgr);
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+       .destroy = omap_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+       .dpms = omap_encoder_dpms,
+       .mode_fixup = omap_encoder_mode_fixup,
+       .mode_set = omap_encoder_mode_set,
+       .prepare = omap_encoder_prepare,
+       .commit = omap_encoder_commit,
+};
+
+struct omap_overlay_manager *omap_encoder_get_manager(
+               struct drm_encoder *encoder)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       return omap_encoder->mgr;
+}
+
+/* initialize encoder */
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+               struct omap_overlay_manager *mgr)
+{
+       struct drm_encoder *encoder = NULL;
+       struct omap_encoder *omap_encoder;
+       struct omap_overlay_manager_info info;
+       int ret;
+
+       DBG("%s", mgr->name);
+
+       omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+       if (!omap_encoder) {
+               dev_err(dev->dev, "could not allocate encoder\n");
+               goto fail;
+       }
+
+       omap_encoder->mgr = mgr;
+       encoder = &omap_encoder->base;
+
+       drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+       mgr->get_manager_info(mgr, &info);
+
+       /* TODO: fix hard-coded setup.. */
+       info.default_color = 0x00000000;
+       info.trans_key = 0x00000000;
+       info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+       info.trans_enabled = false;
+
+       ret = mgr->set_manager_info(mgr, &info);
+       if (ret) {
+               dev_err(dev->dev, "could not set manager info\n");
+               goto fail;
+       }
+
+       ret = mgr->apply(mgr);
+       if (ret) {
+               dev_err(dev->dev, "could not apply\n");
+               goto fail;
+       }
+
+       return encoder;
+
+fail:
+       if (encoder) {
+               drm_encoder_cleanup(encoder);
+               kfree(omap_encoder);
+       }
+
+       return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
new file mode 100644 (file)
index 0000000..82ed612
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * drivers/staging/omapdrm/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+/*
+ * framebuffer funcs
+ */
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_object *bo;
+       int size;
+       dma_addr_t paddr;
+};
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+               struct drm_file *file_priv,
+               unsigned int *handle)
+{
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       return drm_gem_handle_create(file_priv, omap_fb->bo, handle);
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+
+       DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+       drm_framebuffer_cleanup(fb);
+
+       /* bo may be NULL if we are called from a failed omap_framebuffer_init() */
+       if (omap_fb->bo) {
+               if (omap_gem_put_paddr(omap_fb->bo))
+                       dev_err(dev->dev, "could not unmap!\n");
+               drm_gem_object_unreference_unlocked(omap_fb->bo);
+       }
+
+
+       kfree(omap_fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+               struct drm_file *file_priv, unsigned flags, unsigned color,
+               struct drm_clip_rect *clips, unsigned num_clips)
+{
+       unsigned int i;
+
+       for (i = 0; i < num_clips; i++) {
+               omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+                                       clips[i].x2 - clips[i].x1,
+                                       clips[i].y2 - clips[i].y1);
+       }
+
+       return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+       .create_handle = omap_framebuffer_create_handle,
+       .destroy = omap_framebuffer_destroy,
+       .dirty = omap_framebuffer_dirty,
+};
+
+/* returns the buffer size */
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+               void **vaddr, dma_addr_t *paddr, unsigned int *screen_width)
+{
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       int bpp = fb->bits_per_pixel / 8;
+       unsigned long offset;
+
+       offset = (x * bpp) + (y * fb->pitch);
+
+       if (vaddr) {
+               void *bo_vaddr = omap_gem_vaddr(omap_fb->bo);
+               /* note: we can only count on having a vaddr for buffers that
+                * are allocated physically contiguously to begin with (ie.
+                * dma_alloc_coherent()).  But this should be ok because it
+                * is only used by legacy fbdev
+                */
+               BUG_ON(!bo_vaddr);
+               *vaddr = bo_vaddr + offset;
+       }
+
+       *paddr = omap_fb->paddr + offset;
+       *screen_width = fb->pitch / bpp;
+
+       return omap_fb->size - offset;
+}
+
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb)
+{
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       return omap_fb->bo;
+}
+
+/* iterate thru all the connectors, returning ones that are attached
+ * to the same fb..
+ */
+struct drm_connector *omap_framebuffer_get_next_connector(
+               struct drm_framebuffer *fb, struct drm_connector *from)
+{
+       struct drm_device *dev = fb->dev;
+       struct list_head *connector_list = &dev->mode_config.connector_list;
+       struct drm_connector *connector = from;
+
+       if (!from) {
+               return list_first_entry(connector_list, typeof(*from), head);
+       }
+
+       list_for_each_entry_from(connector, connector_list, head) {
+               if (connector != from) {
+                       struct drm_encoder *encoder = connector->encoder;
+                       struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+                       if (crtc && crtc->fb == fb) {
+                               return connector;
+                       }
+               }
+       }
+
+       return NULL;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+               int x, int y, int w, int h)
+{
+       struct drm_connector *connector = NULL;
+
+       VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+       while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+               /* only consider connectors that are part of a chain */
+               if (connector->encoder && connector->encoder->crtc) {
+                       /* TODO: maybe this should propagate thru the crtc who
+                        * could do the coordinate translation..
+                        */
+                       struct drm_crtc *crtc = connector->encoder->crtc;
+                       int cx = max(0, x - crtc->x);
+                       int cy = max(0, y - crtc->y);
+                       int cw = w + (x - crtc->x) - cx;
+                       int ch = h + (y - crtc->y) - cy;
+
+                       omap_connector_flush(connector, cx, cy, cw, ch);
+               }
+       }
+}
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+               struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd)
+{
+       struct drm_gem_object *bo;
+       struct drm_framebuffer *fb;
+       bo = drm_gem_object_lookup(dev, file, mode_cmd->handle);
+       if (!bo)
+               return ERR_PTR(-ENOENT);
+       fb = omap_framebuffer_init(dev, mode_cmd, bo);
+       if (!fb) {
+               drm_gem_object_unreference_unlocked(bo);
+               return ERR_PTR(-ENOMEM);
+       }
+       return fb;
+}
+
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+               struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo)
+{
+       struct omap_framebuffer *omap_fb;
+       struct drm_framebuffer *fb = NULL;
+       int size, ret;
+
+       DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%d)",
+                       dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+                       mode_cmd->bpp);
+
+       /* in case someone tries to feed us a completely bogus stride: */
+       mode_cmd->pitch = align_pitch(mode_cmd->pitch,
+                       mode_cmd->width, mode_cmd->bpp);
+
+       omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+       if (!omap_fb) {
+               dev_err(dev->dev, "could not allocate fb\n");
+               goto fail;
+       }
+
+       fb = &omap_fb->base;
+       ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+       if (ret) {
+               dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+               goto fail;
+       }
+
+       DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+       size = PAGE_ALIGN(mode_cmd->pitch * mode_cmd->height);
+
+       if (bo) {
+               DBG("using existing %zu byte buffer (needed %d)", bo->size, size);
+               if (size > bo->size) {
+                       dev_err(dev->dev, "provided buffer object is too small!\n");
+                       goto fail;
+               }
+       } else {
+               /* for convenience of all the various callers who don't want
+                * to be bothered to allocate their own buffer..
+                */
+               union omap_gem_size gsize = {
+                               .bytes = size,
+               };
+               DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+               bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+               if (!bo) {
+                       dev_err(dev->dev, "failed to allocate buffer object\n");
+                       goto fail;
+               }
+       }
+
+       omap_fb->bo = bo;
+       omap_fb->size = size;
+
+       if (omap_gem_get_paddr(bo, &omap_fb->paddr, true)) {
+               dev_err(dev->dev, "could not map (paddr)!\n");
+               goto fail;
+       }
+
+       drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+       return fb;
+
+fail:
+       if (fb) {
+               omap_framebuffer_destroy(fb);
+       }
+       return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
new file mode 100644 (file)
index 0000000..048077c
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * drivers/staging/omapdrm/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+       struct drm_fb_helper base;
+       struct drm_framebuffer *fb;
+};
+
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+               size_t count, loff_t *ppos)
+{
+       ssize_t res;
+
+       res = fb_sys_write(fbi, buf, count, ppos);
+       omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+       return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+               const struct fb_fillrect *rect)
+{
+       sys_fillrect(fbi, rect);
+       omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+               const struct fb_copyarea *area)
+{
+       sys_copyarea(fbi, area);
+       omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+               const struct fb_image *image)
+{
+       sys_imageblit(fbi, image);
+       omap_fbdev_flush(fbi, image->dx, image->dy,
+                               image->width, image->height);
+}
+
+static struct fb_ops omap_fb_ops = {
+       .owner = THIS_MODULE,
+
+       /* Note: to properly handle manual update displays, we wrap the
+        * basic fbdev ops which write to the framebuffer
+        */
+       .fb_read = fb_sys_read,
+       .fb_write = omap_fbdev_write,
+       .fb_fillrect = omap_fbdev_fillrect,
+       .fb_copyarea = omap_fbdev_copyarea,
+       .fb_imageblit = omap_fbdev_imageblit,
+
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+               struct drm_fb_helper_surface_size *sizes)
+{
+       struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+       struct drm_device *dev = helper->dev;
+       struct drm_framebuffer *fb = NULL;
+       struct fb_info *fbi = NULL;
+       struct drm_mode_fb_cmd mode_cmd = {0};
+       dma_addr_t paddr;
+       void __iomem *vaddr;
+       int size, screen_width;
+       int ret;
+
+       /* only doing ARGB32 since this is what is needed to alpha-blend
+        * with video overlays:
+        */
+       sizes->surface_bpp = 32;
+       sizes->surface_depth = 32;
+
+       DBG("create fbdev: %dx%d@%d", sizes->surface_width,
+                       sizes->surface_height, sizes->surface_bpp);
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+
+       mode_cmd.bpp = sizes->surface_bpp;
+       mode_cmd.depth = sizes->surface_depth;
+
+       fb = omap_framebuffer_init(dev, &mode_cmd, NULL);
+       if (!fb) {
+               dev_err(dev->dev, "failed to allocate fb\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       fbi = framebuffer_alloc(0, dev->dev);
+       if (!fbi) {
+               dev_err(dev->dev, "failed to allocate fb info\n");
+               ret = -ENOMEM;
+               goto fail_unlock;
+       }
+
+       DBG("fbi=%p, dev=%p", fbi, dev);
+
+       fbdev->fb = fb;
+       helper->fb = fb;
+       helper->fbdev = fbi;
+
+       fbi->par = helper;
+       fbi->flags = FBINFO_DEFAULT;
+       fbi->fbops = &omap_fb_ops;
+
+       strcpy(fbi->fix.id, MODULE_NAME);
+
+       ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto fail_unlock;
+       }
+
+       drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+       drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+       size = omap_framebuffer_get_buffer(fb, 0, 0,
+                       &vaddr, &paddr, &screen_width);
+
+       dev->mode_config.fb_base = paddr;
+
+       fbi->screen_base = vaddr;
+       fbi->screen_size = size;
+       fbi->fix.smem_start = paddr;
+       fbi->fix.smem_len = size;
+
+       DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+       DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+fail:
+
+       if (ret) {
+               if (fbi)
+                       framebuffer_release(fbi);
+               if (fb)
+                       fb->funcs->destroy(fb);
+       }
+
+       return ret;
+}
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+               u16 red, u16 green, u16 blue, int regno)
+{
+       DBG("fbdev: set gamma");
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+               u16 *red, u16 *green, u16 *blue, int regno)
+{
+       DBG("fbdev: get gamma");
+}
+
+static int omap_fbdev_probe(struct drm_fb_helper *helper,
+               struct drm_fb_helper_surface_size *sizes)
+{
+       int new_fb = 0;
+       int ret;
+
+       if (!helper->fb) {
+               ret = omap_fbdev_create(helper, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+       .gamma_set = omap_crtc_fb_gamma_set,
+       .gamma_get = omap_crtc_fb_gamma_get,
+       .fb_probe = omap_fbdev_probe,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+       if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+               /* these are not the fb's you're looking for */
+               return NULL;
+       }
+       return fbi->par;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+       struct drm_fb_helper *helper = get_fb(fbi);
+
+       if (!helper)
+               return;
+
+       VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+       omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+
+/* initialize fbdev helper */
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_fbdev *fbdev = NULL;
+       struct drm_fb_helper *helper;
+       int ret = 0;
+
+       fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+       if (!fbdev) {
+               dev_err(dev->dev, "could not allocate fbdev\n");
+               goto fail;
+       }
+
+       helper = &fbdev->base;
+
+       helper->funcs = &omap_fb_helper_funcs;
+
+       ret = drm_fb_helper_init(dev, helper,
+                       priv->num_crtcs, priv->num_connectors);
+       if (ret) {
+               dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+               goto fail;
+       }
+
+       drm_fb_helper_single_add_all_connectors(helper);
+       drm_fb_helper_initial_config(helper, 32);
+
+       priv->fbdev = helper;
+
+       return helper;
+
+fail:
+       kfree(fbdev);
+       return NULL;
+}
+
+void omap_fbdev_free(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct drm_fb_helper *helper = priv->fbdev;
+       struct omap_fbdev *fbdev;
+       struct fb_info *fbi;
+
+       DBG();
+
+       fbi = helper->fbdev;
+
+       unregister_framebuffer(fbi);
+       framebuffer_release(fbi);
+
+       drm_fb_helper_fini(helper);
+
+       fbdev = to_omap_fbdev(priv->fbdev);
+
+       kfree(fbdev);
+
+       priv->fbdev = NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
new file mode 100644 (file)
index 0000000..bc1709c
--- /dev/null
@@ -0,0 +1,774 @@
+/*
+ * drivers/staging/omapdrm/omap_gem.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "omap_drv.h"
+
+/* remove these once drm core helpers are merged */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed);
+
+/*
+ * GEM buffer object implementation.
+ */
+
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+
+/* note: we use upper 8 bits of flags for driver-internal flags: */
+#define OMAP_BO_DMA                    0x01000000      /* actually is physically contiguous */
+#define OMAP_BO_EXT_SYNC       0x02000000      /* externally allocated sync object */
+#define OMAP_BO_EXT_MEM                0x04000000      /* externally allocated memory */
+
+
+struct omap_gem_object {
+       struct drm_gem_object base;
+
+       uint32_t flags;
+
+       /**
+        * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+        * is set and the paddr is valid.
+        *
+        * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
+        * buffer is requested, but doesn't mean that it is.  Use the
+        * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+        * physical address.
+        */
+       dma_addr_t paddr;
+
+       /**
+        * Array of backing pages, if allocated.  Note that pages are never
+        * allocated for buffers originally allocated from contiguous memory
+        */
+       struct page **pages;
+
+       /**
+        * Virtual address, if mapped.
+        */
+       void *vaddr;
+
+       /**
+        * sync-object allocated on demand (if needed)
+        *
+        * Per-buffer sync-object for tracking pending and completed hw/dma
+        * read and write operations.  The layout in memory is dictated by
+        * the SGX firmware, which uses this information to stall the command
+        * stream if a surface is not ready yet.
+        *
+        * Note that when buffer is used by SGX, the sync-object needs to be
+        * allocated from a special heap of sync-objects.  This way many sync
+        * objects can be packed in a page, and not waste GPU virtual address
+        * space.  Because of this we have to have a omap_gem_set_sync_object()
+        * API to allow replacement of the syncobj after it has (potentially)
+        * already been allocated.  A bit ugly but I haven't thought of a
+        * better alternative.
+        */
+       struct {
+               uint32_t write_pending;
+               uint32_t write_complete;
+               uint32_t read_pending;
+               uint32_t read_complete;
+       } *sync;
+};
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+       return obj->filp != NULL;
+}
+
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+
+static DEFINE_SPINLOCK(sync_lock);
+
+/** ensure backing pages are allocated */
+static int omap_gem_attach_pages(struct drm_gem_object *obj)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       struct page **pages;
+
+       WARN_ON(omap_obj->pages);
+
+       /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+        * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+        * we actually want CMA memory for it all anyways..
+        */
+       pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+       if (IS_ERR(pages)) {
+               dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+               return PTR_ERR(pages);
+       }
+
+       omap_obj->pages = pages;
+       return 0;
+}
+
+/** release backing pages */
+static void omap_gem_detach_pages(struct drm_gem_object *obj)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+       omap_obj->pages = NULL;
+}
+
+/** get mmap offset */
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+       if (!obj->map_list.map) {
+               /* Make it mmapable */
+               int ret = drm_gem_create_mmap_offset(obj);
+               if (ret) {
+                       dev_err(obj->dev->dev, "could not allocate mmap offset\n");
+                       return 0;
+               }
+       }
+
+       return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+/**
+ * omap_gem_fault              -       pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       struct drm_device *dev = obj->dev;
+       struct page **pages;
+       unsigned long pfn;
+       pgoff_t pgoff;
+       int ret;
+
+       /* Make sure we don't parallel update on a fault, nor move or remove
+        * something from beneath our feet
+        */
+       mutex_lock(&dev->struct_mutex);
+
+       /* if a shmem backed object, make sure we have pages attached now */
+       ret = get_pages(obj, &pages);
+       if (ret) {
+               goto fail;
+       }
+
+       /* where should we do corresponding put_pages().. we are mapping
+        * the original page, rather than thru a GART, so we can't rely
+        * on eviction to trigger this.  But munmap() or all mappings should
+        * probably trigger put_pages()?
+        */
+
+       /* We don't use vmf->pgoff since that has the fake offset: */
+       pgoff = ((unsigned long)vmf->virtual_address -
+                       vma->vm_start) >> PAGE_SHIFT;
+
+       if (omap_obj->pages) {
+               pfn = page_to_pfn(omap_obj->pages[pgoff]);
+       } else {
+               BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+               pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+       }
+
+       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+                       pfn, pfn << PAGE_SHIFT);
+
+       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+       mutex_unlock(&dev->struct_mutex);
+       switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+/** We override mainly to fix up some of the vm mapping flags.. */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct omap_gem_object *omap_obj;
+       int ret;
+
+       ret = drm_gem_mmap(filp, vma);
+       if (ret) {
+               DBG("mmap failed: %d", ret);
+               return ret;
+       }
+
+       /* after drm_gem_mmap(), it is safe to access the obj */
+       omap_obj = to_omap_bo(vma->vm_private_data);
+
+       /* use a mixed map, not a pfn map: the fault handler populates the
+        * mapping with vm_insert_mixed(), which requires VM_MIXEDMAP
+        */
+       vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_flags |= VM_MIXEDMAP;
+
+       /* pick page protection matching the buffer's cache attributes */
+       if (omap_obj->flags & OMAP_BO_WC) {
+               vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+       } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+               vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+       } else {
+               /* default: normal (cached) mapping */
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       }
+
+       return ret;
+}
+
+/**
+ * omap_gem_dumb_create        -       create a dumb buffer
+ * @file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+               struct drm_mode_create_dumb *args)
+{
+       union omap_gem_size gsize;
+
+       /* in case someone tries to feed us a completely bogus stride: */
+       args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+       /* size is derived from the sanitized pitch, page aligned */
+       args->size = PAGE_ALIGN(args->pitch * args->height);
+
+       gsize = (union omap_gem_size){
+               .bytes = args->size,
+       };
+
+       /* scanout-capable, write-combined; handle returned via args->handle */
+       return omap_gem_new_handle(dev, file, gsize,
+                       OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+}
+
+/**
+ * omap_gem_dumb_destroy       -       destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via omap_gem_dumb_create.
+ * Returns 0 on success or a negative error from drm_gem_handle_delete().
+ */
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+               uint32_t handle)
+{
+       /* No special work needed, drop the reference and see what falls out */
+       return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * omap_gem_dumb_map_offset    -       buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ * @offset: out-param for the fake mmap offset userspace passes to mmap()
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+               uint32_t handle, uint64_t *offset)
+{
+       struct drm_gem_object *obj;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* GEM does all our handle to object mapping */
+       obj = drm_gem_object_lookup(dev, file, handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto fail;
+       }
+
+       *offset = omap_gem_mmap_offset(obj);
+
+       /* drop the lookup reference; the returned offset stays valid */
+       drm_gem_object_unreference_unlocked(obj);
+
+fail:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+ * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+ * map in TILER)
+ *
+ * Returns 0 on success; shmem-backed (non-contiguous) buffers currently
+ * fail with -ENOMEM since the TILER remap path is not implemented yet,
+ * so 'remap' is effectively ignored for now.
+ */
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+               dma_addr_t *paddr, bool remap)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+
+       if (is_shmem(obj)) {
+               /* TODO: remap to TILER */
+               return -ENOMEM;
+       }
+
+       *paddr = omap_obj->paddr;
+
+       return ret;
+}
+
+/* Release physical address, when DMA is no longer being performed.. this
+ * could potentially unpin and unmap buffers from TILER
+ */
+int omap_gem_put_paddr(struct drm_gem_object *obj)
+{
+       /* do something here when remap to TILER is used.. */
+       /* currently a no-op; always returns 0 */
+       return 0;
+}
+
+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required)
+ *
+ * Note: callers hold dev->struct_mutex (see omap_gem_get_pages() and
+ * the fault path).
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+
+       /* shmem-backed objects attach their backing pages lazily */
+       if (is_shmem(obj) && !omap_obj->pages) {
+               ret = omap_gem_attach_pages(obj);
+               if (ret) {
+                       dev_err(obj->dev->dev, "could not attach pages\n");
+                       return ret;
+               }
+       }
+
+       /* TODO: even phys-contig.. we should have a list of pages? */
+       *pages = omap_obj->pages;
+
+       return 0;
+}
+
+/* Acquire the object's backing pages, taking dev->struct_mutex around
+ * the (possibly lazily-attaching) get_pages() call.
+ */
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+       int ret;
+       mutex_lock(&obj->dev->struct_mutex);
+       ret = get_pages(obj, pages);
+       mutex_unlock(&obj->dev->struct_mutex);
+       return ret;
+}
+
+/* release pages when DMA no longer being performed */
+int omap_gem_put_pages(struct drm_gem_object *obj)
+{
+       /* do something here if we dynamically attach/detach pages.. at
+        * least they would no longer need to be pinned if everyone has
+        * released the pages..
+        */
+       /* currently a no-op; always returns 0 */
+       return 0;
+}
+
+/* Return the kernel virtual address for CPU access.  Only contiguously
+ * allocated buffers carry a kernel virtual address, so this more or
+ * less only exists for omap_fbdev.
+ */
+void *omap_gem_vaddr(struct drm_gem_object *obj)
+{
+       return to_omap_bo(obj)->vaddr;
+}
+
+/* Buffer Synchronization:
+ */
+
+struct omap_gem_sync_waiter {
+       struct list_head list;
+       struct omap_gem_object *omap_obj;
+       enum omap_gem_op op;
+       /* sync counter values at which this waiter becomes runnable */
+       uint32_t read_target, write_target;
+       /* notify called w/ sync_lock held */
+       void (*notify)(void *arg);
+       void *arg;
+};
+
+/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+ * the read and/or write target count is achieved which can call a user
+ * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
+ * cpu access), etc.
+ */
+static LIST_HEAD(waiters);
+
+/* Does this waiter still have outstanding read and/or write ops to wait
+ * for, judged against the object's sync completion counters?
+ */
+static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+{
+       struct omap_gem_object *omap_obj = waiter->omap_obj;
+       bool read_waiting = (waiter->op & OMAP_GEM_READ) &&
+                       (omap_obj->sync->read_complete < waiter->read_target);
+       bool write_waiting = (waiter->op & OMAP_GEM_WRITE) &&
+                       (omap_obj->sync->write_complete < waiter->write_target);
+       return read_waiting || write_waiting;
+}
+
+/* macro for sync debug.. set SYNCDBG to 1 to enable the trace printks */
+#define SYNCDBG 0
+#define SYNC(fmt, ...) do { if (SYNCDBG) \
+               printk(KERN_ERR "%s:%d: "fmt"\n", \
+                               __func__, __LINE__, ##__VA_ARGS__); \
+       } while (0)
+
+
+/* Walk the global waiter list, firing and freeing every waiter whose
+ * targets have been reached.  Must be called with sync_lock held; the
+ * notify callbacks run under that lock.
+ */
+static void sync_op_update(void)
+{
+       struct omap_gem_sync_waiter *waiter, *n;
+       list_for_each_entry_safe(waiter, n, &waiters, list) {
+               if (!is_waiting(waiter)) {
+                       list_del(&waiter->list);
+                       SYNC("notify: %p", waiter);
+                       waiter->notify(waiter->arg);
+                       kfree(waiter);
+               }
+       }
+}
+
+/* Common bookkeeping for op start/finish: bump the pending counters on
+ * start, or the complete counters (notifying satisfied waiters) on
+ * finish.  The sync object is allocated lazily under sync_lock, hence
+ * GFP_ATOMIC.  Returns 0 on success or -ENOMEM.
+ */
+static inline int sync_op(struct drm_gem_object *obj,
+               enum omap_gem_op op, bool start)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+
+       spin_lock(&sync_lock);
+
+       if (!omap_obj->sync) {
+               omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+               if (!omap_obj->sync) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+       }
+
+       if (start) {
+               if (op & OMAP_GEM_READ)
+                       omap_obj->sync->read_pending++;
+               if (op & OMAP_GEM_WRITE)
+                       omap_obj->sync->write_pending++;
+       } else {
+               if (op & OMAP_GEM_READ)
+                       omap_obj->sync->read_complete++;
+               if (op & OMAP_GEM_WRITE)
+                       omap_obj->sync->write_complete++;
+               sync_op_update();
+       }
+
+unlock:
+       spin_unlock(&sync_lock);
+
+       return ret;
+}
+
+/* it is a bit lame to handle updates in this sort of polling way, but
+ * in case of PVR, the GPU can directly update read/write complete
+ * values, and not really tell us which ones it updated.. this also
+ * means that sync_lock is not quite sufficient.  So we'll need to
+ * do something a bit better when it comes time to add support for
+ * separate 2d hw..
+ */
+void omap_gem_op_update(void)
+{
+       /* sync_op_update() requires sync_lock to be held */
+       spin_lock(&sync_lock);
+       sync_op_update();
+       spin_unlock(&sync_lock);
+}
+
+/* mark the start of read and/or write operation; returns 0 or -ENOMEM */
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+       return sync_op(obj, op, true);
+}
+
+/* mark the finish of read and/or write operation, notifying any waiters */
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+       return sync_op(obj, op, false);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+
+/* waiter->notify callback used by omap_gem_op_sync(): clears the blocked
+ * task pointer (the wait condition) and wakes all sleepers on sync_event
+ */
+static void sync_notify(void *arg)
+{
+       struct task_struct **waiter_task = arg;
+       *waiter_task = NULL;
+       wake_up_all(&sync_event);
+}
+
+/* Block (interruptibly) until all currently pending read and/or write
+ * ops on the buffer have completed.  Returns 0 when there is nothing to
+ * wait for or the wait completed, -ERESTARTSYS if interrupted, or
+ * -ENOMEM on allocation failure.
+ */
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+       if (omap_obj->sync) {
+               struct task_struct *waiter_task = current;
+               struct omap_gem_sync_waiter *waiter =
+                               kzalloc(sizeof(*waiter), GFP_KERNEL);
+
+               if (!waiter) {
+                       return -ENOMEM;
+               }
+
+               waiter->omap_obj = omap_obj;
+               waiter->op = op;
+               waiter->read_target = omap_obj->sync->read_pending;
+               waiter->write_target = omap_obj->sync->write_pending;
+               /* sync_notify() clears waiter_task - the wakeup condition */
+               waiter->notify = sync_notify;
+               waiter->arg = &waiter_task;
+
+               spin_lock(&sync_lock);
+               if (is_waiting(waiter)) {
+                       SYNC("waited: %p", waiter);
+                       list_add_tail(&waiter->list, &waiters);
+                       spin_unlock(&sync_lock);
+                       ret = wait_event_interruptible(sync_event,
+                                       (waiter_task == NULL));
+                       spin_lock(&sync_lock);
+                       if (waiter_task) {
+                               SYNC("interrupted: %p", waiter);
+                               /* we were interrupted */
+                               list_del(&waiter->list);
+                               waiter_task = NULL;
+                       } else {
+                               /* freed in sync_op_update() */
+                               waiter = NULL;
+                       }
+               }
+               spin_unlock(&sync_lock);
+
+               if (waiter) {
+                       kfree(waiter);
+               }
+       }
+       return ret;
+}
+
+/* call fxn(arg), either synchronously or asynchronously if the op
+ * is currently blocked..  fxn() can be called from any context
+ *
+ * (TODO for now fxn is called back from whichever context calls
+ * omap_gem_op_update().. but this could be better defined later
+ * if needed)
+ *
+ * TODO more code in common w/ _sync()..
+ *
+ * Returns 0 on success (fxn called immediately or queued), or -ENOMEM.
+ */
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+               void (*fxn)(void *arg), void *arg)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       if (omap_obj->sync) {
+               struct omap_gem_sync_waiter *waiter =
+                               kzalloc(sizeof(*waiter), GFP_ATOMIC);
+
+               if (!waiter) {
+                       return -ENOMEM;
+               }
+
+               waiter->omap_obj = omap_obj;
+               waiter->op = op;
+               waiter->read_target = omap_obj->sync->read_pending;
+               waiter->write_target = omap_obj->sync->write_pending;
+               waiter->notify = fxn;
+               waiter->arg = arg;
+
+               spin_lock(&sync_lock);
+               if (is_waiting(waiter)) {
+                       SYNC("waited: %p", waiter);
+                       /* queued: ownership passes to sync_op_update(),
+                        * which calls fxn and frees the waiter
+                        */
+                       list_add_tail(&waiter->list, &waiters);
+                       spin_unlock(&sync_lock);
+                       return 0;
+               }
+
+               spin_unlock(&sync_lock);
+
+               /* op already complete: the waiter was never queued, so it
+                * must be freed here (previously it leaked on this path)
+                */
+               kfree(waiter);
+       }
+
+       /* no waiting.. */
+       fxn(arg);
+
+       return 0;
+}
+
+/* special API so PVR can update the buffer to use a sync-object allocated
+ * from it's sync-obj heap.  Only used for a newly allocated (from PVR's
+ * perspective) sync-object, so we overwrite the new syncobj w/ values
+ * from the already allocated syncobj (if there is one)
+ */
+int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+{
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+
+       spin_lock(&sync_lock);
+
+       if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+               /* clearing a previously set syncobj: switch back to a
+                * kernel-owned copy (the external object itself belongs
+                * to PVR and is not freed here)
+                */
+               syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+               if (!syncobj) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+               omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+               omap_obj->sync = syncobj;
+       } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+               /* replacing an existing syncobj: seed the external one
+                * with the current counters, then free our own copy
+                */
+               if (omap_obj->sync) {
+                       memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+                       kfree(omap_obj->sync);
+               }
+               omap_obj->flags |= OMAP_BO_EXT_SYNC;
+               omap_obj->sync = syncobj;
+       }
+
+unlock:
+       spin_unlock(&sync_lock);
+       return ret;
+}
+
+/* GEM core callback; not used by this driver, objects are initialized
+ * in omap_gem_new()
+ */
+int omap_gem_init_object(struct drm_gem_object *obj)
+{
+       return -EINVAL;          /* unused */
+}
+
+/* don't call directly.. called from GEM core when it is time to actually
+ * free the object..
+ */
+void omap_gem_free_object(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+       /* release the fake mmap offset, if one was created */
+       if (obj->map_list.map) {
+               drm_gem_free_mmap_offset(obj);
+       }
+
+       /* don't free externally allocated backing memory */
+       if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+               if (omap_obj->pages) {
+                       omap_gem_detach_pages(obj);
+               }
+               if (!is_shmem(obj)) {
+                       /* contiguous buffers were dma_alloc_writecombine()d
+                        * in omap_gem_new()
+                        */
+                       dma_free_writecombine(dev->dev, obj->size,
+                                       omap_obj->vaddr, omap_obj->paddr);
+               }
+       }
+
+       /* don't free externally allocated syncobj */
+       if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+               kfree(omap_obj->sync);
+       }
+
+       drm_gem_object_release(obj);
+
+       /* NOTE(review): assumes 'base' is the first member of
+        * omap_gem_object so kfree(obj) frees the whole allocation
+        */
+       kfree(obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle
+ *
+ * Returns 0 with *handle set on success, -ENOMEM if allocation failed,
+ * or the error from drm_gem_handle_create().
+ */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+               union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+       struct drm_gem_object *obj;
+       int ret;
+
+       obj = omap_gem_new(dev, gsize, flags);
+       if (!obj)
+               return -ENOMEM;
+
+       ret = drm_gem_handle_create(file, obj, handle);
+       if (ret) {
+               drm_gem_object_release(obj);
+               kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+               return ret;
+       }
+
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(obj);
+
+       return 0;
+}
+
+/* GEM buffer object constructor; returns the new object or NULL on failure */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+               union omap_gem_size gsize, uint32_t flags)
+{
+       struct omap_gem_object *omap_obj;
+       struct drm_gem_object *obj = NULL;
+       size_t size;
+       int ret;
+
+       if (flags & OMAP_BO_TILED) {
+               /* TODO: not implemented yet */
+               goto fail;
+       }
+
+       size = PAGE_ALIGN(gsize.bytes);
+
+       omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
+       if (!omap_obj) {
+               dev_err(dev->dev, "could not allocate GEM object\n");
+               goto fail;
+       }
+
+       obj = &omap_obj->base;
+
+       if (flags & OMAP_BO_SCANOUT) {
+               /* attempt to allocate contiguous memory */
+               omap_obj->vaddr =  dma_alloc_writecombine(dev->dev, size,
+                               &omap_obj->paddr, GFP_KERNEL);
+               if (omap_obj->vaddr) {
+                       /* got contiguous memory; on failure we fall back
+                        * to a shmem-backed object below
+                        */
+                       flags |= OMAP_BO_DMA;
+               }
+       }
+
+       omap_obj->flags = flags;
+
+       /* contiguous/external buffers need no shmem backing, so use the
+        * "private" init which skips the backing file
+        */
+       if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
+               ret = drm_gem_private_object_init(dev, obj, size);
+       } else {
+               ret = drm_gem_object_init(dev, obj, size);
+       }
+
+       if (ret) {
+               goto fail;
+       }
+
+       return obj;
+
+fail:
+       if (obj) {
+               omap_gem_free_object(obj);
+       }
+       return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
new file mode 100644 (file)
index 0000000..1cfc12d
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * drivers/staging/omapdrm/omap_gem_helpers.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* temporary copy of drm_gem_{get,put}_pages() until the
+ * "drm/gem: add functions to get/put pages" patch is merged..
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ *
+ * Returns the pinned page array on success, or an ERR_PTR() on failure.
+ * Release with _drm_gem_put_pages().
+ */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+       struct inode *inode;
+       struct address_space *mapping;
+       struct page *p, **pages;
+       int i, npages;
+
+       /* This is the shared memory object that backs the GEM resource */
+       inode = obj->filp->f_path.dentry->d_inode;
+       mapping = inode->i_mapping;
+
+       npages = obj->size >> PAGE_SHIFT;
+
+       pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (pages == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       gfpmask |= mapping_gfp_mask(mapping);
+
+       for (i = 0; i < npages; i++) {
+               p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+               if (IS_ERR(p))
+                       goto fail;
+               pages[i] = p;
+
+               /* There is a hypothetical issue w/ drivers that require
+                * buffer memory in the low 4GB.. if the pages are un-
+                * pinned, and swapped out, they can end up swapped back
+                * in above 4GB.  If pages are already in memory, then
+                * shmem_read_mapping_page_gfp will ignore the gfpmask,
+                * even if the already in-memory page disobeys the mask.
+                *
+                * It is only a theoretical issue today, because none of
+                * the devices with this limitation can be populated with
+                * enough memory to trigger the issue.  But this BUG_ON()
+                * is here as a reminder in case the problem with
+                * shmem_read_mapping_page_gfp() isn't solved by the time
+                * it does become a real issue.
+                *
+                * See this thread: http://lkml.org/lkml/2011/7/11/238
+                */
+               BUG_ON((gfpmask & __GFP_DMA32) &&
+                               (page_to_pfn(p) >= 0x00100000UL));
+       }
+
+       return pages;
+
+fail:
+       /* drop the page references taken so far */
+       while (i--) {
+               page_cache_release(pages[i]);
+       }
+       drm_free_large(pages);
+       /* propagate shmem_read_mapping_page_gfp()'s error; ERR_CAST is
+        * the idiom for re-returning an ERR_PTR of a different type
+        */
+       return ERR_CAST(p);
+}
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: mark the pages dirty before releasing (data was written)
+ * @accessed: mark the pages accessed (influences page reclaim)
+ */
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed)
+{
+       int i, npages;
+
+       npages = obj->size >> PAGE_SHIFT;
+
+       for (i = 0; i < npages; i++) {
+               if (dirty)
+                       set_page_dirty(pages[i]);
+
+               if (accessed)
+                       mark_page_accessed(pages[i]);
+
+               /* Undo the reference we took when populating the table */
+               page_cache_release(pages[i]);
+       }
+
+       drm_free_large(pages);
+}
index b27d9aa..93de4f2 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig TIDSPBRIDGE
        tristate "DSP Bridge driver"
-       depends on ARCH_OMAP3 && BROKEN
+       depends on ARCH_OMAP3
        select OMAP_MBOX_FWK
        help
          DSP/BIOS Bridge is designed for platforms that contain a GPP and
index f7f71b2..21ece56 100644 (file)
@@ -18,3 +18,11 @@ config THERMAL_HWMON
        depends on THERMAL
        depends on HWMON=y || HWMON=THERMAL
        default y
+
+config OMAP3_THERMAL
+       bool "OMAP3/DM3730 band gap thermal sensor driver"
+       depends on THERMAL
+       depends on ARCH_OMAP
+       help
+         Enable this to plug the OMAP3/DM3730 band gap thermal sensor driver
+         into the Linux thermal framework
index 31108a0..bf93496 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_THERMAL)          += thermal_sys.o
+obj-$(CONFIG_OMAP3_THERMAL)    += omap3_thermal.o
diff --git a/drivers/thermal/omap3_thermal.c b/drivers/thermal/omap3_thermal.c
new file mode 100644 (file)
index 0000000..8aee654
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * OMAP3/DM3730 band gap thermal driver.
+ *
+ * Copyright (C) 2014 Grazvydas Ignotas
+ * based on SPEAr Thermal Sensor driver (spear_thermal.c)
+ * Copyright (C) 2011-2012 ST Microelectronics
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/thermal.h>
+#include <linux/pm_runtime.h>
+#include <plat/cpu.h>
+
+/* low 7 bits of the bandgap register hold the ADC temperature code */
+#define ADC_CODE_MASK 0x7f
+
+struct omap3_thermal_dev {
+       struct device *dev;
+       void __iomem *thermal_base;     /* mapped bandgap register */
+       /* lookup table: ADC code -> temperature (table values are in
+        * millidegrees C, matching what get_temp reports)
+        */
+       const int *adc_to_temp;
+       u32 bgap_soc_mask;      /* "start of conversion" bit */
+       u32 bgap_eocz_mask;     /* "end of conversion" (busy) bit */
+};
+
+/* ADC code (0-127) to temperature in millidegrees C, DM3730 version.
+ * The table must be monotonically non-decreasing.
+ */
+static const int omap3630_adc_to_temp[128] = {
+       -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, // 7
+       -40000, -40000, -40000, -40000, -40000, -39000, -36500, -34500, // 15
+       -33000, -31000, -29000, -27000, -25000, -23000, -21000, -19250, // 23
+       -17750, -16000, -14250, -12750, -11000,  -9000,  -7250,  -5750, // 31
+        -4250,  -2500,   -750,   1000,   2750,   4250,   5750,   7500, // 39
+         9250,  11000,  12750,  14250,  16000,  18000,  20000,  22000, // 47
+        24000,  26000,  27750,  29250,  31000,  32750,  34250,  36000, // 55
+        37750,  39250,  41000,  42750,  44250,  46000,  47750,  49250, // 63
+       /* entry 67 was "560" (typo): fixed to 56000 to keep the table
+        * monotonic (54250 .. 56000 .. 57750)
+        */
+        51000,  52750,  54250,  56000,  57750,  59250,  61000,  63000, // 71
+        65000,  67000,  69000,  70750,  72500,  74250,  76000,  77750, // 79
+        79250,  81000,  82750,  84250,  86000,  87750,  89250,  91000, // 87
+        92750,  94250,  96000,  97750,  99250, 101000, 102750, 104250, // 95
+       106000, 108000, 110000, 112000, 114000, 116000, 117750, 119250, // 103
+       /* NOTE(review): entry 106 (124025) looks off-pattern vs its
+        * neighbours (~124250 expected) - verify against the datasheet
+        */
+       121000, 122750, 124025, 125000, 125000, 125000, 125000, 125000, // 111
+       125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, // 119
+       125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000  // 127
+};
+
+/* ADC code (0-127) to temperature in millidegrees C, OMAP34xx version.
+ * The table must be monotonically non-decreasing.
+ */
+static const int omap3530_adc_to_temp[128] = {
+       -40000, -40000, -40000, -40000, -40000, -39500, -38200, -36800, // 7
+       -34700, -32500, -31100, -29700, -28200, -26800, -25400, -24000, // 15
+       -22600, -21200, -19800, -18400, -17000, -15600, -14100, -12700, // 23
+       -11300,  -9900,  -8500,  -7100,  -5700,  -4250,  -2800,  -1400, // 31
+           50,   1550,   3000,   4400,   5850,   7300,   8700,  10100, // 39
+        11550,  13000,  14400,  15800,  17200,  18850,  20100,  21500, // 47
+        22900,  24350,  25800,  27200,  28600,  30000,  31400,  32800, // 55
+        34200,  35650,  37100,  38500,  39900,  41300,  42700,  44150, // 63
+        45600,  47000,  48400,  49800,  51300,  52600,  53950,  55300, // 71
+       /* entry 78 was "70050", which broke the table's monotonicity
+        * (63700 .. 70050 .. 66400); interpolated to 65050 - verify
+        * against the TI datasheet
+        */
+        56700,  58100,  59500,  60900,  62300,  63700,  65050,  66400, // 79
+        67800,  69200,  70600,  72000,  73400,  74800,  76200,  77600, // 87
+        79000,  80400,  81700,  83050,  84500,  85850,  87200,  88600, // 95
+        89950,  91300,  92700,  94050,  95400,  96800,  98200,  99550, // 103
+       100900, 102300, 103650, 105000, 106400, 107800, 109150, 110500, // 111
+       111900, 113300, 114650, 116000, 117400, 118750, 120100, 121500, // 119
+       122850, 124200, 124950, 125000, 125000, 125000, 125000, 125000  // 127
+};
+
+/* thermal_zone get_temp callback: trigger one bandgap conversion and
+ * translate the resulting ADC code through the per-SoC lookup table
+ */
+static int omap3_thermal_get_temp(struct thermal_zone_device *thermal,
+                                 unsigned long *temp)
+{
+       struct omap3_thermal_dev *tdev = thermal->devdata;
+       int timeout;
+       u32 val;
+       int ret;
+
+       ret = pm_runtime_get_sync(tdev->dev);
+       if (ret < 0) {
+               dev_err(tdev->dev, "pm_runtime_get_sync failed: %d\n", ret);
+               return ret;
+       }
+
+       val = readl(tdev->thermal_base);
+       val |= tdev->bgap_soc_mask; /* start of conversion */
+
+       writel(val, tdev->thermal_base);
+       usleep_range(428, 1000); /* at least 14 32k cycles */
+
+       /* clear the SOC bit again so the next conversion can be started */
+       val &= ~tdev->bgap_soc_mask;
+       writel(val, tdev->thermal_base);
+
+       /* poll for end-of-conversion (eocz bit clears) */
+       usleep_range(1221, 2000); /* at least 36+4 32k cycles */
+       for (timeout = 1000; timeout > 0; timeout--) {
+               val = readl(tdev->thermal_base);
+               if (!(val & tdev->bgap_eocz_mask))
+                       break;
+               cpu_relax();
+       }
+
+       pm_runtime_mark_last_busy(tdev->dev);
+       ret = pm_runtime_put_autosuspend(tdev->dev);
+
+       /* NOTE(review): on timeout the last value read is still reported
+        * and 0 is returned; consider returning an error instead.  The
+        * pm_runtime_put_autosuspend() result is also ignored.
+        */
+       if (timeout == 0)
+               dev_err(tdev->dev, "timeout waiting for eocz\n");
+
+       *temp = tdev->adc_to_temp[val & ADC_CODE_MASK];
+       return 0;
+}
+
+/* hook get_temp into the thermal framework; other ops are left unset */
+static const struct thermal_zone_device_ops omap3_thermal_ops = {
+       .get_temp = omap3_thermal_get_temp,
+};
+
+/* Map the bandgap register, select the SoC-specific bit layout and
+ * conversion table, then register the thermal zone.
+ */
+static int omap3_thermal_probe(struct platform_device *pdev)
+{
+       struct thermal_zone_device *omap3_thermal = NULL;
+       struct omap3_thermal_dev *tdev;
+       int ret = 0;
+       struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       if (!stres) {
+               dev_err(&pdev->dev, "memory resource missing\n");
+               return -ENODEV;
+       }
+
+       tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
+       if (!tdev)
+               return -ENOMEM;
+
+       tdev->dev = &pdev->dev;
+
+       /* bit layout of the bandgap register differs per SoC */
+       if (cpu_is_omap3630()) {
+               tdev->bgap_soc_mask = BIT(9);
+               tdev->bgap_eocz_mask = BIT(8);
+               tdev->adc_to_temp = omap3630_adc_to_temp;
+       } else if (cpu_is_omap34xx()) {
+               tdev->bgap_soc_mask = BIT(8);
+               tdev->bgap_eocz_mask = BIT(7);
+               tdev->adc_to_temp = omap3530_adc_to_temp;
+       } else {
+               dev_err(&pdev->dev, "not OMAP3 family\n");
+               return -ENODEV;
+       }
+
+       tdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
+                       resource_size(stres));
+       if (!tdev->thermal_base) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               return -ENOMEM;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+       pm_runtime_use_autosuspend(&pdev->dev);
+
+       omap3_thermal = thermal_zone_device_register("omap3-thermal", 0,
+                               tdev, &omap3_thermal_ops, 0, 0, 0, 0);
+       /* thermal_zone_device_register() returns ERR_PTR() on failure,
+        * never NULL, so the previous !omap3_thermal check could not fire
+        * and a registration failure was treated as success
+        */
+       if (IS_ERR(omap3_thermal)) {
+               ret = PTR_ERR(omap3_thermal);
+               dev_err(&pdev->dev, "thermal zone registration failed: %d\n",
+                               ret);
+               goto put_pm;
+       }
+
+       platform_set_drvdata(pdev, omap3_thermal);
+
+       return 0;
+
+put_pm:
+       pm_runtime_disable(&pdev->dev);
+       return ret;
+}
+
+/* platform remove callback: tear down in reverse order of probe */
+static int omap3_thermal_exit(struct platform_device *pdev)
+{
+       struct thermal_zone_device *omap3_thermal = platform_get_drvdata(pdev);
+
+       thermal_zone_device_unregister(omap3_thermal);
+       platform_set_drvdata(pdev, NULL);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+static struct platform_driver omap3_thermal_driver = {
+       .probe = omap3_thermal_probe,
+       .remove = omap3_thermal_exit,
+       .driver = {
+               .name = "omap3-thermal",
+               .owner = THIS_MODULE,
+       },
+};
+
+/* registers the platform driver on module init, unregisters on exit */
+module_platform_driver(omap3_thermal_driver);
+
+MODULE_AUTHOR("Grazvydas Ignotas <notasas@gmail.com>");
+MODULE_DESCRIPTION("OMAP3/DM3730 thermal driver");
+MODULE_LICENSE("GPL");
index 5e713d3..d3c8e82 100644 (file)
 #include <plat/dmtimer.h>
 #include <plat/omap-serial.h>
 
+/* SCR register bitmasks */
+#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK              (1 << 7)
+
+/* FCR register bitmasks */
+#define OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT               6
+#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK                        (0x3 << 6)
+
 static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
 
 /* Forward declaration of functions */
@@ -148,11 +155,12 @@ static void serial_omap_stop_rx(struct uart_port *port)
        serial_out(up, UART_IER, up->ier);
 }
 
-static inline void receive_chars(struct uart_omap_port *up, int *status)
+static inline void receive_chars(struct uart_omap_port *up,
+               unsigned int *status)
 {
        struct tty_struct *tty = up->port.state->port.tty;
-       unsigned int flag;
-       unsigned char ch, lsr = *status;
+       unsigned int flag, lsr = *status;
+       unsigned char ch = 0;
        int max_count = 256;
 
        do {
@@ -262,6 +270,7 @@ static void serial_omap_start_tx(struct uart_port *port)
        int ret = 0;
 
        if (!up->use_dma) {
+               omap_uart_block_sleep_id(up->pdev->id);
                serial_omap_enable_ier_thri(up);
                return;
        }
@@ -723,6 +732,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
                up->ier |= UART_IER_MSI;
        serial_out(up, UART_IER, up->ier);
        serial_out(up, UART_LCR, cval);         /* reset DLAB */
+       up->scr = OMAP_UART_SCR_TX_EMPTY;
 
        /* FIFOs and DMA Settings */
 
@@ -744,15 +754,23 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
        up->mcr = serial_in(up, UART_MCR);
        serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
        /* FIFO ENABLE, DMA MODE */
-       serial_out(up, UART_FCR, up->fcr);
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+       up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
 
        if (up->use_dma) {
                serial_out(up, UART_TI752_TLR, 0);
-               serial_out(up, UART_OMAP_SCR,
-                       (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+               up->scr |= UART_FCR_TRIGGER_4;
+       } else {
+               /* Set receive FIFO threshold to 1 byte */
+               up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
+               up->fcr |= (0x1 << OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT);
        }
 
+       serial_out(up, UART_FCR, up->fcr);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+       serial_out(up, UART_OMAP_SCR, up->scr);
+
        serial_out(up, UART_EFR, up->efr);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
        serial_out(up, UART_MCR, up->mcr);
index a605549..0cee2c6 100644 (file)
@@ -1198,9 +1198,9 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
        raw_mode = (kbd->kbdmode == VC_RAW);
        if (raw_mode && !hw_raw)
                if (emulate_raw(vc, keycode, !down << 7))
-                       if (keycode < BTN_MISC && printk_ratelimit())
+                       /*if (keycode < BTN_MISC && printk_ratelimit())
                                pr_warning("can't emulate rawmode for keycode %d\n",
-                                          keycode);
+                                          keycode)*/;
 
 #ifdef CONFIG_SPARC
        if (keycode == KEY_A && sparc_l1_a_state) {
index ce7253b..59240b5 100644 (file)
@@ -640,6 +640,7 @@ fail:
                priv->iv = kmemdup(iv, nr_segs * sizeof(struct iovec),
                                   GFP_KERNEL);
                if (!priv->iv) {
+                       value = -ENOMEM;
                        kfree(priv);
                        goto fail;
                }
index d4159b8..e39b029 100644 (file)
@@ -337,7 +337,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
        .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
 };
 
-MODULE_ALIAS("platform:ehci-omap");
+MODULE_ALIAS("platform:omap-ehci");
 MODULE_AUTHOR("Texas Instruments, Inc.");
 MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
 
index fb05cac..2e9b5ff 100644 (file)
@@ -137,6 +137,9 @@ static int musb_ulpi_read(struct otg_transceiver *otg, u32 reg)
        int     i = 0;
        u8      r;
        u8      power;
+       int     ret;
+
+       pm_runtime_get_sync(otg->io_dev);
 
        /* Make sure the transceiver is not in low power mode */
        power = musb_readb(addr, MUSB_POWER);
@@ -154,15 +157,22 @@ static int musb_ulpi_read(struct otg_transceiver *otg, u32 reg)
        while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
                                & MUSB_ULPI_REG_CMPLT)) {
                i++;
-               if (i == 10000)
-                       return -ETIMEDOUT;
+               if (i == 10000) {
+                       ret = -ETIMEDOUT;
+                       goto out;
+               }
 
        }
        r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
        r &= ~MUSB_ULPI_REG_CMPLT;
        musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
 
-       return musb_readb(addr, MUSB_ULPI_REG_DATA);
+       ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
+
+out:
+       pm_runtime_put(otg->io_dev);
+
+       return ret;
 }
 
 static int musb_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
@@ -171,6 +181,9 @@ static int musb_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
        int     i = 0;
        u8      r = 0;
        u8      power;
+       int     ret = 0;
+
+       pm_runtime_get_sync(otg->io_dev);
 
        /* Make sure the transceiver is not in low power mode */
        power = musb_readb(addr, MUSB_POWER);
@@ -184,15 +197,20 @@ static int musb_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
        while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
                                & MUSB_ULPI_REG_CMPLT)) {
                i++;
-               if (i == 10000)
-                       return -ETIMEDOUT;
+               if (i == 10000) {
+                       ret = -ETIMEDOUT;
+                       goto out;
+               }
        }
 
        r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
        r &= ~MUSB_ULPI_REG_CMPLT;
        musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
 
-       return 0;
+out:
+       pm_runtime_put(otg->io_dev);
+
+       return ret;
 }
 #else
 #define musb_ulpi_read         NULL
@@ -437,8 +455,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 
                                if (power & MUSB_POWER_SUSPENDM) {
                                        /* spurious */
-                                       musb->int_usb &= ~MUSB_INTR_SUSPEND;
-                                       dev_dbg(musb->controller, "Spurious SUSPENDM\n");
+                                       int_usb &= ~MUSB_INTR_SUSPEND;
+                                       dev_err(musb->controller, "Spurious SUSPENDM\n");
                                        break;
                                }
 
@@ -481,14 +499,20 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                if ((devctl & MUSB_DEVCTL_VBUS)
                                                != (3 << MUSB_DEVCTL_VBUS_SHIFT)
                                                ) {
-                                       musb->int_usb |= MUSB_INTR_DISCONNECT;
-                                       musb->int_usb &= ~MUSB_INTR_SUSPEND;
+                                       if (!(int_usb & MUSB_INTR_DISCONNECT))
+                                               dev_err(musb->controller,
+                                                 "disconnect while suspended?\n");
+                                       int_usb |= MUSB_INTR_DISCONNECT;
+                                       int_usb &= ~MUSB_INTR_SUSPEND;
                                        break;
                                }
                                musb_g_resume(musb);
                                break;
                        case OTG_STATE_B_IDLE:
-                               musb->int_usb &= ~MUSB_INTR_SUSPEND;
+                               if (int_usb & MUSB_INTR_SUSPEND)
+                                       dev_err(musb->controller,
+                                               "bogus suspend+resume?\n");
+                               int_usb &= ~MUSB_INTR_SUSPEND;
                                break;
                        default:
                                WARNING("bogus %s RESUME (%s)\n",
@@ -573,9 +597,10 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        break;
                }
 
-               dev_dbg(musb->controller, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+               dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
+                               "VBUS_ERROR in %s (%02x, %02x, %s), retry #%d, port1 %08x\n",
                                otg_state_string(musb->xceiv->state),
-                               devctl,
+                               devctl, power,
                                ({ char *s;
                                switch (devctl & MUSB_DEVCTL_VBUS) {
                                case 0 << MUSB_DEVCTL_VBUS_SHIFT:
@@ -653,6 +678,15 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        musb->is_active = 0;
                        break;
                }
+
+               switch (musb->xceiv->state) {
+               case OTG_STATE_B_IDLE:
+               case OTG_STATE_B_PERIPHERAL:
+                       cancel_delayed_work(&musb->vbus_workaround_work);
+                       schedule_delayed_work(&musb->vbus_workaround_work, HZ / 2);
+               default:
+                       break;
+               }
        }
 
        if (int_usb & MUSB_INTR_CONNECT) {
@@ -686,7 +720,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                switch (musb->xceiv->state) {
                case OTG_STATE_B_PERIPHERAL:
                        if (int_usb & MUSB_INTR_SUSPEND) {
-                               dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
+                               dev_err(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
                                int_usb &= ~MUSB_INTR_SUSPEND;
                                goto b_host;
                        } else
@@ -915,8 +949,8 @@ void musb_start(struct musb *musb)
                 */
                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
                        musb->is_active = 1;
-               else
-                       devctl |= MUSB_DEVCTL_SESSION;
+               //else
+               //      devctl |= MUSB_DEVCTL_SESSION;
 
        } else if (is_host_enabled(musb)) {
                /* assume ID pin is hard-wired to ground */
@@ -981,6 +1015,9 @@ static void musb_shutdown(struct platform_device *pdev)
        unsigned long   flags;
 
        pm_runtime_get_sync(musb->controller);
+
+       musb_gadget_cleanup(musb);
+
        spin_lock_irqsave(&musb->lock, flags);
        musb_platform_disable(musb);
        musb_generic_disable(musb);
@@ -992,6 +1029,9 @@ static void musb_shutdown(struct platform_device *pdev)
        musb_platform_exit(musb);
 
        pm_runtime_put(musb->controller);
+
+       cancel_delayed_work(&musb->vbus_workaround_work);
+
        /* FIXME power down */
 }
 
@@ -1483,15 +1523,22 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
        unsigned long   flags;
        irqreturn_t     retval = IRQ_NONE;
        struct musb     *musb = __hci;
+       int             i;
 
        spin_lock_irqsave(&musb->lock, flags);
 
-       musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
-       musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
-       musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+       for (i = 0; i < 8; i++) {
+               musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+               /* SOF is not enabled, but status is still often set */
+               musb->int_usb &= ~MUSB_INTR_SOF;
+               musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+               musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
 
-       if (musb->int_usb || musb->int_tx || musb->int_rx)
-               retval = musb_interrupt(musb);
+               if (musb->int_usb || musb->int_tx || musb->int_rx)
+                       retval = musb_interrupt(musb);
+               else
+                       break;
+       }
 
        spin_unlock_irqrestore(&musb->lock, flags);
 
@@ -1777,6 +1824,46 @@ static void musb_irq_work(struct work_struct *data)
        }
 }
 
+#include <linux/usb/ulpi.h>
+
+static void musb_vbus_workaround_work(struct work_struct *work)
+{
+       struct musb *musb = container_of(work, struct musb, vbus_workaround_work.work);
+       u8 devctl;
+       int ret;
+
+       if (musb_ulpi_access.write == NULL)
+               return;
+
+       pm_runtime_get_sync(musb->controller);
+
+       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+       /*
+        * I don't really know why, but VBUS sometimes gets stuck and
+        * causes session to never end. It would look like some pullup
+        * is enabled when it shouldn't be on certain PHY states.
+        * Turning on pulldowns magically drains VBUS to zero and allows
+        * session to end, so let's do that here.
+        *
+        * XXX: probably better check VBUS on TWL?
+        * beagle sometimes has session bit set but no VBUS on twl?
+        */
+       if ((musb->xceiv->state == OTG_STATE_B_PERIPHERAL ||
+            musb->xceiv->state == OTG_STATE_B_IDLE) &&
+           (devctl & MUSB_DEVCTL_VBUS) != (3 << MUSB_DEVCTL_VBUS_SHIFT) &&
+           (devctl & MUSB_DEVCTL_VBUS) != (0 << MUSB_DEVCTL_VBUS_SHIFT)) {
+               dev_dbg(musb->controller, "VBUS workaround..\n");
+               ret = musb_ulpi_access.write(musb->xceiv,
+                       ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN,
+                       ULPI_SET(ULPI_OTG_CTRL));
+               //if (ret)
+               //      dev_err(musb->controller, "VBUS workaround error\n");
+       }
+
+       pm_runtime_put(musb->controller);
+}
+
 /* --------------------------------------------------------------------------
  * Init support
  */
@@ -1834,8 +1921,6 @@ static void musb_free(struct musb *musb)
        sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
 #endif
 
-       musb_gadget_cleanup(musb);
-
        if (musb->nIrq >= 0) {
                if (musb->irq_wake)
                        disable_irq_wake(musb->nIrq);
@@ -1848,7 +1933,7 @@ static void musb_free(struct musb *musb)
                dma_controller_destroy(c);
        }
 
-       kfree(musb);
+       usb_put_hcd(musb_to_hcd(musb));
 }
 
 /*
@@ -1914,6 +1999,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        }
 
        if (!musb->xceiv->io_ops) {
+               musb->xceiv->io_dev = musb->controller;
                musb->xceiv->io_priv = musb->mregs;
                musb->xceiv->io_ops = &musb_ulpi_access;
        }
@@ -1948,6 +2034,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        /* Init IRQ workqueue before request_irq */
        INIT_WORK(&musb->irq_work, musb_irq_work);
 
+       INIT_DELAYED_WORK(&musb->vbus_workaround_work, musb_vbus_workaround_work);
+
        /* attach to the IRQ */
        if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
                dev_err(dev, "request_irq %d failed!\n", nIrq);
@@ -2004,9 +2092,13 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
                                ? 'B' : 'A'));
 
        } else /* peripheral is enabled */ {
-               MUSB_DEV_MODE(musb);
-               musb->xceiv->default_a = 0;
-               musb->xceiv->state = OTG_STATE_B_IDLE;
+               if (musb->xceiv->default_a) {
+                       MUSB_HST_MODE(musb);
+                       musb->xceiv->state = OTG_STATE_A_IDLE;
+               } else {
+                       MUSB_DEV_MODE(musb);
+                       musb->xceiv->state = OTG_STATE_B_IDLE;
+               }
 
                status = musb_gadget_setup(musb);
 
@@ -2019,6 +2111,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (status < 0)
                goto fail3;
 
+       pm_runtime_put(musb->controller);
+
        status = musb_init_debugfs(musb);
        if (status < 0)
                goto fail4;
@@ -2118,11 +2212,9 @@ static int __exit musb_remove(struct platform_device *pdev)
         *  - Peripheral mode: peripheral is deactivated (or never-activated)
         *  - OTG mode: both roles are deactivated (or never-activated)
         */
-       pm_runtime_get_sync(musb->controller);
        musb_exit_debugfs(musb);
        musb_shutdown(pdev);
 
-       pm_runtime_put(musb->controller);
        musb_free(musb);
        iounmap(ctrl_base);
        device_init_wakeup(&pdev->dev, 0);
@@ -2163,6 +2255,7 @@ static void musb_save_context(struct musb *musb)
                if (!epio)
                        continue;
 
+               musb_writeb(musb_base, MUSB_INDEX, i);
                musb->context.index_regs[i].txmaxp =
                        musb_readw(epio, MUSB_TXMAXP);
                musb->context.index_regs[i].txcsr =
@@ -2238,6 +2331,7 @@ static void musb_restore_context(struct musb *musb)
                if (!epio)
                        continue;
 
+               musb_writeb(musb_base, MUSB_INDEX, i);
                musb_writew(epio, MUSB_TXMAXP,
                        musb->context.index_regs[i].txmaxp);
                musb_writew(epio, MUSB_TXCSR,
@@ -2293,21 +2387,32 @@ static int musb_suspend(struct device *dev)
 {
        struct musb     *musb = dev_to_musb(dev);
        unsigned long   flags;
+       int             ret = 0;
 
        spin_lock_irqsave(&musb->lock, flags);
 
-       if (is_peripheral_active(musb)) {
+       {
                /* FIXME force disconnect unless we know USB will wake
                 * the system up quickly enough to respond ...
                 */
-       } else if (is_host_active(musb)) {
-               /* we know all the children are suspended; sometimes
-                * they will even be wakeup-enabled.
+               /*
+                * FIXME: musb must be already runtime suspended at this point.
+                * If it's not, framework will try to suspend it late when
+                * i2c will be off, and twl4030 will want to access it for its
+                * stuff, causing data abort.
                 */
+               int pm_usage_count =
+                       atomic_read(&musb->controller->power.usage_count);
+               if (pm_usage_count > 1) {
+                       dev_err(dev, "can't suspend while still active, "
+                               "try removing gadget drivers (usage_count %d)\n",
+                               pm_usage_count);
+                       ret = -EBUSY;
+               }
        }
 
        spin_unlock_irqrestore(&musb->lock, flags);
-       return 0;
+       return ret;
 }
 
 static int musb_resume_noirq(struct device *dev)
index b3c065a..049cb50 100644 (file)
@@ -311,6 +311,7 @@ struct musb_context_registers {
        u8 index, testmode;
 
        u8 devctl, busctl, misc;
+       u32 otg_interfsel;
 
        struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
 };
@@ -447,13 +448,14 @@ struct musb {
         * We added this flag to forcefully disable double
         * buffering until we get it working.
         */
-       unsigned                double_buffer_not_ok:1 __deprecated;
+       unsigned                double_buffer_not_ok:1;
 
        struct musb_hdrc_config *config;
 
 #ifdef MUSB_CONFIG_PROC_FS
        struct proc_dir_entry *proc_entry;
 #endif
+       struct delayed_work     vbus_workaround_work;
 };
 
 static inline struct musb *gadget_to_musb(struct usb_gadget *g)
index 9c8845a..9be7c79 100644 (file)
@@ -1965,9 +1965,8 @@ static int musb_gadget_start(struct usb_gadget *g,
                        goto err2;
                }
 
-               if ((musb->xceiv->last_event == USB_EVENT_ID)
-                                       && musb->xceiv->set_vbus)
-                       otg_set_vbus(musb->xceiv, 1);
+               if (musb->xceiv->last_event == USB_EVENT_ID)
+                       musb_platform_set_vbus(musb, 1);
 
                hcd->self.uses_pio_for_control = 1;
        }
@@ -2054,6 +2053,7 @@ static int musb_gadget_stop(struct usb_gadget *g,
        dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
 
        musb->is_active = 0;
+       musb->gadget_driver = NULL;
        musb_platform_try_idle(musb, 0);
        spin_unlock_irqrestore(&musb->lock, flags);
 
index a8f0c09..58ca5e4 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
+#include <linux/delay.h>
 
 #include "musb_core.h"
 #include "omap2430.h"
@@ -145,6 +146,7 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
 
        if (is_on) {
                if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+                       int loops = 100;
                        /* start the session */
                        devctl |= MUSB_DEVCTL_SESSION;
                        musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
@@ -154,9 +156,11 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
                         */
                        while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
 
+                               mdelay(5);
                                cpu_relax();
 
-                               if (time_after(jiffies, timeout)) {
+                               if (time_after(jiffies, timeout)
+                                   || loops-- <= 0) {
                                        dev_err(musb->controller,
                                        "configured as A device timeout");
                                        ret = -EINVAL;
@@ -262,6 +266,8 @@ static int musb_otg_notifications(struct notifier_block *nb,
 
                if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
                        if (musb->gadget_driver) {
+                               omap2430_musb_set_vbus(musb, 0);
+
                                pm_runtime_mark_last_busy(musb->controller);
                                pm_runtime_put_autosuspend(musb->controller);
                        }
@@ -492,6 +498,9 @@ static int omap2430_runtime_suspend(struct device *dev)
        struct omap2430_glue            *glue = dev_get_drvdata(dev);
        struct musb                     *musb = glue_to_musb(glue);
 
+       musb->context.otg_interfsel = musb_readl(musb->mregs,
+                                               OTG_INTERFSEL);
+
        omap2430_low_level_exit(musb);
        otg_set_suspend(musb->xceiv, 1);
 
@@ -504,6 +513,10 @@ static int omap2430_runtime_resume(struct device *dev)
        struct musb                     *musb = glue_to_musb(glue);
 
        omap2430_low_level_init(musb);
+       if (musb->context.otg_interfsel != 0)
+               musb_writel(musb->mregs, OTG_INTERFSEL,
+                                       musb->context.otg_interfsel);
+
        otg_set_suspend(musb->xceiv, 0);
 
        return 0;
index 14f66c3..296e553 100644 (file)
@@ -163,6 +163,8 @@ struct twl4030_usb {
        bool                    vbus_supplied;
        u8                      asleep;
        bool                    irq_enabled;
+
+       struct delayed_work     id_workaround_work;
 };
 
 /* internal define on top of container_of */
@@ -246,10 +248,30 @@ twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
 
 /*-------------------------------------------------------------------------*/
 
+static bool twl4030_is_driving_vbus(struct twl4030_usb *twl)
+{
+       int ret;
+
+       ret = twl4030_usb_read(twl, PHY_CLK_CTRL_STS);
+       if (ret < 0 || !(ret & PHY_DPLL_CLK))
+               /*
+                * if clocks are off, registers are not updated,
+                * but we can assume we don't drive VBUS in this case
+                */
+               return false;
+
+       ret = twl4030_usb_read(twl, ULPI_OTG_CTRL);
+       if (ret < 0)
+               return false;
+
+       return (ret & (ULPI_OTG_DRVVBUS | ULPI_OTG_CHRGVBUS)) ? true : false;
+}
+
 static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
 {
        int     status;
        int     linkstat = USB_EVENT_NONE;
+       bool    driving_vbus = false;
 
        twl->vbus_supplied = false;
 
@@ -263,23 +285,31 @@ static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
         * signal is active, the OTG module is activated, and
         * its interrupt may be raised (may wake the system).
         */
+       msleep(50);
        status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER,
                        STS_HW_CONDITIONS);
        if (status < 0)
                dev_err(twl->dev, "USB link status err %d\n", status);
        else if (status & (BIT(7) | BIT(2))) {
-               if (status & (BIT(7)))
-                        twl->vbus_supplied = true;
+               if (status & BIT(7)) {
+                       driving_vbus = twl4030_is_driving_vbus(twl);
+                       if (driving_vbus)
+                               status &= ~BIT(7);
+               }
 
                if (status & BIT(2))
                        linkstat = USB_EVENT_ID;
-               else
+               else if (status & BIT(7)) {
                        linkstat = USB_EVENT_VBUS;
-       } else
-               linkstat = USB_EVENT_NONE;
+                       twl->vbus_supplied = true;
+               }
+       }
+
+       dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x; link %d, driving_vbus %d\n",
+                       status, linkstat, driving_vbus);
 
-       dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
-                       status, status, linkstat);
+       if (twl->otg.last_event == linkstat)
+               return linkstat;
 
        twl->otg.last_event = linkstat;
 
@@ -419,7 +449,16 @@ static void twl4030_phy_resume(struct twl4030_usb *twl)
                return;
        __twl4030_phy_resume(twl);
        twl->asleep = 0;
-       dev_dbg(twl->dev, "%s\n", __func__);
+
+       /*
+        * XXX When VBUS gets driven after musb goes to A mode,
+        * ID_PRES related interrupts no longer arrive, why?
+        * Register itself is updated fine though, so we must poll.
+        */
+       if (twl->otg.last_event == USB_EVENT_ID) {
+               cancel_delayed_work(&twl->id_workaround_work);
+               schedule_delayed_work(&twl->id_workaround_work, HZ);
+       }
 }
 
 static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -497,9 +536,47 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev,
 }
 static DEVICE_ATTR(vbus, 0444, twl4030_usb_vbus_show, NULL);
 
+static ssize_t twl4030_usb_id_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       int ret;
+       int n = 0;
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
+       twl4030_i2c_access(twl, 1);
+       ret = twl4030_usb_read(twl, ULPI_OTG_CTRL);
+       if ((ret < 0) || (!(ret & ULPI_OTG_ID_PULLUP))) {
+               /*
+                * enable ID pullup so that the id pin state can be measured,
+                * it seems to be disabled sometimes for some reason
+                */
+               dev_dbg(dev, "ULPI_OTG_ID_PULLUP not set (%x)\n", ret);
+               twl4030_usb_set_bits(twl, ULPI_OTG_CTRL, ULPI_OTG_ID_PULLUP);
+               mdelay(100);
+       }
+       ret = twl4030_usb_read(twl, ID_STATUS);
+       twl4030_i2c_access(twl, 0);
+       if (ret < 0)
+               return ret;
+       if (ret & ID_RES_FLOAT)
+               n = scnprintf(buf, PAGE_SIZE, "%s\n", "floating");
+       else if (ret & ID_RES_440K)
+               n = scnprintf(buf, PAGE_SIZE, "%s\n", "440k");
+       else if (ret & ID_RES_200K)
+               n = scnprintf(buf, PAGE_SIZE, "%s\n", "200k");
+       else if (ret & ID_RES_102K)
+               n = scnprintf(buf, PAGE_SIZE, "%s\n", "102k");
+       else if (ret & ID_RES_GND)
+               n = scnprintf(buf, PAGE_SIZE, "%s\n", "GND");
+       else
+               n = scnprintf(buf, PAGE_SIZE, "unknown: id=0x%x\n", ret);
+       return n;
+}
+static DEVICE_ATTR(id, 0444, twl4030_usb_id_show, NULL);
+
 static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
 {
        struct twl4030_usb *twl = _twl;
+       int status_old = twl->otg.last_event;
        int status;
 
        status = twl4030_usb_linkstat(twl);
@@ -515,12 +592,8 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                 * USB_LINK_VBUS state.  musb_hdrc won't care until it
                 * starts to handle softconnect right.
                 */
-               if (status == USB_EVENT_NONE)
-                       twl4030_phy_suspend(twl, 0);
-               else
-                       twl4030_phy_resume(twl);
-
-               atomic_notifier_call_chain(&twl->otg.notifier, status,
+               if (status != status_old)
+                       atomic_notifier_call_chain(&twl->otg.notifier, status,
                                twl->otg.gadget);
        }
        sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -528,23 +601,44 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
        return IRQ_HANDLED;
 }
 
-static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+static void twl4030_id_workaround_work(struct work_struct *work)
 {
+       struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
+               id_workaround_work.work);
+       int status_old = twl->otg.last_event;
        int status;
 
        status = twl4030_usb_linkstat(twl);
-       if (status >= 0) {
-               if (status == USB_EVENT_NONE) {
-                       __twl4030_phy_power(twl, 0);
-                       twl->asleep = 1;
-               } else {
-                       __twl4030_phy_resume(twl);
-                       twl->asleep = 0;
-               }
+       if (status != status_old) {
+               dev_dbg(twl->dev, "handle missing status change: %d->%d\n",
+                       status_old, status);
+               twl->otg.last_event = status_old;
+               twl4030_usb_irq(0, twl);
+       }
 
-               atomic_notifier_call_chain(&twl->otg.notifier, status,
-                               twl->otg.gadget);
+       /* don't schedule during sleep - irq works right then */
+       if (status == USB_EVENT_ID && !twl->asleep) {
+               cancel_delayed_work(&twl->id_workaround_work);
+               schedule_delayed_work(&twl->id_workaround_work, HZ);
        }
+}
+
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+       int status;
+
+       /*
+        * Start in sleep state; we'll get an otg.set_suspend(false) call
+        * and power up when musb runtime_pm enable kicks in.
+        */
+       __twl4030_phy_power(twl, 0);
+       twl->asleep = 1;
+
+       status = twl4030_usb_linkstat(twl);
+       if (status >= 0 && status != USB_EVENT_NONE)
+               atomic_notifier_call_chain(&twl->otg.notifier, status,
+                       twl->otg.gadget);
+
        sysfs_notify(&twl->dev->kobj, NULL, "vbus");
 }
 
@@ -620,6 +714,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        /* init spinlock for workqueue */
        spin_lock_init(&twl->lock);
 
+       INIT_DELAYED_WORK(&twl->id_workaround_work, twl4030_id_workaround_work);
+
        err = twl4030_usb_ldo_init(twl);
        if (err) {
                dev_err(&pdev->dev, "ldo init failed\n");
@@ -631,6 +727,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, twl);
        if (device_create_file(&pdev->dev, &dev_attr_vbus))
                dev_warn(&pdev->dev, "could not create sysfs file\n");
+       if (device_create_file(&pdev->dev, &dev_attr_id))
+               dev_warn(&pdev->dev, "could not create sysfs file\n");
 
        ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
 
@@ -653,9 +751,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
                return status;
        }
 
-       /* Power down phy or make it work according to
-        * current link state.
-        */
        twl4030_usb_phy_init(twl);
 
        dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
@@ -667,7 +762,9 @@ static int __exit twl4030_usb_remove(struct platform_device *pdev)
        struct twl4030_usb *twl = platform_get_drvdata(pdev);
        int val;
 
+       cancel_delayed_work(&twl->id_workaround_work);
        free_irq(twl->irq, twl);
+       device_remove_file(twl->dev, &dev_attr_id);
        device_remove_file(twl->dev, &dev_attr_vbus);
 
        /* set transceiver mode to power on defaults */
index 278aeaa..28f9ca4 100644 (file)
@@ -238,6 +238,12 @@ config BACKLIGHT_PWM
          If you have a LCD backlight adjustable by PWM, say Y to enable
          this driver.
 
+config BACKLIGHT_PANDORA
+       tristate "Backlight Driver for Pandora"
+       depends on BACKLIGHT_CLASS_DEVICE && TWL4030_CORE
+       help
+         If you have a Pandora, say Y to enable its backlight driver.
+
 config BACKLIGHT_DA903X
        tristate "Backlight Driver for DA9030/DA9034 using WLED"
        depends on PMIC_DA903X
index fdd1fc4..1fbfd19 100644 (file)
@@ -26,6 +26,7 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
 obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
 obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
 obj-$(CONFIG_BACKLIGHT_PWM)    += pwm_bl.o
+obj-$(CONFIG_BACKLIGHT_PANDORA)        += pandora_bl.o
 obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
 obj-$(CONFIG_BACKLIGHT_MAX8925)        += max8925_bl.o
 obj-$(CONFIG_BACKLIGHT_APPLE)  += apple_bl.o
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
new file mode 100644 (file)
index 0000000..1187c2c
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Backlight driver for Pandora handheld.
+ * Pandora uses TWL4030 PWM0 -> TPS61161 combo for control backlight.
+ * Based on pwm_bl.c
+ *
+ * Copyright 2009,2012 Gražvydas Ignotas <notasas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/i2c/twl.h>
+#include <linux/err.h>
+
+#define TWL_PWM0_ON            0x00
+#define TWL_PWM0_OFF           0x01
+
+#define TWL_INTBR_GPBR1                0x0c
+#define TWL_INTBR_PMBR1                0x0d
+
+#define TWL_PMBR1_PWM0_MUXMASK 0x0c
+#define TWL_PMBR1_PWM0         0x04
+#define PWM0_CLK_ENABLE                BIT(0)
+#define PWM0_ENABLE            BIT(2)
+
+/* range accepted by hardware */
+#define MIN_VALUE 9
+#define MAX_VALUE 63
+#define MAX_USER_VALUE (MAX_VALUE - MIN_VALUE)
+
+#define PANDORABL_WAS_OFF BL_CORE_DRIVER1
+
+static int pandora_backlight_update_status(struct backlight_device *bl)
+{
+       int brightness = bl->props.brightness;
+
+       if (bl->props.power != FB_BLANK_UNBLANK)
+               brightness = 0;
+       if (bl->props.state & BL_CORE_FBBLANK)
+               brightness = 0;
+       if (bl->props.state & BL_CORE_SUSPENDED)
+               brightness = 0;
+
+       if ((unsigned int)brightness > MAX_USER_VALUE)
+               brightness = MAX_USER_VALUE;
+
+       if (brightness == 0) {
+               if (bl->props.state & PANDORABL_WAS_OFF)
+                       goto done;
+
+               /* first disable PWM0 output, then clock */
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR, PWM0_ENABLE, 0,
+                       TWL_INTBR_GPBR1);
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR, PWM0_CLK_ENABLE, 0,
+                       TWL_INTBR_GPBR1);
+               goto done;
+       }
+
+       if (bl->props.state & PANDORABL_WAS_OFF) {
+               /*
+                * set PWM duty cycle to max. TPS61161 seems to use this
+                * to calibrate it's PWM sensitivity when it starts.
+                */
+               twl_i2c_write_u8(TWL4030_MODULE_PWM0, MAX_VALUE,
+                                       TWL_PWM0_OFF);
+
+               /* first enable clock, then PWM0 out */
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                       PWM0_ENABLE, PWM0_CLK_ENABLE, TWL_INTBR_GPBR1);
+               twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+                       0, PWM0_ENABLE, TWL_INTBR_GPBR1);
+
+               /*
+                * TI made it very easy to enable digital control, so easy that
+                * it often triggers unintentionally and disabes PWM control,
+                * so wait until 1 wire mode detection window ends.
+                */
+               usleep_range(2000, 10000);
+       }
+
+       twl_i2c_write_u8(TWL4030_MODULE_PWM0, MIN_VALUE + brightness,
+                               TWL_PWM0_OFF);
+
+done:
+       if (brightness != 0)
+               bl->props.state &= ~PANDORABL_WAS_OFF;
+       else
+               bl->props.state |= PANDORABL_WAS_OFF;
+
+       return 0;
+}
+
+static int pandora_backlight_get_brightness(struct backlight_device *bl)
+{
+       return bl->props.brightness;
+}
+
+static const struct backlight_ops pandora_backlight_ops = {
+       .options        = BL_CORE_SUSPENDRESUME,
+       .update_status  = pandora_backlight_update_status,
+       .get_brightness = pandora_backlight_get_brightness,
+};
+
+static int pandora_backlight_probe(struct platform_device *pdev)
+{
+       struct backlight_properties props;
+       struct backlight_device *bl;
+
+       memset(&props, 0, sizeof(props));
+       props.max_brightness = MAX_USER_VALUE;
+       props.type = BACKLIGHT_RAW;
+       bl = backlight_device_register(pdev->name, &pdev->dev,
+                       NULL, &pandora_backlight_ops, &props);
+       if (IS_ERR(bl)) {
+               dev_err(&pdev->dev, "failed to register backlight\n");
+               return PTR_ERR(bl);
+       }
+
+       platform_set_drvdata(pdev, bl);
+
+       /* 64 cycle period, ON position 0 */
+       twl_i2c_write_u8(TWL4030_MODULE_PWM0, 0x80, TWL_PWM0_ON);
+
+       bl->props.state |= PANDORABL_WAS_OFF;
+       bl->props.brightness = MAX_USER_VALUE;
+       backlight_update_status(bl);
+
+       /* enable PWM function in pin mux */
+       twl_i2c_rmw_u8(TWL4030_MODULE_INTBR,
+               TWL_PMBR1_PWM0_MUXMASK, TWL_PMBR1_PWM0, TWL_INTBR_PMBR1);
+
+       return 0;
+}
+
+static int pandora_backlight_remove(struct platform_device *pdev)
+{
+       struct backlight_device *bl = platform_get_drvdata(pdev);
+       backlight_device_unregister(bl);
+       return 0;
+}
+
+static struct platform_driver pandora_backlight_driver = {
+       .driver         = {
+               .name   = "pandora-backlight",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = pandora_backlight_probe,
+       .remove         = pandora_backlight_remove,
+};
+
+module_platform_driver(pandora_backlight_driver);
+
+MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
+MODULE_DESCRIPTION("Pandora Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pandora-backlight");
index 0a22808..be2be97 100644 (file)
@@ -1444,7 +1444,7 @@ __releases(&info->lock)
        return 0;
 }
 
-static const struct file_operations fb_fops = {
+const struct file_operations fb_fops = {
        .owner =        THIS_MODULE,
        .read =         fb_read,
        .write =        fb_write,
index 39ac49e..bf64787 100644 (file)
@@ -77,6 +77,11 @@ config LOGO_SUPERH_CLUT224
        depends on SUPERH
        default y
 
+config LOGO_PANDORA_CLUT224
+       bool "224-color Pandora Linux logo"
+       depends on MACH_OMAP3_PANDORA
+       default y
+
 config LOGO_M32R_CLUT224
        bool "224-color M32R Linux logo"
        depends on M32R
index 3b43781..26e59cf 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_LOGO_SUN_CLUT224)                += logo_sun_clut224.o
 obj-$(CONFIG_LOGO_SUPERH_MONO)         += logo_superh_mono.o
 obj-$(CONFIG_LOGO_SUPERH_VGA16)                += logo_superh_vga16.o
 obj-$(CONFIG_LOGO_SUPERH_CLUT224)      += logo_superh_clut224.o
+obj-$(CONFIG_LOGO_PANDORA_CLUT224)     += logo_pandora_clut224.o
 obj-$(CONFIG_LOGO_M32R_CLUT224)                += logo_m32r_clut224.o
 
 obj-$(CONFIG_SPU_BASE)                 += logo_spe_clut224.o
index 4bbe1b0..2c6533f 100644 (file)
@@ -111,6 +111,10 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
                /* SuperH Linux logo */
                logo = &logo_superh_clut224;
 #endif
+#ifdef CONFIG_LOGO_PANDORA_CLUT224
+               /* Pandora Linux logo */
+               logo = &logo_pandora_clut224;
+#endif
 #ifdef CONFIG_LOGO_M32R_CLUT224
                /* M32R Linux logo */
                logo = &logo_m32r_clut224;
diff --git a/drivers/video/logo/logo_pandora_clut224.ppm b/drivers/video/logo/logo_pandora_clut224.ppm
new file mode 100644 (file)
index 0000000..10d8583
--- /dev/null
@@ -0,0 +1,1123 @@
+P3
+80 80
+255
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  6 6 6  10 10 10  10 10 10  10 10 10  6 6 6
+6 6 6  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  10 10 10  14 14 14
+22 22 22  26 26 26  30 30 30  34 34 34  30 30 30  30 30 30
+26 26 26  18 18 18  14 14 14  10 10 10  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  14 14 14  26 26 26  42 42 42
+54 54 54  66 66 66  78 78 78  78 78 78  78 78 78  74 74 74
+66 66 66  54 54 54  42 42 42  26 26 26  18 18 18  10 10 10
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  22 22 22  42 42 42  66 66 66  86 86 86
+66 66 66  38 38 38  38 38 38  22 22 22  26 26 26  34 34 34
+54 54 54  66 66 66  86 86 86  70 70 70  46 46 46  26 26 26
+14 14 14  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+10 10 10  26 26 26  50 50 50  82 82 82  58 58 58  6 6 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  6 6 6  54 54 54  86 86 86  66 66 66
+38 38 38  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  6 6 6
+22 22 22  50 50 50  78 78 78  34 34 34  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  6 6 6  70 70 70
+78 78 78  46 46 46  22 22 22  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  6 6 6  18 18 18
+42 42 42  82 82 82  26 26 26  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  14 14 14  46 46 46  34 34 34  6 6 6  2 2 6
+42 42 42  78 78 78  42 42 42  18 18 18  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 0
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  10 10 10  30 30 30
+66 66 66  58 58 58  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  26 26 26  86 86 86  101 101 101  46 46 46  10 10 10
+2 2 6  58 58 58  70 70 70  34 34 34  10 10 10  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  14 14 14  42 42 42
+86 86 86  10 10 10  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  30 30 30  94 94 94  94 94 94  58 58 58  26 26 26
+2 2 6  6 6 6  78 78 78  54 54 54  22 22 22  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  22 22 22  62 62 62
+62 62 62  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  26 26 26  54 54 54  38 38 38  18 18 18  10 10 10
+2 2 6  2 2 6  34 34 34  82 82 82  38 38 38  14 14 14
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  30 30 30  78 78 78
+30 30 30  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  10 10 10  10 10 10  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  78 78 78  50 50 50  18 18 18
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  38 38 38  86 86 86
+14 14 14  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  54 54 54  66 66 66  26 26 26
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  42 42 42  82 82 82
+2 2 6  2 2 6  2 2 6  6 6 6  10 10 10  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  6 6 6  14 14 14  10 10 10  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  18 18 18  82 82 82  34 34 34
+10 10 10  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  46 46 46  86 86 86
+2 2 6  2 2 6  6 6 6  6 6 6  22 22 22  34 34 34
+6 6 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+18 18 18  34 34 34  10 10 10  50 50 50  22 22 22  2 2 6
+2 2 6  2 2 6  2 2 6  10 10 10  86 86 86  42 42 42
+14 14 14  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 1  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  46 46 46  86 86 86
+2 2 6  2 2 6  38 38 38  116 116 116  94 94 94  22 22 22
+22 22 22  2 2 6  2 2 6  2 2 6  14 14 14  86 86 86
+132 138 141  158 158 158  158 158 158  38 38 38  26 26 26  6 6 6
+2 2 6  2 2 6  2 2 6  2 2 6  86 86 86  46 46 46
+14 14 14  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  46 46 46  86 86 86
+2 2 6  14 14 14  128 134 131  198 198 198  195 195 195  116 116 116
+10 10 10  2 2 6  2 2 6  6 6 6  101 98 89  187 187 187
+210 210 210  218 218 218  214 214 214  128 134 131  14 14 14  6 6 6
+2 2 6  2 2 6  2 2 6  2 2 6  86 86 86  50 50 50
+18 18 18  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 1  0 0 0  0 0 1  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  46 46 46  86 86 86
+2 2 6  54 54 54  218 218 218  195 195 195  226 226 226  246 246 246
+58 58 58  2 2 6  2 2 6  30 30 30  210 210 210  253 253 253
+170 170 170  123 123 123  221 221 221  234 234 234  74 74 74  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  70 70 70  58 58 58
+22 22 22  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  46 46 46  82 82 82
+2 2 6  106 106 106  170 170 170  26 26 26  86 86 86  226 226 226
+123 123 123  10 10 10  14 14 14  46 46 46  231 231 231  190 190 190
+6 6 6  70 70 70  90 90 90  238 238 238  158 158 158  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  70 70 70  58 58 58
+22 22 22  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 1  0 0 0  0 0 1  0 0 1
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  42 42 42  86 86 86
+6 6 6  116 116 116  106 106 106  6 6 6  70 70 70  137 144 148
+128 134 131  18 18 18  38 38 38  54 54 54  221 221 221  106 106 106
+2 2 6  14 14 14  46 46 46  190 190 190  198 198 198  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  74 74 74  62 62 62
+22 22 22  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 1  0 0 0  0 0 1  0 0 0
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  14 14 14  42 42 42  94 94 94
+14 14 14  101 101 101  128 134 131  2 2 6  18 18 18  116 116 116
+120 102 47  121 92 8  121 92 8  97 70 12  158 158 158  106 106 106
+2 2 6  2 2 6  2 2 6  195 195 195  195 195 195  6 6 6
+2 2 6  2 2 6  2 2 6  2 2 6  74 74 74  62 62 62
+22 22 22  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 1  0 0 1  0 0 1  0 0 0
+0 0 1  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  38 38 38  90 90 90
+14 14 14  58 58 58  210 210 210  26 26 26  54 38 6  156 118 10
+226 170 11  236 186 11  225 175 15  184 144 12  225 175 15  163 133 67
+37 26 9  2 2 6  70 70 70  246 246 246  132 138 141  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  70 70 70  66 66 66
+26 26 26  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  38 38 38  86 86 86
+14 14 14  10 10 10  195 195 195  175 164 127  192 133 9  225 175 15
+239 182 13  236 186 11  232 195 16  232 195 16  245 215 43  241 208 19
+232 195 16  184 144 12  215 203 150  202 202 202  42 42 42  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  50 50 50  74 74 74
+30 30 30  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  34 34 34  86 86 86
+14 14 14  2 2 6  121 87 25  192 133 9  219 162 10  239 182 13
+236 186 11  232 195 16  241 208 19  246 218 45  246 218 45  245 215 43
+246 215 20  241 208 19  241 208 19  226 184 13  121 87 25  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  50 50 50  82 82 82
+34 34 34  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  34 34 34  82 82 82
+30 30 30  61 42 6  180 123 7  206 145 10  230 174 11  239 182 13
+236 186 11  238 202 15  241 208 19  246 218 45  245 215 43  246 215 20
+246 215 20  246 215 20  226 184 13  225 175 15  184 144 12  6 6 6
+2 2 6  2 2 6  2 2 6  2 2 6  26 26 26  94 94 94
+42 42 42  14 14 14  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  30 30 30  78 78 78
+50 50 50  104 69 6  192 133 9  216 158 10  236 178 12  236 186 11
+232 195 16  241 208 19  246 218 45  245 215 43  246 215 20  246 215 20
+241 208 19  200 144 11  200 144 11  216 158 10  156 118 10  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  6 6 6  90 90 90
+54 54 54  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  30 30 30  78 78 78
+46 46 46  22 22 22  137 92 6  216 158 10  239 182 13  236 186 11
+238 202 15  241 208 19  246 215 20  246 215 20  241 208 19  213 154 11
+185 133 11  210 150 10  216 158 10  210 150 10  97 70 12  2 2 6
+6 6 6  54 54 54  14 14 14  2 2 6  2 2 6  62 62 62
+74 74 74  30 30 30  10 10 10  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  34 34 34  78 78 78
+50 50 50  6 6 6  86 66 32  137 92 6  184 144 12  226 184 13
+232 195 16  232 195 16  225 175 15  184 144 12  167 114 7  192 133 9
+210 150 10  213 154 11  190 142 34  171 161 111  101 98 89  2 2 6
+2 2 6  78 78 78  116 116 116  58 58 58  2 2 6  22 22 22
+90 90 90  46 46 46  18 18 18  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  38 38 38  86 86 86
+50 50 50  6 6 6  128 134 131  171 161 111  156 107 11  167 114 7
+200 144 11  184 144 12  197 138 11  200 144 11  206 145 10  206 145 10
+197 138 11  175 164 127  195 195 195  198 198 198  170 170 170  14 14 14
+2 2 6  22 22 22  116 116 116  116 116 116  22 22 22  2 2 6
+74 74 74  70 70 70  30 30 30  10 10 10  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  18 18 18  50 50 50  101 101 101
+26 26 26  10 10 10  132 138 141  190 190 190  171 161 111  156 107 11
+197 138 11  200 144 11  197 138 11  192 133 9  180 123 7  190 142 34
+198 184 128  187 187 187  202 202 202  221 221 221  214 214 214  66 66 66
+2 2 6  2 2 6  50 50 50  62 62 62  6 6 6  2 2 6
+10 10 10  90 90 90  50 50 50  18 18 18  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  10 10 10  34 34 34  74 74 74  74 74 74
+2 2 6  6 6 6  137 144 148  198 198 198  190 190 190  166 170 147
+141 122 60  156 107 11  156 107 11  158 118 36  171 161 111  187 187 187
+190 190 190  210 210 210  246 246 246  253 253 253  253 253 253  179 181 183
+6 6 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  62 62 62  74 74 74  34 34 34  14 14 14  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  22 22 22  54 54 54  94 94 94  18 18 18
+2 2 6  46 46 46  234 234 234  221 221 221  190 190 190  190 190 190
+190 190 190  187 187 187  187 187 187  190 190 190  190 190 190  195 195 195
+214 214 214  242 242 242  253 253 253  253 253 253  253 253 253  253 253 253
+82 82 82  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  14 14 14  86 86 86  54 54 54  22 22 22  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  18 18 18  46 46 46  90 90 90  46 46 46  18 18 18
+6 6 6  179 181 183  253 253 253  246 246 246  206 206 206  190 190 190
+190 190 190  190 190 190  190 190 190  190 190 190  206 206 206  231 231 231
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+202 202 202  14 14 14  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  42 42 42  86 86 86  42 42 42  18 18 18
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  6 6 6
+14 14 14  38 38 38  74 74 74  66 66 66  2 2 6  6 6 6
+90 90 90  250 250 250  253 253 253  253 253 253  238 238 238  198 198 198
+190 190 190  190 190 190  195 195 195  221 221 221  246 246 246  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  82 82 82  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  78 78 78  70 70 70  34 34 34
+14 14 14  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  14 14 14
+34 34 34  66 66 66  78 78 78  6 6 6  2 2 6  18 18 18
+218 218 218  253 253 253  253 253 253  253 253 253  253 253 253  246 246 246
+226 226 226  231 231 231  246 246 246  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  179 181 183  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  18 18 18  90 90 90  62 62 62
+30 30 30  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  10 10 10  26 26 26
+58 58 58  90 90 90  18 18 18  2 2 6  2 2 6  106 106 106
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  231 231 231  18 18 18  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  18 18 18  94 94 94
+54 54 54  26 26 26  10 10 10  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  22 22 22  50 50 50
+90 90 90  26 26 26  2 2 6  2 2 6  14 14 14  195 195 195
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+250 250 250  242 242 242  54 54 54  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  38 38 38
+86 86 86  50 50 50  22 22 22  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  14 14 14  38 38 38  82 82 82
+34 34 34  2 2 6  2 2 6  2 2 6  42 42 42  195 195 195
+246 246 246  253 253 253  253 253 253  253 253 253  253 253 253  250 250 250
+242 242 242  242 242 242  250 250 250  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  250 250 250  246 246 246  238 238 238
+226 226 226  231 231 231  101 101 101  6 6 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+38 38 38  82 82 82  42 42 42  14 14 14  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  10 10 10  26 26 26  62 62 62  66 66 66
+2 2 6  2 2 6  2 2 6  6 6 6  70 70 70  170 170 170
+206 206 206  234 234 234  246 246 246  250 250 250  250 250 250  238 238 238
+226 226 226  231 231 231  238 238 238  250 250 250  250 250 250  250 250 250
+246 246 246  231 231 231  214 214 214  206 206 206  202 202 202  202 202 202
+198 198 198  202 202 202  179 181 183  18 18 18  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  62 62 62  66 66 66  30 30 30  10 10 10  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  14 14 14  42 42 42  82 82 82  18 18 18
+2 2 6  2 2 6  2 2 6  10 10 10  94 94 94  179 181 183
+218 218 218  242 242 242  250 250 250  253 253 253  253 253 253  250 250 250
+234 234 234  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  246 246 246  238 238 238  226 226 226  210 210 210  202 202 202
+195 195 195  195 195 195  210 210 210  158 158 158  6 6 6  14 14 14
+50 50 50  14 14 14  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  6 6 6  86 86 86  46 46 46  18 18 18  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  22 22 22  54 54 54  70 70 70  2 2 6
+2 2 6  10 10 10  2 2 6  22 22 22  170 170 170  231 231 231
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  250 250 250
+242 242 242  253 253 253  250 250 251  227 232 235  248 249 249  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  246 246 246
+231 231 231  206 206 206  198 198 198  226 226 226  94 94 94  2 2 6
+6 6 6  38 38 38  30 30 30  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  62 62 62  66 66 66  26 26 26  10 10 10
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  30 30 30  74 74 74  50 50 50  2 2 6
+26 26 26  26 26 26  2 2 6  106 106 106  238 238 238  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+250 250 251  198 209 215  99 129 144  28 75 99  65 103 123  198 209 215
+250 250 251  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  246 246 246  218 218 218  202 202 202  210 210 210  14 14 14
+2 2 6  2 2 6  30 30 30  22 22 22  2 2 6  2 2 6
+2 2 6  2 2 6  18 18 18  86 86 86  42 42 42  14 14 14
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  14 14 14  42 42 42  90 90 90  22 22 22  2 2 6
+42 42 42  2 2 6  18 18 18  218 218 218  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  227 232 235
+115 143 158  30 77 101  28 74 101  28 74 101  28 73 99  28 74 101
+99 129 144  219 223 227  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  250 250 250  221 221 221  218 218 218  101 101 101
+2 2 6  14 14 14  18 18 18  38 38 38  10 10 10  2 2 6
+2 2 6  2 2 6  2 2 6  78 78 78  58 58 58  22 22 22
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  18 18 18  54 54 54  82 82 82  2 2 6  26 26 26
+22 22 22  2 2 6  123 123 123  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  239 241 242  144 162 172  40 84 106
+27 75 101  28 74 101  52 93 114  133 158 170  58 95 118  28 75 101
+28 74 101  34 80 103  118 145 160  231 235 238  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  250 250 250  238 238 238  198 198 198
+6 6 6  38 38 38  58 58 58  26 26 26  38 38 38  2 2 6
+2 2 6  2 2 6  2 2 6  46 46 46  78 78 78  30 30 30
+10 10 10  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+10 10 10  30 30 30  74 74 74  58 58 58  2 2 6  42 42 42
+2 2 6  22 22 22  231 231 231  253 253 253  253 253 253  253 253 253
+253 253 253  248 249 249  171 186 194  58 95 118  28 74 101  27 74 100
+40 84 106  133 158 170  233 237 239  253 253 253  239 241 242  144 162 172
+46 88 110  28 74 100  28 74 100  50 91 112  152 171 181  242 244 245
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  246 246 246
+46 46 46  38 38 38  42 42 42  14 14 14  38 38 38  14 14 14
+2 2 6  2 2 6  2 2 6  6 6 6  86 86 86  46 46 46
+14 14 14  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  6 6 6
+14 14 14  42 42 42  90 90 90  18 18 18  18 18 18  26 26 26
+2 2 6  116 116 116  253 253 253  253 253 253  253 253 253  253 253 253
+198 209 215  65 103 123  28 75 98  28 75 100  30 77 101  99 129 144
+212 220 225  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+227 232 235  115 143 158  34 80 103  28 74 99  28 75 100  65 103 123
+185 196 203  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+94 94 94  6 6 6  2 2 6  2 2 6  10 10 10  34 34 34
+2 2 6  2 2 6  2 2 6  2 2 6  74 74 74  58 58 58
+22 22 22  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  10 10 10
+26 26 26  66 66 66  82 82 82  2 2 6  38 38 38  6 6 6
+14 14 14  210 210 210  253 253 253  253 253 253  253 253 253  253 253 253
+52 93 114  29 73 97  27 75 101  41 84 107  171 186 194  242 242 242
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  198 209 215  51 91 114  28 74 101  28 74 101
+34 80 103  239 241 242  253 253 253  253 253 253  253 253 253  253 253 253
+137 144 148  2 2 6  2 2 6  2 2 6  2 2 6  46 46 46
+2 2 6  2 2 6  2 2 6  2 2 6  42 42 42  74 74 74
+30 30 30  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  6 6 6  14 14 14
+42 42 42  90 90 90  26 26 26  6 6 6  42 42 42  2 2 6
+74 74 74  250 250 250  253 253 253  253 253 253  253 253 253  253 253 253
+118 145 160  30 77 101  28 75 101  28 75 100  65 103 123  185 196 203
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  212 220 225  99 129 144  30 77 101  28 74 101  28 75 99
+99 129 144  250 250 251  253 253 253  253 253 253  253 253 253  253 253 253
+179 181 183  2 2 6  2 2 6  2 2 6  2 2 6  46 46 46
+2 2 6  2 2 6  2 2 6  2 2 6  10 10 10  86 86 86
+38 38 38  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  10 10 10  26 26 26
+66 66 66  82 82 82  2 2 6  22 22 22  18 18 18  2 2 6
+137 144 148  253 253 253  253 253 253  253 253 253  253 253 253  178 193 201
+166 182 192  185 196 203  65 103 123  28 75 99  27 73 98  27 76 99
+99 129 144  220 225 227  253 253 253  253 253 253  253 253 253  231 235 238
+118 145 160  34 80 103  28 75 99  28 74 101  65 103 123  178 193 201
+185 196 203  157 177 185  253 253 253  253 253 253  253 253 253  253 253 253
+206 206 206  2 2 6  2 2 6  2 2 6  2 2 6  38 38 38
+2 2 6  2 2 6  2 2 6  2 2 6  6 6 6  86 86 86
+46 46 46  14 14 14  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  18 18 18  46 46 46
+86 86 86  18 18 18  2 2 6  34 34 34  10 10 10  6 6 6
+210 210 210  253 253 253  253 253 253  253 253 253  253 253 253  150 169 180
+28 75 101  65 103 123  245 247 248  171 186 194  52 93 114  28 73 99
+28 74 100  40 84 106  133 158 170  253 253 253  166 182 192  45 87 109
+28 75 98  29 73 97  45 87 109  152 171 181  245 247 248  99 129 144
+30 77 101  118 145 160  253 253 253  253 253 253  253 253 253  253 253 253
+221 221 221  6 6 6  2 2 6  2 2 6  6 6 6  30 30 30
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  82 82 82
+54 54 54  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  26 26 26  66 66 66
+62 62 62  2 2 6  2 2 6  38 38 38  10 10 10  26 26 26
+238 238 238  253 253 253  253 253 253  253 253 253  253 253 253  150 169 180
+28 74 98  28 73 99  253 253 253  253 253 253  221 221 221  133 158 170
+40 84 106  28 74 101  28 75 100  227 232 235  46 88 110  28 75 100
+34 80 103  133 158 170  233 237 239  253 253 253  253 253 253  58 95 118
+28 73 99  118 145 160  253 253 253  253 253 253  253 253 253  253 253 253
+231 231 231  6 6 6  2 2 6  2 2 6  10 10 10  30 30 30
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  66 66 66
+58 58 58  22 22 22  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  10 10 10  38 38 38  78 78 78
+6 6 6  2 2 6  2 2 6  46 46 46  14 14 14  42 42 42
+246 246 246  253 253 253  253 253 253  253 253 253  253 253 253  150 169 180
+28 75 101  28 74 98  253 253 253  253 253 253  231 231 231  242 242 242
+185 196 203  27 75 100  28 74 99  220 225 227  41 84 107  27 76 99
+144 162 172  253 253 253  253 253 253  253 253 253  253 253 253  58 95 118
+28 75 99  118 145 160  253 253 253  253 253 253  253 253 253  253 253 253
+234 234 234  10 10 10  2 2 6  2 2 6  22 22 22  14 14 14
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  66 66 66
+62 62 62  22 22 22  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  18 18 18  50 50 50  74 74 74
+2 2 6  2 2 6  14 14 14  70 70 70  34 34 34  62 62 62
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  150 169 180
+28 75 101  28 74 98  253 253 253  253 253 253  231 231 231  246 246 246
+198 209 215  28 74 101  28 75 99  220 225 227  41 84 107  28 75 100
+150 169 180  253 253 253  253 253 253  253 253 253  253 253 253  58 95 118
+28 75 99  118 145 160  253 253 253  253 253 253  253 253 253  253 253 253
+234 234 234  14 14 14  2 2 6  2 2 6  30 30 30  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  66 66 66
+62 62 62  22 22 22  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  18 18 18  54 54 54  62 62 62
+2 2 6  2 2 6  2 2 6  30 30 30  46 46 46  70 70 70
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  150 169 180
+27 75 100  28 75 99  253 253 253  253 253 253  231 231 231  246 246 246
+185 196 203  28 75 99  28 75 99  220 225 227  41 84 107  28 74 99
+150 170 182  253 253 253  253 253 253  253 253 253  253 253 253  58 95 118
+27 73 98  133 158 170  253 253 253  253 253 253  253 253 253  253 253 253
+226 226 226  10 10 10  2 2 6  6 6 6  30 30 30  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  66 66 66
+58 58 58  22 22 22  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  22 22 22  58 58 58  62 62 62
+2 2 6  2 2 6  2 2 6  2 2 6  30 30 30  78 78 78
+250 250 250  253 253 253  253 253 253  253 253 253  253 253 253  152 171 181
+26 74 99  45 87 109  253 253 253  253 253 253  231 231 231  246 246 246
+178 193 201  28 75 99  28 75 99  220 225 227  41 84 107  28 74 99
+150 170 182  253 253 253  253 253 253  253 253 253  253 253 253  58 95 118
+28 74 99  133 158 170  253 253 253  253 253 253  253 253 253  253 253 253
+206 206 206  2 2 6  22 22 22  34 34 34  18 14 6  22 22 22
+26 26 26  18 18 18  6 6 6  2 2 6  2 2 6  82 82 82
+54 54 54  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  26 26 26  62 62 62  106 106 106
+74 54 14  185 133 11  216 158 10  121 92 8  6 6 6  62 62 62
+238 238 238  253 253 253  253 253 253  253 253 253  253 253 253  150 169 180
+27 75 101  41 84 107  253 253 253  253 253 253  231 231 231  246 246 246
+178 193 201  28 75 99  28 75 99  220 225 227  41 84 107  28 74 100
+150 170 182  253 253 253  253 253 253  253 253 253  253 253 253  58 95 118
+28 75 99  133 158 170  253 253 253  253 253 253  253 253 253  253 253 253
+158 158 158  18 18 18  14 14 14  2 2 6  2 2 6  2 2 6
+6 6 6  18 18 18  66 66 66  38 38 38  6 6 6  94 94 94
+50 50 50  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  6 6 6
+10 10 10  10 10 10  18 18 18  38 38 38  78 78 78  150 142 96
+216 158 10  242 186 14  246 190 14  246 190 14  156 118 10  10 10 10
+90 90 90  238 238 238  253 253 253  253 253 253  253 253 253  152 171 181
+27 73 98  30 77 101  133 158 170  239 241 242  231 231 231  250 250 250
+178 193 201  28 74 98  28 74 100  220 225 227  40 84 106  28 74 101
+148 168 179  253 253 253  253 253 253  242 244 245  152 171 181  34 80 103
+28 74 100  133 158 170  253 253 253  220 217 172  214 189 72  214 189 72
+180 133 36  37 26 9  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  38 38 38  46 46 46  26 26 26  106 106 106
+54 54 54  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  14 14 14  22 22 22
+30 30 30  38 38 38  50 50 50  70 70 70  106 106 106  190 142 34
+226 170 11  242 186 14  246 190 14  246 190 14  246 190 14  156 118 10
+6 6 6  74 74 74  226 226 226  253 253 253  253 253 253  212 220 225
+46 88 110  27 76 99  28 74 100  51 94 116  144 162 172  242 242 242
+178 193 201  28 74 98  28 74 100  220 225 227  41 84 107  28 74 101
+148 168 179  248 249 249  181 195 203  65 103 123  28 74 101  28 74 99
+40 84 106  178 193 201  253 253 253  222 190 57  241 196 14  241 208 19
+232 195 16  38 30 10  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  6 6 6  30 30 30  26 26 26  213 154 11  150 142 96
+66 66 66  26 26 26  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  18 18 18  38 38 38  58 58 58
+78 78 78  86 86 86  101 101 101  123 123 123  163 133 67  210 150 10
+234 174 13  246 186 14  246 190 14  246 190 14  246 190 14  236 186 11
+97 70 12  2 2 6  46 46 46  198 198 198  253 253 253  253 253 253
+227 232 235  115 143 158  34 80 103  28 74 98  27 72 98  65 103 123
+115 143 158  28 74 99  28 75 99  220 225 227  41 84 107  28 75 101
+115 143 158  99 129 144  30 77 101  28 75 101  30 77 101  99 129 144
+212 220 225  253 253 253  253 253 253  222 190 57  242 186 14  241 196 14
+219 162 10  22 18 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  6 6 6  121 92 8  238 202 15  232 195 16
+82 82 82  34 34 34  10 10 10  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  14 14 14  38 38 38  70 70 70  154 122 46
+190 142 34  200 144 11  197 138 11  197 138 11  213 154 11  226 170 11
+242 186 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+225 175 15  48 33 6  2 2 6  22 22 22  158 158 158  250 250 250
+253 253 253  253 253 253  212 220 225  99 129 144  28 75 101  27 74 100
+28 74 100  28 75 98  28 74 100  219 223 227  41 84 107  28 75 99
+28 75 99  28 74 99  26 74 99  65 103 123  185 196 203  250 250 250
+253 253 253  250 250 250  242 242 242  222 190 57  239 182 13  236 186 11
+213 154 11  48 33 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  61 42 6  225 175 15  236 186 11  236 186 11
+118 109 73  42 42 42  14 14 14  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  22 22 22  54 54 54  154 122 46  213 154 11
+226 170 11  230 174 11  226 170 11  226 170 11  236 178 12  242 186 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+241 196 14  184 144 12  10 10 10  2 2 6  6 6 6  116 116 116
+242 242 242  253 253 253  253 253 253  248 249 249  185 196 203  65 103 123
+28 74 100  27 73 98  28 75 100  227 232 235  41 84 107  28 75 99
+29 75 100  51 91 114  158 177 186  245 247 248  253 253 253  253 253 253
+253 253 253  231 231 231  198 198 198  201 174 68  236 178 12  236 178 12
+210 150 10  137 92 6  18 14 6  2 2 6  2 2 6  2 2 6
+6 6 6  70 47 6  200 144 11  236 178 12  239 182 13  239 182 13
+124 112 88  58 58 58  22 22 22  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  30 30 30  70 70 70  180 133 36  226 170 11
+239 182 13  242 186 14  242 186 14  246 186 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  232 195 16  104 69 6  2 2 6  2 2 6  2 2 6
+66 66 66  221 221 221  253 253 253  253 253 253  253 253 253  245 247 248
+158 177 186  58 95 118  65 103 123  253 253 253  99 129 144  40 84 106
+133 158 170  233 237 239  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  206 206 206  198 198 198  201 174 68  230 174 11  230 174 11
+216 158 10  192 133 9  163 110 8  121 92 8  97 70 12  121 92 8
+167 114 7  197 138 11  226 170 11  239 182 13  242 186 14  242 186 14
+150 142 96  78 78 78  34 34 34  14 14 14  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  30 30 30  78 78 78  190 142 34  226 170 11
+239 182 13  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  241 196 14  213 154 11  22 18 6  2 2 6  2 2 6
+2 2 6  38 38 38  218 218 218  253 253 253  253 253 253  253 253 253
+253 253 253  248 249 249  248 249 249  253 253 253  248 249 249  242 244 245
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+250 250 250  206 206 206  198 198 198  194 162 60  226 170 11  236 178 12
+224 166 10  210 150 10  200 144 11  197 138 11  192 133 9  197 138 11
+210 150 10  226 170 11  242 186 14  246 190 14  246 190 14  246 186 14
+225 175 15  124 112 88  62 62 62  30 30 30  14 14 14  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  30 30 30  78 78 78  180 133 36  224 166 10
+239 182 13  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  241 196 14  137 92 6  2 2 6  2 2 6
+2 2 6  2 2 6  78 78 78  250 250 250  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+250 250 250  214 214 214  198 198 198  190 150 46  219 162 10  236 178 12
+234 174 13  224 166 10  216 158 10  213 154 11  213 154 11  216 158 10
+226 170 11  239 182 13  246 190 14  246 190 14  246 190 14  246 190 14
+242 186 14  190 150 46  101 101 101  58 58 58  30 30 30  14 14 14
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  30 30 30  74 74 74  180 133 36  216 158 10
+236 178 12  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  241 196 14  226 184 13  61 42 6  2 2 6
+2 2 6  2 2 6  22 22 22  238 238 238  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  226 226 226  187 187 187  180 133 36  216 158 10  236 178 12
+239 182 13  236 178 12  230 174 11  226 170 11  226 170 11  230 174 11
+236 178 12  242 186 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 186 14  239 182 13  190 150 46  106 106 106  66 66 66  34 34 34
+14 14 14  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  26 26 26  70 70 70  163 133 67  213 154 11
+236 178 12  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  241 196 14  184 144 12  18 14 6
+2 2 6  2 2 6  46 46 46  246 246 246  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  221 221 221  86 86 86  156 107 11  216 158 10  236 178 12
+242 186 14  246 186 14  242 186 14  239 182 13  239 182 13  242 186 14
+242 186 14  246 186 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  242 186 14  225 175 15  141 122 60  66 66 66
+30 30 30  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  26 26 26  70 70 70  163 133 67  210 150 10
+236 178 12  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  232 195 16  121 92 8
+34 34 34  106 106 106  221 221 221  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+242 242 242  82 82 82  18 14 6  163 110 8  216 158 10  236 178 12
+242 186 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  242 186 14  163 133 67
+46 46 46  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  10 10 10  30 30 30  78 78 78  163 133 67  210 150 10
+236 178 12  246 186 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  241 196 14  225 175 15
+198 184 128  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  218 218 218
+58 58 58  2 2 6  22 18 6  167 114 7  216 158 10  236 178 12
+246 186 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 186 14  242 186 14  190 150 46
+54 54 54  22 22 22  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  14 14 14  38 38 38  86 86 86  180 133 36  213 154 11
+236 178 12  246 186 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  232 195 16
+184 144 12  214 214 214  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  250 250 250  170 170 170  26 26 26
+2 2 6  2 2 6  37 26 9  163 110 8  219 162 10  239 182 13
+246 186 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 186 14  236 178 12  224 166 10  141 122 60
+46 46 46  18 18 18  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  18 18 18  50 50 50  107 110 99  192 133 9  224 166 10
+242 186 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  242 186 14  226 184 13
+216 158 10  154 122 46  226 226 226  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  253 253 253  198 198 198  66 66 66  2 2 6  2 2 6
+2 2 6  2 2 6  48 33 6  156 107 11  219 162 10  239 182 13
+246 186 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  242 186 14  234 174 13  213 154 11  154 122 46  66 66 66
+30 30 30  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  22 22 22  58 58 58  141 122 60  206 145 10  234 174 13
+242 186 14  246 186 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 186 14  236 178 12
+216 158 10  163 110 8  61 42 6  132 138 141  218 218 218  250 250 250
+253 253 253  253 253 253  253 253 253  250 250 250  242 242 242  210 210 210
+137 144 148  66 66 66  6 6 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  61 42 6  163 110 8  216 158 10  236 178 12
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  239 182 13
+230 174 11  216 158 10  190 142 34  124 112 88  70 70 70  38 38 38
+18 18 18  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  22 22 22  62 62 62  158 118 36  206 145 10  224 166 10
+236 178 12  239 182 13  242 186 14  242 186 14  246 186 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  236 178 12
+216 158 10  175 118 6  80 54 7  2 2 6  6 6 6  30 30 30
+54 54 54  62 62 62  50 50 50  38 38 38  14 14 14  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  6 6 6  80 54 7  167 114 7  213 154 11  236 178 12
+246 190 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  242 186 14  239 182 13  239 182 13  230 174 11  210 150 10
+180 133 36  124 112 88  82 82 82  54 54 54  34 34 34  18 18 18
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  18 18 18  50 50 50  158 118 36  192 133 9  200 144 11
+216 158 10  219 162 10  224 166 10  226 170 11  230 174 11  236 178 12
+239 182 13  239 182 13  242 186 14  246 186 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  246 186 14  230 174 11
+210 150 10  163 110 8  104 69 6  10 10 10  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  6 6 6  91 60 6  167 114 7  206 145 10  230 174 11
+242 186 14  246 190 14  246 190 14  246 190 14  246 186 14  242 186 14
+239 182 13  230 174 11  224 166 10  213 154 11  180 133 36  124 112 88
+86 86 86  58 58 58  38 38 38  22 22 22  10 10 10  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  14 14 14  34 34 34  70 70 70  141 122 60  158 118 36
+167 114 7  180 123 7  192 133 9  197 138 11  200 144 11  206 145 10
+213 154 11  219 162 10  224 166 10  230 174 11  239 182 13  242 186 14
+246 186 14  246 186 14  246 186 14  246 186 14  239 182 13  216 158 10
+185 133 11  152 99 6  104 69 6  18 14 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  2 2 6  2 2 6  2 2 6
+2 2 6  6 6 6  80 54 7  152 99 6  192 133 9  219 162 10
+236 178 12  239 182 13  246 186 14  242 186 14  239 182 13  236 178 12
+224 166 10  206 145 10  192 133 9  141 122 60  94 94 94  62 62 62
+42 42 42  22 22 22  14 14 14  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  18 18 18  34 34 34  58 58 58  78 78 78
+101 98 89  124 112 88  154 122 46  156 107 11  163 110 8  167 114 7
+175 118 6  180 123 7  185 133 11  197 138 11  210 150 10  219 162 10
+226 170 11  236 178 12  236 178 12  234 174 13  219 162 10  197 138 11
+163 110 8  137 92 6  91 60 6  10 10 10  2 2 6  2 2 6
+18 18 18  38 38 38  38 38 38  38 38 38  38 38 38  38 38 38
+38 38 38  38 38 38  38 38 38  38 38 38  26 26 26  2 2 6
+2 2 6  6 6 6  70 47 6  137 92 6  175 118 6  200 144 11
+219 162 10  230 174 11  234 174 13  230 174 11  219 162 10  210 150 10
+192 133 9  163 110 8  124 112 88  82 82 82  50 50 50  30 30 30
+14 14 14  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  14 14 14  22 22 22  34 34 34
+42 42 42  58 58 58  74 74 74  86 86 86  101 98 89  118 109 73
+120 102 47  121 87 25  137 92 6  152 99 6  163 110 8  180 123 7
+185 133 11  197 138 11  206 145 10  200 144 11  180 123 7  156 107 11
+137 92 6  104 69 6  48 33 6  54 54 54  106 106 106  101 98 89
+86 86 86  82 82 82  78 78 78  78 78 78  78 78 78  78 78 78
+78 78 78  78 78 78  78 78 78  82 82 82  86 86 86  94 94 94
+106 106 106  101 101 101  86 66 32  121 92 8  156 107 11  180 123 7
+192 133 9  200 144 11  206 145 10  200 144 11  192 133 9  175 118 6
+137 92 6  107 110 99  70 70 70  42 42 42  22 22 22  10 10 10
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  6 6 6  10 10 10
+14 14 14  22 22 22  30 30 30  38 38 38  50 50 50  62 62 62
+74 74 74  90 90 90  101 98 89  118 109 73  121 87 25  121 92 8
+137 92 6  152 99 6  152 99 6  152 99 6  137 92 6  121 92 8
+104 69 6  86 66 32  101 98 89  82 82 82  58 58 58  46 46 46
+38 38 38  34 34 34  34 34 34  34 34 34  34 34 34  34 34 34
+34 34 34  34 34 34  34 34 34  34 34 34  38 38 38  42 42 42
+54 54 54  82 82 82  89 81 66  91 60 6  137 92 6  156 107 11
+167 114 7  175 118 6  175 118 6  167 114 7  152 99 6  121 87 25
+101 98 89  62 62 62  34 34 34  18 18 18  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  6 6 6  10 10 10  18 18 18  22 22 22
+30 30 30  42 42 42  50 50 50  66 66 66  86 86 86  101 98 89
+105 86 58  104 69 6  104 69 6  104 69 6  104 69 6  91 60 6
+82 62 34  90 90 90  62 62 62  38 38 38  22 22 22  14 14 14
+10 10 10  10 10 10  10 10 10  10 10 10  10 10 10  10 10 10
+6 6 6  10 10 10  10 10 10  10 10 10  10 10 10  14 14 14
+22 22 22  42 42 42  70 70 70  89 81 66  80 54 7  104 69 6
+121 92 8  137 92 6  137 92 6  121 92 8  105 86 58  86 86 86
+58 58 58  30 30 30  14 14 14  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  6 6 6
+10 10 10  14 14 14  18 18 18  26 26 26  38 38 38  54 54 54
+70 70 70  86 86 86  89 81 66  89 81 66  89 81 66  86 86 86
+74 74 74  50 50 50  30 30 30  14 14 14  6 6 6  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+6 6 6  18 18 18  34 34 34  58 58 58  82 82 82  89 81 66
+89 81 66  89 81 66  89 81 66  89 81 66  74 74 74  50 50 50
+26 26 26  14 14 14  6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  6 6 6  6 6 6  14 14 14  18 18 18
+30 30 30  38 38 38  46 46 46  54 54 54  50 50 50  42 42 42
+30 30 30  18 18 18  10 10 10  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  6 6 6  14 14 14  26 26 26  38 38 38  50 50 50
+58 58 58  58 58 58  54 54 54  42 42 42  30 30 30  18 18 18
+10 10 10  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  6 6 6
+6 6 6  10 10 10  14 14 14  18 18 18  18 18 18  14 14 14
+10 10 10  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  14 14 14  18 18 18
+22 22 22  22 22 22  18 18 18  14 14 14  10 10 10  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0
index dbd59b8..d82f09b 100644 (file)
@@ -738,12 +738,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
        }
 }
 
-static void acx_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static int acx_panel_check_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
 {
@@ -761,7 +755,6 @@ static struct omap_dss_driver acx_panel_driver = {
        .resume         = acx_panel_resume,
 
        .set_timings    = acx_panel_set_timings,
-       .get_timings    = acx_panel_get_timings,
        .check_timings  = acx_panel_check_timings,
 
        .get_recommended_bpp = acx_get_recommended_bpp,
index 519c47d..445ea2d 100644 (file)
@@ -460,12 +460,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
        dpi_set_timings(dssdev, timings);
 }
 
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
 {
@@ -482,7 +476,6 @@ static struct omap_dss_driver dpi_driver = {
        .resume         = generic_dpi_panel_resume,
 
        .set_timings    = generic_dpi_panel_set_timings,
-       .get_timings    = generic_dpi_panel_get_timings,
        .check_timings  = generic_dpi_panel_check_timings,
 
        .driver         = {
index 150e8ba..eba98a0 100644 (file)
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
        .get_resolution = n8x0_panel_get_resolution,
        .get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-       .get_timings    = n8x0_panel_get_timings,
-
        .driver         = {
                .name   = "n8x0_panel",
                .owner  = THIS_MODULE,
index 80c3f6a..174c004 100644 (file)
@@ -583,12 +583,6 @@ static const struct backlight_ops taal_bl_ops = {
        .update_status  = taal_bl_update_status,
 };
 
-static void taal_get_timings(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void taal_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
@@ -1899,8 +1893,6 @@ static struct omap_dss_driver taal_driver = {
        .run_test       = taal_run_test,
        .memory_read    = taal_memory_read,
 
-       .get_timings    = taal_get_timings,
-
        .driver         = {
                .name   = "taal",
                .owner  = THIS_MODULE,
index 2462b9e..1c7254f 100644 (file)
                        TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
 
 static const u16 tpo_td043_def_gamma[12] = {
-       106, 200, 289, 375, 460, 543, 625, 705, 785, 864, 942, 1020
+       105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
 };
 
 struct tpo_td043_device {
        struct spi_device *spi;
        struct regulator *vcc_reg;
+       int nreset_gpio;
        u16 gamma[12];
        u32 mode;
        u32 hmirror:1;
        u32 vmirror:1;
+       u32 powered_on:1;
+       u32 spi_suspended:1;
+       u32 power_on_resume:1;
 };
 
 static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
@@ -256,37 +260,29 @@ static const struct omap_video_timings tpo_td043_timings = {
 
        .pixel_clock    = 36000,
 
+       /* note:
+        * hbp+hsw must be 215
+        * if vbp+vsw < 32, the panel tears at the bottom
+        * if vbp+vsw > 35, it wraps from the top */
        .hsw            = 1,
-       .hfp            = 68,
+       .hfp            = 150,
        .hbp            = 214,
 
        .vsw            = 1,
-       .vfp            = 39,
+       .vfp            = 0,
        .vbp            = 34,
 };
 
-static int tpo_td043_power_on(struct omap_dss_device *dssdev)
+static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 {
-       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
-       int nreset_gpio = dssdev->reset_gpio;
-       int r;
+       int nreset_gpio = tpo_td043->nreset_gpio;
 
-       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+       if (tpo_td043->powered_on)
                return 0;
 
-       r = omapdss_dpi_display_enable(dssdev);
-       if (r)
-               goto err0;
-
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
-
        regulator_enable(tpo_td043->vcc_reg);
 
-       /* wait for power up */
+       /* wait for regulator to stabilize */
        msleep(160);
 
        if (gpio_is_valid(nreset_gpio))
@@ -301,19 +297,15 @@ static int tpo_td043_power_on(struct omap_dss_device *dssdev)
                        tpo_td043->vmirror);
        tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma);
 
+       tpo_td043->powered_on = 1;
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
-err0:
-       return r;
 }
 
-static void tpo_td043_power_off(struct omap_dss_device *dssdev)
+static void tpo_td043_power_off(struct tpo_td043_device *tpo_td043)
 {
-       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
-       int nreset_gpio = dssdev->reset_gpio;
+       int nreset_gpio = tpo_td043->nreset_gpio;
 
-       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+       if (!tpo_td043->powered_on)
                return;
 
        tpo_td043_write(tpo_td043->spi, 3,
@@ -329,54 +321,94 @@ static void tpo_td043_power_off(struct omap_dss_device *dssdev)
 
        regulator_disable(tpo_td043->vcc_reg);
 
+       tpo_td043->powered_on = 0;
+}
+
+static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
+{
+       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+       int r;
+
+       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+               return 0;
+
+       r = omapdss_dpi_display_enable(dssdev);
+       if (r)
+               goto err0;
+
+       if (dssdev->platform_enable) {
+               r = dssdev->platform_enable(dssdev);
+               if (r)
+                       goto err1;
+       }
+
+       /* 
+        * If we are resuming from system suspend, SPI clocks might not be
+        * enabled yet, so we'll program the LCD from SPI PM resume callback.
+        */
+       if (!tpo_td043->spi_suspended) {
+               r = tpo_td043_power_on(tpo_td043);
+               if (r)
+                       goto err1;
+       }
+
+       dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+       return 0;
+err1:
+       omapdss_dpi_display_disable(dssdev);
+err0:
+       return r;
+}
+
+static void tpo_td043_disable_dss(struct omap_dss_device *dssdev)
+{
+       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+               return;
+
        if (dssdev->platform_disable)
                dssdev->platform_disable(dssdev);
 
        omapdss_dpi_display_disable(dssdev);
+
+       if (!tpo_td043->spi_suspended)
+               tpo_td043_power_off(tpo_td043);
 }
 
 static int tpo_td043_enable(struct omap_dss_device *dssdev)
 {
-       int ret;
-
        dev_dbg(&dssdev->dev, "enable\n");
 
-       ret = tpo_td043_power_on(dssdev);
-       if (ret)
-               return ret;
-
-       dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-       return 0;
+       return tpo_td043_enable_dss(dssdev);
 }
 
 static void tpo_td043_disable(struct omap_dss_device *dssdev)
 {
        dev_dbg(&dssdev->dev, "disable\n");
 
-       tpo_td043_power_off(dssdev);
+       tpo_td043_disable_dss(dssdev);
 
        dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 }
 
 static int tpo_td043_suspend(struct omap_dss_device *dssdev)
 {
-       tpo_td043_power_off(dssdev);
+       dev_dbg(&dssdev->dev, "suspend\n");
+
+       tpo_td043_disable_dss(dssdev);
+
        dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
        return 0;
 }
 
 static int tpo_td043_resume(struct omap_dss_device *dssdev)
 {
-       int r = 0;
+       dev_dbg(&dssdev->dev, "resume\n");
 
-       r = tpo_td043_power_on(dssdev);
-       if (r)
-               return r;
-
-       dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-       return 0;
+       return tpo_td043_enable_dss(dssdev);
 }
 
 static int tpo_td043_probe(struct omap_dss_device *dssdev)
@@ -449,6 +481,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
                gpio_free(nreset_gpio);
 }
 
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       dpi_set_timings(dssdev, timings);
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       return dpi_check_timings(dssdev, timings);
+}
+
 static struct omap_dss_driver tpo_td043_driver = {
        .probe          = tpo_td043_probe,
        .remove         = tpo_td043_remove,
@@ -460,6 +504,9 @@ static struct omap_dss_driver tpo_td043_driver = {
        .set_mirror     = tpo_td043_set_hmirror,
        .get_mirror     = tpo_td043_get_hmirror,
 
+       .set_timings    = tpo_td043_set_timings,
+       .check_timings  = tpo_td043_check_timings,
+
        .driver         = {
                .name   = "tpo_td043mtea1_panel",
                .owner  = THIS_MODULE,
@@ -491,6 +538,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        tpo_td043->spi = spi;
+       tpo_td043->nreset_gpio = dssdev->reset_gpio;
        dev_set_drvdata(&spi->dev, tpo_td043);
        dev_set_drvdata(&dssdev->dev, tpo_td043);
 
@@ -509,11 +557,47 @@ static int __devexit tpo_td043_spi_remove(struct spi_device *spi)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int tpo_td043_spi_suspend(struct device *dev)
+{
+       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", tpo_td043);
+
+       tpo_td043->power_on_resume = tpo_td043->powered_on;
+       tpo_td043_power_off(tpo_td043);
+       tpo_td043->spi_suspended = 1;
+
+       return 0;
+}
+
+static int tpo_td043_spi_resume(struct device *dev)
+{
+       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+       int ret;
+
+       dev_dbg(dev, "tpo_td043_spi_resume\n");
+
+       if (tpo_td043->power_on_resume) {
+               ret = tpo_td043_power_on(tpo_td043);
+               if (ret)
+                       return ret;
+       }
+       tpo_td043->spi_suspended = 0;
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm,
+       tpo_td043_spi_suspend, tpo_td043_spi_resume);
+
 static struct spi_driver tpo_td043_spi_driver = {
        .driver = {
                .name   = "tpo_td043mtea1_panel_spi",
                .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
+               .pm     = &tpo_td043_spi_pm,
        },
        .probe  = tpo_td043_spi_probe,
        .remove = __devexit_p(tpo_td043_spi_remove),
index 86ec12e..fc408d0 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
 
 #include <video/omapdss.h>
 
@@ -168,7 +169,29 @@ static inline void dss_uninitialize_debugfs(void)
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
-static int omap_dss_probe(struct platform_device *pdev)
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+       DSSDBG("pm notif %lu\n", v);
+
+       switch (v) {
+       case PM_SUSPEND_PREPARE:
+               DSSDBG("suspending displays\n");
+               return dss_suspend_all_devices();
+
+       case PM_POST_SUSPEND:
+               DSSDBG("resuming displays\n");
+               return dss_resume_all_devices();
+
+       default:
+               return 0;
+       }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+       .notifier_call = omap_dss_pm_notif,
+};
+
+static int __init omap_dss_probe(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int r;
@@ -181,42 +204,6 @@ static int omap_dss_probe(struct platform_device *pdev)
        dss_init_overlay_managers(pdev);
        dss_init_overlays(pdev);
 
-       r = dss_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize DSS platform driver\n");
-               goto err_dss;
-       }
-
-       r = dispc_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize dispc platform driver\n");
-               goto err_dispc;
-       }
-
-       r = rfbi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize rfbi platform driver\n");
-               goto err_rfbi;
-       }
-
-       r = venc_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize venc platform driver\n");
-               goto err_venc;
-       }
-
-       r = dsi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize DSI platform driver\n");
-               goto err_dsi;
-       }
-
-       r = hdmi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize hdmi\n");
-               goto err_hdmi;
-       }
-
        r = dss_initialize_debugfs();
        if (r)
                goto err_debugfs;
@@ -239,23 +226,13 @@ static int omap_dss_probe(struct platform_device *pdev)
                        pdata->default_device = dssdev;
        }
 
+       register_pm_notifier(&omap_dss_pm_notif_block);
+
        return 0;
 
 err_register:
        dss_uninitialize_debugfs();
 err_debugfs:
-       hdmi_uninit_platform_driver();
-err_hdmi:
-       dsi_uninit_platform_driver();
-err_dsi:
-       venc_uninit_platform_driver();
-err_venc:
-       dispc_uninit_platform_driver();
-err_dispc:
-       rfbi_uninit_platform_driver();
-err_rfbi:
-       dss_uninit_platform_driver();
-err_dss:
 
        return r;
 }
@@ -265,14 +242,9 @@ static int omap_dss_remove(struct platform_device *pdev)
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int i;
 
-       dss_uninitialize_debugfs();
+       unregister_pm_notifier(&omap_dss_pm_notif_block);
 
-       hdmi_uninit_platform_driver();
-       dsi_uninit_platform_driver();
-       venc_uninit_platform_driver();
-       rfbi_uninit_platform_driver();
-       dispc_uninit_platform_driver();
-       dss_uninit_platform_driver();
+       dss_uninitialize_debugfs();
 
        dss_uninit_overlays(pdev);
        dss_uninit_overlay_managers(pdev);
@@ -289,26 +261,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
        dss_disable_all_devices();
 }
 
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       DSSDBG("suspend %d\n", state.event);
-
-       return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
-       DSSDBG("resume\n");
-
-       return dss_resume_all_devices();
-}
-
 static struct platform_driver omap_dss_driver = {
-       .probe          = omap_dss_probe,
        .remove         = omap_dss_remove,
        .shutdown       = omap_dss_shutdown,
-       .suspend        = omap_dss_suspend,
-       .resume         = omap_dss_resume,
        .driver         = {
                .name   = "omapdss",
                .owner  = THIS_MODULE,
@@ -434,6 +389,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
        if (dssdriver->get_recommended_bpp == NULL)
                dssdriver->get_recommended_bpp =
                        omapdss_default_get_recommended_bpp;
+       if (dssdriver->get_timings == NULL)
+               dssdriver->get_timings = omapdss_default_get_timings;
 
        return driver_register(&dssdriver->driver);
 }
@@ -523,6 +480,80 @@ static int omap_dss_bus_register(void)
 
 /* INIT */
 
+static int __init omap_dss_register_drivers(void)
+{
+       int r;
+
+       r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
+       if (r)
+               return r;
+
+       r = dss_init_platform_driver();
+       if (r) {
+               DSSERR("Failed to initialize DSS platform driver\n");
+               goto err_dss;
+       }
+
+       r = dispc_init_platform_driver();
+       if (r) {
+               DSSERR("Failed to initialize dispc platform driver\n");
+               goto err_dispc;
+       }
+
+       r = rfbi_init_platform_driver();
+       if (r) {
+               DSSERR("Failed to initialize rfbi platform driver\n");
+               goto err_rfbi;
+       }
+
+       r = venc_init_platform_driver();
+       if (r) {
+               DSSERR("Failed to initialize venc platform driver\n");
+               goto err_venc;
+       }
+
+       r = dsi_init_platform_driver();
+       if (r) {
+               DSSERR("Failed to initialize DSI platform driver\n");
+               goto err_dsi;
+       }
+
+       r = hdmi_init_platform_driver();
+       if (r) {
+               DSSERR("Failed to initialize hdmi\n");
+               goto err_hdmi;
+       }
+
+       return 0;
+
+err_hdmi:
+       dsi_uninit_platform_driver();
+err_dsi:
+       venc_uninit_platform_driver();
+err_venc:
+       rfbi_uninit_platform_driver();
+err_rfbi:
+       dispc_uninit_platform_driver();
+err_dispc:
+       dss_uninit_platform_driver();
+err_dss:
+       platform_driver_unregister(&omap_dss_driver);
+
+       return r;
+}
+
+static void __exit omap_dss_unregister_drivers(void)
+{
+       hdmi_uninit_platform_driver();
+       dsi_uninit_platform_driver();
+       venc_uninit_platform_driver();
+       rfbi_uninit_platform_driver();
+       dispc_uninit_platform_driver();
+       dss_uninit_platform_driver();
+
+       platform_driver_unregister(&omap_dss_driver);
+}
+
 #ifdef CONFIG_OMAP2_DSS_MODULE
 static void omap_dss_bus_unregister(void)
 {
@@ -539,7 +570,7 @@ static int __init omap_dss_init(void)
        if (r)
                return r;
 
-       r = platform_driver_register(&omap_dss_driver);
+       r = omap_dss_register_drivers();
        if (r) {
                omap_dss_bus_unregister();
                return r;
@@ -560,7 +591,7 @@ static void __exit omap_dss_exit(void)
                core.vdds_sdi_reg = NULL;
        }
 
-       platform_driver_unregister(&omap_dss_driver);
+       omap_dss_unregister_drivers();
 
        omap_dss_bus_unregister();
 }
@@ -575,7 +606,7 @@ static int __init omap_dss_init(void)
 
 static int __init omap_dss_init2(void)
 {
-       return platform_driver_register(&omap_dss_driver);
+       return omap_dss_register_drivers();
 }
 
 core_initcall(omap_dss_init);
index 5c81533..c86bd06 100644 (file)
@@ -58,6 +58,8 @@
 
 #define DISPC_MAX_NR_ISRS              8
 
+#define TABLE_SIZE (256 * 4)
+
 struct omap_dispc_isr_data {
        omap_dispc_isr_t        isr;
        void                    *arg;
@@ -115,9 +117,18 @@ static struct {
        u32 error_irqs;
        struct work_struct error_work;
 
+       u32 frame_counter;
+       u32 fc_last_use;
+       bool fc_isr_registered;
+       struct completion *fc_complete[4];
+
        bool            ctx_valid;
        u32             ctx[DISPC_SZ_REGS / sizeof(u32)];
 
+       /* palette/gamma table */
+       void            *table_virt;
+       dma_addr_t      table_phys;
+
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
        spinlock_t irq_stats_lock;
        struct dispc_irq_stats irq_stats;
@@ -410,6 +421,7 @@ int dispc_runtime_get(void)
        WARN_ON(r < 0);
        return r < 0 ? r : 0;
 }
+EXPORT_SYMBOL(dispc_runtime_get);
 
 void dispc_runtime_put(void)
 {
@@ -417,9 +429,10 @@ void dispc_runtime_put(void)
 
        DSSDBG("dispc_runtime_put\n");
 
-       r = pm_runtime_put(&dispc.pdev->dev);
+       r = pm_runtime_put_sync(&dispc.pdev->dev);
        WARN_ON(r < 0);
 }
+EXPORT_SYMBOL(dispc_runtime_put);
 
 static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
 {
@@ -483,7 +496,9 @@ void dispc_mgr_go(enum omap_channel channel)
                go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
 
        if (go_bit) {
+#if 0 /* pandora hack */
                DSSERR("GO bit not down for channel %d\n", channel);
+#endif
                return;
        }
 
@@ -533,80 +548,80 @@ static void dispc_ovl_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
        dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
 }
 
-static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
-                                 int vscaleup, int five_taps,
-                                 enum omap_color_component color_comp)
-{
-       /* Coefficients for horizontal up-sampling */
-       static const struct dispc_h_coef coef_hup[8] = {
-               {  0,   0, 128,   0,  0 },
-               { -1,  13, 124,  -8,  0 },
-               { -2,  30, 112, -11, -1 },
-               { -5,  51,  95, -11, -2 },
-               {  0,  -9,  73,  73, -9 },
-               { -2, -11,  95,  51, -5 },
-               { -1, -11, 112,  30, -2 },
-               {  0,  -8, 124,  13, -1 },
-       };
+/* Coefficients for horizontal up-sampling */
+static struct dispc_h_coef coef_hup[8] = {
+       {  0,   0, 128,   0,  0 },
+       { -1,  13, 124,  -8,  0 },
+       { -2,  30, 112, -11, -1 },
+       { -5,  51,  95, -11, -2 },
+       {  0,  -9,  73,  73, -9 },
+       { -2, -11,  95,  51, -5 },
+       { -1, -11, 112,  30, -2 },
+       {  0,  -8, 124,  13, -1 },
+};
 
-       /* Coefficients for vertical up-sampling */
-       static const struct dispc_v_coef coef_vup_3tap[8] = {
-               { 0,  0, 128,  0, 0 },
-               { 0,  3, 123,  2, 0 },
-               { 0, 12, 111,  5, 0 },
-               { 0, 32,  89,  7, 0 },
-               { 0,  0,  64, 64, 0 },
-               { 0,  7,  89, 32, 0 },
-               { 0,  5, 111, 12, 0 },
-               { 0,  2, 123,  3, 0 },
-       };
+/* Coefficients for vertical up-sampling */
+static struct dispc_v_coef coef_vup_3tap[8] = {
+       { 0,  0, 128,  0, 0 },
+       { 0,  3, 123,  2, 0 },
+       { 0, 12, 111,  5, 0 },
+       { 0, 32,  89,  7, 0 },
+       { 0,  0,  64, 64, 0 },
+       { 0,  7,  89, 32, 0 },
+       { 0,  5, 111, 12, 0 },
+       { 0,  2, 123,  3, 0 },
+};
 
-       static const struct dispc_v_coef coef_vup_5tap[8] = {
-               {  0,   0, 128,   0,  0 },
-               { -1,  13, 124,  -8,  0 },
-               { -2,  30, 112, -11, -1 },
-               { -5,  51,  95, -11, -2 },
-               {  0,  -9,  73,  73, -9 },
-               { -2, -11,  95,  51, -5 },
-               { -1, -11, 112,  30, -2 },
-               {  0,  -8, 124,  13, -1 },
-       };
+static struct dispc_v_coef coef_vup_5tap[8] = {
+       {  0,   0, 128,   0,  0 },
+       { -1,  13, 124,  -8,  0 },
+       { -2,  30, 112, -11, -1 },
+       { -5,  51,  95, -11, -2 },
+       {  0,  -9,  73,  73, -9 },
+       { -2, -11,  95,  51, -5 },
+       { -1, -11, 112,  30, -2 },
+       {  0,  -8, 124,  13, -1 },
+};
 
-       /* Coefficients for horizontal down-sampling */
-       static const struct dispc_h_coef coef_hdown[8] = {
-               {   0, 36, 56, 36,  0 },
-               {   4, 40, 55, 31, -2 },
-               {   8, 44, 54, 27, -5 },
-               {  12, 48, 53, 22, -7 },
-               {  -9, 17, 52, 51, 17 },
-               {  -7, 22, 53, 48, 12 },
-               {  -5, 27, 54, 44,  8 },
-               {  -2, 31, 55, 40,  4 },
-       };
+/* Coefficients for horizontal down-sampling */
+static struct dispc_h_coef coef_hdown[8] = {
+       {   0, 36, 56, 36,  0 },
+       {   4, 40, 55, 31, -2 },
+       {   8, 44, 54, 27, -5 },
+       {  12, 48, 53, 22, -7 },
+       {  -9, 17, 52, 51, 17 },
+       {  -7, 22, 53, 48, 12 },
+       {  -5, 27, 54, 44,  8 },
+       {  -2, 31, 55, 40,  4 },
+};
 
-       /* Coefficients for vertical down-sampling */
-       static const struct dispc_v_coef coef_vdown_3tap[8] = {
-               { 0, 36, 56, 36, 0 },
-               { 0, 40, 57, 31, 0 },
-               { 0, 45, 56, 27, 0 },
-               { 0, 50, 55, 23, 0 },
-               { 0, 18, 55, 55, 0 },
-               { 0, 23, 55, 50, 0 },
-               { 0, 27, 56, 45, 0 },
-               { 0, 31, 57, 40, 0 },
-       };
+/* Coefficients for vertical down-sampling */
+static struct dispc_v_coef coef_vdown_3tap[8] = {
+       { 0, 36, 56, 36, 0 },
+       { 0, 40, 57, 31, 0 },
+       { 0, 45, 56, 27, 0 },
+       { 0, 50, 55, 23, 0 },
+       { 0, 18, 55, 55, 0 },
+       { 0, 23, 55, 50, 0 },
+       { 0, 27, 56, 45, 0 },
+       { 0, 31, 57, 40, 0 },
+};
 
-       static const struct dispc_v_coef coef_vdown_5tap[8] = {
-               {   0, 36, 56, 36,  0 },
-               {   4, 40, 55, 31, -2 },
-               {   8, 44, 54, 27, -5 },
-               {  12, 48, 53, 22, -7 },
-               {  -9, 17, 52, 51, 17 },
-               {  -7, 22, 53, 48, 12 },
-               {  -5, 27, 54, 44,  8 },
-               {  -2, 31, 55, 40,  4 },
-       };
+static struct dispc_v_coef coef_vdown_5tap[8] = {
+       {   0, 36, 56, 36,  0 },
+       {   4, 40, 55, 31, -2 },
+       {   8, 44, 54, 27, -5 },
+       {  12, 48, 53, 22, -7 },
+       {  -9, 17, 52, 51, 17 },
+       {  -7, 22, 53, 48, 12 },
+       {  -5, 27, 54, 44,  8 },
+       {  -2, 31, 55, 40,  4 },
+};
 
+static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
+                                 int vscaleup, int five_taps,
+                                 enum omap_color_component color_comp)
+{
        const struct dispc_h_coef *h_coef;
        const struct dispc_v_coef *v_coef;
        int i;
@@ -656,6 +671,68 @@ static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
        }
 }
 
+static struct dispc_h_coef *dispc_get_scale_coef_table(enum omap_plane plane,
+               enum omap_filter filter)
+{
+       switch (filter) {
+       case OMAP_DSS_FILTER_UP_H:
+               return coef_hup;
+       case OMAP_DSS_FILTER_UP_V3:
+               /* XXX: relying on fact that h and v tables have same layout */
+               return (void *)coef_vup_3tap;
+       case OMAP_DSS_FILTER_UP_V5:
+               return (void *)coef_vup_5tap;
+       case OMAP_DSS_FILTER_DOWN_H:
+               return coef_hdown;
+       case OMAP_DSS_FILTER_DOWN_V3:
+               return (void *)coef_vdown_3tap;
+       case OMAP_DSS_FILTER_DOWN_V5:
+               return (void *)coef_vdown_5tap;
+       default:
+               return NULL;
+       }
+}
+
+void dispc_get_scale_coef_phase(enum omap_plane plane, enum omap_filter filter,
+               int phase, int *vals)
+{
+       const struct dispc_h_coef *table;
+
+       if (phase < 0 || phase >= 8)
+               return;
+
+       table = dispc_get_scale_coef_table(plane, filter);
+       if (table == NULL)
+               return;
+
+       table += phase;
+       vals[0] = table->hc4;
+       vals[1] = table->hc3;
+       vals[2] = table->hc2;
+       vals[3] = table->hc1;
+       vals[4] = table->hc0;
+}
+
+void dispc_set_scale_coef_phase(enum omap_plane plane, enum omap_filter filter,
+               int phase, const int *vals)
+{
+       struct dispc_h_coef *table;
+
+       if (phase < 0 || phase >= 8)
+               return;
+
+       table = dispc_get_scale_coef_table(plane, filter);
+       if (table == NULL)
+               return;
+
+       table += phase;
+       table->hc4 = vals[0];
+       table->hc3 = vals[1];
+       table->hc2 = vals[2];
+       table->hc1 = vals[3];
+       table->hc0 = vals[4];
+}
+
 static void _dispc_setup_color_conv_coef(void)
 {
        int i;
@@ -964,6 +1041,20 @@ void dispc_enable_gamma_table(bool enable)
        REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
 }
 
+void dispc_set_gamma_table(void *table, u32 size)
+{
+       if (table == NULL || size == 0 || size > TABLE_SIZE) {
+               REG_FLD_MOD(DISPC_CONFIG, 0, 3, 3);
+               return;
+       }
+
+       memcpy(dispc.table_virt, table, size);
+
+       dispc_write_reg(DISPC_OVL_TABLE_BA(0), dispc.table_phys);
+       dispc_set_loadmode(OMAP_DSS_LOAD_CLUT_ONCE_FRAME);
+       REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
+}
+
 void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
 {
        u16 reg;
@@ -2908,20 +2999,20 @@ static void _omap_dispc_set_irqs(void)
        dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
 
        dispc_write_reg(DISPC_IRQENABLE, mask);
+       /* flush posted write */
+       dispc_read_reg(DISPC_IRQENABLE);
 }
 
-int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+static int omap_dispc_register_isr_unlocked(omap_dispc_isr_t isr,
+               void *arg, u32 mask)
 {
        int i;
        int ret;
-       unsigned long flags;
        struct omap_dispc_isr_data *isr_data;
 
        if (isr == NULL)
                return -EINVAL;
 
-       spin_lock_irqsave(&dispc.irq_lock, flags);
-
        /* check for duplicate entry */
        for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
                isr_data = &dispc.registered_isr[i];
@@ -2954,25 +3045,30 @@ int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
 
        _omap_dispc_set_irqs();
 
-       spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
-       return 0;
 err:
+       return ret;
+}
+
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dispc.irq_lock, flags);
+       ret = omap_dispc_register_isr_unlocked(isr, arg, mask);
        spin_unlock_irqrestore(&dispc.irq_lock, flags);
 
        return ret;
 }
 EXPORT_SYMBOL(omap_dispc_register_isr);
 
-int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+static int omap_dispc_unregister_isr_unlocked(omap_dispc_isr_t isr,
+               void *arg, u32 mask)
 {
        int i;
-       unsigned long flags;
        int ret = -EINVAL;
        struct omap_dispc_isr_data *isr_data;
 
-       spin_lock_irqsave(&dispc.irq_lock, flags);
-
        for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
                isr_data = &dispc.registered_isr[i];
                if (isr_data->isr != isr || isr_data->arg != arg ||
@@ -2992,6 +3088,16 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
        if (ret == 0)
                _omap_dispc_set_irqs();
 
+       return ret;
+}
+
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dispc.irq_lock, flags);
+       ret = omap_dispc_unregister_isr_unlocked(isr, arg, mask);
        spin_unlock_irqrestore(&dispc.irq_lock, flags);
 
        return ret;
@@ -3196,13 +3302,13 @@ static void dispc_error_worker(struct work_struct *work)
        dispc_runtime_put();
 }
 
-int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+static void dispc_irq_wait_handler(void *data, u32 mask)
 {
-       void dispc_irq_wait_handler(void *data, u32 mask)
-       {
-               complete((struct completion *)data);
-       }
+       complete((struct completion *)data);
+}
 
+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+{
        int r;
        DECLARE_COMPLETION_ONSTACK(completion);
 
@@ -3228,11 +3334,6 @@ int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
 int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
                unsigned long timeout)
 {
-       void dispc_irq_wait_handler(void *data, u32 mask)
-       {
-               complete((struct completion *)data);
-       }
-
        int r;
        DECLARE_COMPLETION_ONSTACK(completion);
 
@@ -3256,6 +3357,120 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
        return 0;
 }
 
+static void dispc_irq_vsync_on_frame_handler(void *data, u32 mask)
+{
+       struct completion *completion;
+       unsigned int i;
+       u32 diff;
+       int ret;
+
+       spin_lock(&dispc.irq_lock);
+
+       dispc.frame_counter++;
+
+       diff = dispc.frame_counter - dispc.fc_last_use;
+       if (diff > 5 * 60 && dispc.fc_isr_registered) {
+               ret = omap_dispc_unregister_isr_unlocked(
+                       dispc_irq_vsync_on_frame_handler,
+                       data, DISPC_IRQ_VSYNC);
+               if (ret == 0)
+                       dispc.fc_isr_registered = false;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dispc.fc_complete); i++) {
+               completion = xchg(&dispc.fc_complete[i], NULL);
+               if (completion != NULL)
+                       complete(completion);
+       }
+
+       spin_unlock(&dispc.irq_lock);
+}
+
+int omap_dispc_wait_for_vsync_on_frame(u32 *frame,
+       unsigned long timeout, bool force)
+{
+       DECLARE_COMPLETION_ONSTACK(completion);
+       bool need_to_wait = force;
+       unsigned long flags;
+       unsigned int i;
+       long time;
+       int ret;
+
+       spin_lock_irqsave(&dispc.irq_lock, flags);
+
+       if (!dispc.fc_isr_registered) {
+               ret = omap_dispc_register_isr_unlocked(
+                       dispc_irq_vsync_on_frame_handler,
+                       NULL, DISPC_IRQ_VSYNC);
+               if (ret)
+                       goto out_unlock;
+               dispc.fc_isr_registered = true;
+       }
+       else {
+               need_to_wait |= *frame == dispc.frame_counter;
+       }
+       dispc.fc_last_use = dispc.frame_counter;
+
+       if (need_to_wait) {
+               for (i = 0; i < ARRAY_SIZE(dispc.fc_complete); i++) {
+                       if (dispc.fc_complete[i] == NULL) {
+                               dispc.fc_complete[i] = &completion;
+                               break;
+                       }
+               }
+               if (i == ARRAY_SIZE(dispc.fc_complete)) {
+                       ret = -EBUSY;
+                       goto out_unlock;
+               }
+       }
+
+       spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+       ret = 0;
+       if (need_to_wait) {
+               time = wait_for_completion_interruptible_timeout(
+                               &completion, msecs_to_jiffies(17 * 2));
+               if (time == 0)
+                       ret = -ETIMEDOUT;
+               else if (time < 0)
+                       ret = time;
+       }
+       if (ret != 0) {
+               spin_lock(&dispc.irq_lock);
+
+               for (i = 0; i < ARRAY_SIZE(dispc.fc_complete); i++) {
+                       if (dispc.fc_complete[i] == &completion) {
+                               dispc.fc_complete[i] = NULL;
+                               break;
+                       }
+               }
+
+               spin_unlock(&dispc.irq_lock);
+       }
+
+       *frame = dispc.frame_counter;
+       return ret;
+
+out_unlock:
+       spin_unlock_irqrestore(&dispc.irq_lock, flags);
+       return ret;
+}
+
+int omap_dispc_get_line_status(void)
+{
+       int r;
+
+       r = dispc_runtime_get();
+       if (r < 0)
+               return r;
+
+       r = dispc_read_reg(DISPC_LINE_STATUS);
+
+       dispc_runtime_put();
+
+       return r;
+}
+
 #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
 void dispc_fake_vsync_irq(void)
 {
@@ -3397,6 +3612,15 @@ static int omap_dispchw_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
+       pdev->dev.coherent_dma_mask = ~0;
+       dispc.table_virt = dma_alloc_writecombine(&pdev->dev,
+               TABLE_SIZE, &dispc.table_phys, GFP_KERNEL);
+       if (dispc.table_virt == NULL) {
+               dev_err(&pdev->dev, "failed to alloc palette memory\n");
+               goto err_palette;
+       }
+       memset(dispc.table_virt, 0, TABLE_SIZE);
+
        pm_runtime_enable(&pdev->dev);
 
        r = dispc_runtime_get();
@@ -3417,6 +3641,9 @@ static int omap_dispchw_probe(struct platform_device *pdev)
 
 err_runtime_get:
        pm_runtime_disable(&pdev->dev);
+       dma_free_writecombine(&pdev->dev, TABLE_SIZE,
+               dispc.table_virt, dispc.table_phys);
+err_palette:
        free_irq(dispc.irq, dispc.pdev);
 err_irq:
        iounmap(dispc.base);
@@ -3430,6 +3657,9 @@ static int omap_dispchw_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
 
+       dma_free_writecombine(&pdev->dev, TABLE_SIZE,
+               dispc.table_virt, dispc.table_phys);
+
        clk_put(dispc.dss_clk);
 
        free_irq(dispc.irq, dispc.pdev);
@@ -3464,7 +3694,6 @@ static const struct dev_pm_ops dispc_pm_ops = {
 };
 
 static struct platform_driver omap_dispchw_driver = {
-       .probe          = omap_dispchw_probe,
        .remove         = omap_dispchw_remove,
        .driver         = {
                .name   = "omapdss_dispc",
@@ -3475,7 +3704,7 @@ static struct platform_driver omap_dispchw_driver = {
 
 int dispc_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dispchw_driver);
+       return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
 }
 
 void dispc_uninit_platform_driver(void)
index be331dc..b4506d7 100644 (file)
@@ -248,6 +248,38 @@ static ssize_t display_wss_store(struct device *dev,
        return size;
 }
 
+#include <linux/ctype.h>
+
+static ssize_t display_dss_gamma_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       unsigned int table[256];
+       char *end = NULL;
+       int i;
+
+       for (i = 0; i < 256; ) {
+               table[i++] = simple_strtoul(buf, &end, 0);
+               while (isspace(*end))
+                       end++;
+               if (*end == 0)
+                       break;
+               buf = end;
+       }
+       
+       if (i == 1 && table[0] == 0)
+               dispc_set_gamma_table(NULL, 0);
+       else if (i < 256) {
+               dev_err(dev, "not enough gamma values supplied (%d)\n", i);
+               dispc_set_gamma_table(NULL, 0);
+       } else
+               dispc_set_gamma_table(table, 256 * 4);
+
+       dispc_mgr_go(dssdev->manager->id);
+
+       return size;
+}
+
 static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
                display_enabled_show, display_enabled_store);
 static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
@@ -260,6 +292,8 @@ static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
                display_mirror_show, display_mirror_store);
 static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
                display_wss_show, display_wss_store);
+static DEVICE_ATTR(dss_gamma, S_IRUGO|S_IWUSR,
+               NULL, display_dss_gamma_store);
 
 static struct device_attribute *display_sysfs_attrs[] = {
        &dev_attr_enabled,
@@ -318,6 +352,13 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
 
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       *timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
 /* Checks if replication logic should be used. Only use for active matrix,
  * when overlay is in RGB12U or RGB16 mode, and LCD interface is
  * 18bpp or 24bpp */
@@ -410,6 +451,12 @@ void dss_init_device(struct platform_device *pdev,
                        DSSERR("failed to create sysfs file\n");
        }
 
+       if (dssdev->channel == OMAP_DSS_CHANNEL_LCD) {
+               r = device_create_file(&dssdev->dev, &dev_attr_dss_gamma);
+               if (r)
+                       DSSERR("failed to create sysfs file\n");
+       }
+
        /* create display? sysfs links */
        r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
                        dev_name(&dssdev->dev));
index 5abf8e7..e5c16bf 100644 (file)
@@ -1065,7 +1065,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
 
        DSSDBG("dsi_runtime_put\n");
 
-       r = pm_runtime_put(&dsi->pdev->dev);
+       r = pm_runtime_put_sync(&dsi->pdev->dev);
        WARN_ON(r < 0);
 }
 
index 1703345..1eaf02a 100644 (file)
@@ -720,7 +720,7 @@ void dss_runtime_put(void)
 
        DSSDBG("dss_runtime_put\n");
 
-       r = pm_runtime_put(&dss.pdev->dev);
+       r = pm_runtime_put_sync(&dss.pdev->dev);
        WARN_ON(r < 0);
 }
 
@@ -846,7 +846,6 @@ static const struct dev_pm_ops dss_pm_ops = {
 };
 
 static struct platform_driver omap_dsshw_driver = {
-       .probe          = omap_dsshw_probe,
        .remove         = omap_dsshw_remove,
        .driver         = {
                .name   = "omapdss_dss",
@@ -857,7 +856,7 @@ static struct platform_driver omap_dsshw_driver = {
 
 int dss_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dsshw_driver);
+       return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
 }
 
 void dss_uninit_platform_driver(void)
index 6308fc5..c7eaa80 100644 (file)
@@ -389,6 +389,7 @@ void dispc_pck_free_enable(bool enable);
 void dispc_set_digit_size(u16 width, u16 height);
 void dispc_enable_fifomerge(bool enable);
 void dispc_enable_gamma_table(bool enable);
+void dispc_set_gamma_table(void *table, u32 size);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
 bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
@@ -444,6 +445,20 @@ int dispc_mgr_set_clock_div(enum omap_channel channel,
 int dispc_mgr_get_clock_div(enum omap_channel channel,
                struct dispc_clock_info *cinfo);
 
+enum omap_filter {
+       OMAP_DSS_FILTER_UP_H,
+       OMAP_DSS_FILTER_UP_V3,
+       OMAP_DSS_FILTER_UP_V5,
+       OMAP_DSS_FILTER_DOWN_H,
+       OMAP_DSS_FILTER_DOWN_V3,
+       OMAP_DSS_FILTER_DOWN_V5,
+};
+void dispc_get_scale_coef_phase(enum omap_plane plane, enum omap_filter filter,
+               int phase, int *vals);
+void dispc_set_scale_coef_phase(enum omap_plane plane, enum omap_filter filter,
+               int phase, const int *vals);
+
+
 /* VENC */
 #ifdef CONFIG_OMAP2_DSS_VENC
 int venc_init_platform_driver(void);
index 7099c31..03bd821 100644 (file)
@@ -176,7 +176,7 @@ static void hdmi_runtime_put(void)
 
        DSSDBG("hdmi_runtime_put\n");
 
-       r = pm_runtime_put(&hdmi.pdev->dev);
+       r = pm_runtime_put_sync(&hdmi.pdev->dev);
        WARN_ON(r < 0);
 }
 
index 6e63845..b07f817 100644 (file)
@@ -608,8 +608,13 @@ static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
 
 static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
 {
-       unsigned long timeout = msecs_to_jiffies(500);
+       unsigned long timeout = usecs_to_jiffies(16667 * 2);
        u32 irq;
+       int r;
+
+       r = dispc_runtime_get();
+       if (r)
+               return r;
 
        if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
                irq = DISPC_IRQ_EVSYNC_ODD;
@@ -621,12 +626,17 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
                else
                        irq = DISPC_IRQ_VSYNC2;
        }
-       return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+
+       r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+
+       dispc_runtime_put();
+
+       return r;
 }
 
 static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
 {
-       unsigned long timeout = msecs_to_jiffies(500);
+       unsigned long timeout = usecs_to_jiffies(16667 * 2);
        struct manager_cache_data *mc;
        u32 irq;
        int r;
@@ -639,6 +649,10 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
        if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
                return 0;
 
+       r = dispc_runtime_get();
+       if (r)
+               return r;
+
        if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
                        || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
                irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
@@ -685,6 +699,8 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
                }
        }
 
+       dispc_runtime_put();
+
        return r;
 }
 
@@ -708,6 +724,10 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
        if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
                return 0;
 
+       r = dispc_runtime_get();
+       if (r)
+               return r;
+
        if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
                        || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
                irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
@@ -754,6 +774,8 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
                }
        }
 
+       dispc_runtime_put();
+
        return r;
 }
 
@@ -998,7 +1020,9 @@ static int configure_dispc(void)
        busy = false;
 
        for (i = 0; i < num_mgrs; i++) {
-               mgr_busy[i] = dispc_mgr_go_busy(i);
+               /* pandora HACK: if something is running faster than display,
+                * it's ok to lose older frame config. */
+               mgr_busy[i] = false; /*dispc_mgr_go_busy(i);*/
                mgr_go[i] = false;
        }
 
index ab8e40e..0bb0a7b 100644 (file)
@@ -347,6 +347,88 @@ static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
        return size;
 }
 
+static ssize_t overlay_filter_coef_show(struct omap_overlay *ovl, char *buf,
+               int which)
+{
+       ssize_t ret, len = 0;
+       int vals[5];
+       int i;
+
+       for (i = 0; i < 8; i++) {
+               dispc_get_scale_coef_phase(ovl->id, which, i, vals);
+               ret = snprintf(buf, PAGE_SIZE - len, "%3d %3d %3d %3d %3d\n",
+                               vals[0], vals[1], vals[2], vals[3], vals[4]);
+               buf += ret;
+               len += ret;
+       }
+
+       return len;
+}
+
+static ssize_t overlay_filter_coef_store(struct omap_overlay *ovl,
+               const char *buf, size_t size, int which)
+{
+       const char *p;
+       int vals[8][5];
+       int i, ret;
+
+       p = buf;
+       for (i = 0; i < 8; i++) {
+               ret = sscanf(p, "%d %d %d %d %d\n", &vals[i][0], &vals[i][1],
+                       &vals[i][2], &vals[i][3], &vals[i][4]);
+               if (ret != 5) {
+                       DSSWARN("parse err, line %d, ret %d\n", i, ret);
+                       return -EINVAL;
+               }
+
+               while (*p != 0 && *p != '\n')
+                       p++;
+               if (*p == '\n')
+                       p++;
+       }
+
+       for (i = 0; i < 8; i++)
+               dispc_set_scale_coef_phase(ovl->id, which, i, &vals[i][0]);
+
+       if (ovl->manager && (ret = ovl->manager->apply(ovl->manager)))
+               return ret;
+
+       return size;
+}
+
+static ssize_t overlay_filter_coef_up_h_show(struct omap_overlay *ovl, char *buf)
+{
+       return overlay_filter_coef_show(ovl, buf, OMAP_DSS_FILTER_UP_H);
+}
+
+static ssize_t overlay_filter_coef_up_h_store(struct omap_overlay *ovl,
+               const char *buf, size_t size)
+{
+       return overlay_filter_coef_store(ovl, buf, size, OMAP_DSS_FILTER_UP_H);
+}
+
+static ssize_t overlay_filter_coef_up_v3_show(struct omap_overlay *ovl, char *buf)
+{
+       return overlay_filter_coef_show(ovl, buf, OMAP_DSS_FILTER_UP_V3);
+}
+
+static ssize_t overlay_filter_coef_up_v3_store(struct omap_overlay *ovl,
+               const char *buf, size_t size)
+{
+       return overlay_filter_coef_store(ovl, buf, size, OMAP_DSS_FILTER_UP_V3);
+}
+
+static ssize_t overlay_filter_coef_up_v5_show(struct omap_overlay *ovl, char *buf)
+{
+       return overlay_filter_coef_show(ovl, buf, OMAP_DSS_FILTER_UP_V5);
+}
+
+static ssize_t overlay_filter_coef_up_v5_store(struct omap_overlay *ovl,
+               const char *buf, size_t size)
+{
+       return overlay_filter_coef_store(ovl, buf, size, OMAP_DSS_FILTER_UP_V5);
+}
+
 struct overlay_attribute {
        struct attribute attr;
        ssize_t (*show)(struct omap_overlay *, char *);
@@ -375,6 +457,12 @@ static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
                overlay_pre_mult_alpha_store);
 static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
                overlay_zorder_show, overlay_zorder_store);
+static OVERLAY_ATTR(filter_coef_up_h, S_IRUGO|S_IWUSR,
+               overlay_filter_coef_up_h_show, overlay_filter_coef_up_h_store);
+static OVERLAY_ATTR(filter_coef_up_v3, S_IRUGO|S_IWUSR,
+               overlay_filter_coef_up_v3_show, overlay_filter_coef_up_v3_store);
+static OVERLAY_ATTR(filter_coef_up_v5, S_IRUGO|S_IWUSR,
+               overlay_filter_coef_up_v5_show, overlay_filter_coef_up_v5_store);
 
 static struct attribute *overlay_sysfs_attrs[] = {
        &overlay_attr_name.attr,
@@ -387,6 +475,9 @@ static struct attribute *overlay_sysfs_attrs[] = {
        &overlay_attr_global_alpha.attr,
        &overlay_attr_pre_mult_alpha.attr,
        &overlay_attr_zorder.attr,
+       &overlay_attr_filter_coef_up_h.attr,
+       &overlay_attr_filter_coef_up_v3.attr,
+       &overlay_attr_filter_coef_up_v5.attr,
        NULL
 };
 
@@ -447,7 +538,7 @@ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
        info = &ovl->info;
 
        if (info->paddr == 0) {
-               DSSDBG("check_overlay failed: paddr 0\n");
+               DSSERR("check_overlay failed: paddr 0\n");
                return -EINVAL;
        }
 
@@ -476,13 +567,13 @@ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
        }
 
        if (dw < info->pos_x + outw) {
-               DSSDBG("check_overlay failed 1: %d < %d + %d\n",
+               DSSERR("check_overlay failed 1: %d < %d + %d\n",
                                dw, info->pos_x, outw);
                return -EINVAL;
        }
 
        if (dh < info->pos_y + outh) {
-               DSSDBG("check_overlay failed 2: %d < %d + %d\n",
+               DSSERR("check_overlay failed 2: %d < %d + %d\n",
                                dh, info->pos_y, outh);
                return -EINVAL;
        }
index 1130c60..9cb751b 100644 (file)
@@ -140,7 +140,7 @@ static void rfbi_runtime_put(void)
 
        DSSDBG("rfbi_runtime_put\n");
 
-       r = pm_runtime_put(&rfbi.pdev->dev);
+       r = pm_runtime_put_sync(&rfbi.pdev->dev);
        WARN_ON(r < 0);
 }
 
index 7152b53..8503630 100644 (file)
@@ -401,7 +401,7 @@ static void venc_runtime_put(void)
 
        DSSDBG("venc_runtime_put\n");
 
-       r = pm_runtime_put(&venc.pdev->dev);
+       r = pm_runtime_put_sync(&venc.pdev->dev);
        WARN_ON(r < 0);
 }
 
@@ -469,16 +469,69 @@ unsigned long venc_get_pixel_clock(void)
        return 13500000;
 }
 
+static ssize_t display_venc_type_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       const char *ret;
+
+       switch (dssdev->phy.venc.type) {
+       case OMAP_DSS_VENC_TYPE_COMPOSITE:
+               ret = "composite";
+               break;
+       case OMAP_DSS_VENC_TYPE_SVIDEO:
+               ret = "svideo";
+               break;
+       default:
+               ret = "unknown";
+               break;
+       }
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", ret);
+}
+
+static ssize_t display_venc_type_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       enum omap_dss_venc_type new_type;
+
+       if (strncmp("composite", buf, 9) == 0)
+               new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+       else if (strncmp("svideo", buf, 6) == 0)
+               new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
+       else
+               return -EINVAL;
+
+       mutex_lock(&venc.venc_lock);
+
+       if (dssdev->phy.venc.type != new_type) {
+               dssdev->phy.venc.type = new_type;
+               if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+                       venc_power_off(dssdev);
+                       venc_power_on(dssdev);
+               }
+       }
+
+       mutex_unlock(&venc.venc_lock);
+
+       return size;
+}
+
+static DEVICE_ATTR(venc_type, S_IRUGO | S_IWUSR,
+               display_venc_type_show, display_venc_type_store);
+
 /* driver */
 static int venc_panel_probe(struct omap_dss_device *dssdev)
 {
        dssdev->panel.timings = omap_dss_pal_timings;
 
-       return 0;
+       return device_create_file(&dssdev->dev, &dev_attr_venc_type);
 }
 
 static void venc_panel_remove(struct omap_dss_device *dssdev)
 {
+       device_remove_file(&dssdev->dev, &dev_attr_venc_type);
 }
 
 static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -557,12 +610,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
        return venc_panel_enable(dssdev);
 }
 
-static void venc_get_timings(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void venc_set_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
@@ -641,7 +688,6 @@ static struct omap_dss_driver venc_driver = {
        .get_resolution = omapdss_default_get_resolution,
        .get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-       .get_timings    = venc_get_timings,
        .set_timings    = venc_set_timings,
        .check_timings  = venc_check_timings,
 
index 83d3fe7..4cb12ce 100644 (file)
@@ -1,8 +1,7 @@
 menuconfig FB_OMAP2
         tristate "OMAP2+ frame buffer support"
-        depends on FB && OMAP2_DSS
+        depends on FB && OMAP2_DSS && !DRM_OMAP
 
-       select OMAP2_VRAM
        select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
         select FB_CFB_FILLRECT
         select FB_CFB_COPYAREA
index df7bcce..99d7662 100644 (file)
@@ -31,7 +31,6 @@
 
 #include <video/omapdss.h>
 #include <plat/vrfb.h>
-#include <plat/vram.h>
 
 #include "omapfb.h"
 
@@ -70,7 +69,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
 
        DBG("omapfb_setup_plane\n");
 
-       if (ofbi->num_overlays != 1) {
+       if (ofbi->num_overlays == 0) {
                r = -EINVAL;
                goto out;
        }
@@ -185,7 +184,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
 {
        struct omapfb_info *ofbi = FB2OFB(fbi);
 
-       if (ofbi->num_overlays != 1) {
+       if (ofbi->num_overlays == 0) {
                memset(pi, 0, sizeof(*pi));
        } else {
                struct omap_overlay *ovl;
@@ -225,6 +224,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
        down_write_nested(&rg->lock, rg->id);
        atomic_inc(&rg->lock_count);
 
+       if (rg->size == size && rg->type == mi->type)
+               goto out;
+
        if (atomic_read(&rg->map_count)) {
                r = -EBUSY;
                goto out;
@@ -245,12 +247,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
                }
        }
 
-       if (rg->size != size || rg->type != mi->type) {
-               r = omapfb_realloc_fbmem(fbi, size, mi->type);
-               if (r) {
-                       dev_err(fbdev->dev, "realloc fbmem failed\n");
-                       goto out;
-               }
+       r = omapfb_realloc_fbmem(fbi, size, mi->type);
+       if (r) {
+               dev_err(fbdev->dev, "realloc fbmem failed\n");
+               goto out;
        }
 
  out:
@@ -591,6 +591,70 @@ static int omapfb_wait_for_go(struct fb_info *fbi)
        return r;
 }
 
+static int omapfb_do_vsync(struct fb_info *fbi,
+       struct omap_dss_device *display, u32 *frame, bool force)
+{
+       unsigned long timeout = usecs_to_jiffies(16667 * 2);
+       struct omapfb_info *ofbi = FB2OFB(fbi);
+       static u32 frame_tv;
+       bool is_tv = false;
+       u32 frame_dummy = 0;
+       int i, r;
+
+       if (frame == NULL)
+               frame = &frame_dummy;
+
+       /* try to find the first enabled overlay+display pair */
+       for (i = 0; i < ofbi->num_overlays; i++) {
+               struct omap_overlay_manager *manager;
+
+               if (!ofbi->overlays[i]->info.enabled)
+                       continue;
+
+               manager = ofbi->overlays[i]->manager;
+               if (!manager)
+                       continue;
+
+               if (manager->device->state
+                   == OMAP_DSS_DISPLAY_ACTIVE)
+               {
+                       display = manager->device;
+                       break;
+               }
+       }
+
+       if (display->type == OMAP_DISPLAY_TYPE_VENC)
+               is_tv = true;
+
+       r = dispc_runtime_get();
+       if (r)
+               return r;
+
+       /* this is unsafe pandora hack, but should work as fb
+        * is compiled in (no worry about rmmod) and there
+        * is no way to rm fb instances at runtime */
+       unlock_fb_info(fbi);
+
+       if (is_tv) {
+               u32 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+               r = omap_dispc_wait_for_irq_interruptible_timeout(irq,
+                       timeout);
+               /* there is no real frame counter for TV */
+               *frame = ++frame_tv;
+       }
+       else {
+               r = omap_dispc_wait_for_vsync_on_frame(frame,
+                       timeout, force);
+       }
+
+       if (!lock_fb_info(fbi))
+               printk(KERN_ERR "omapfb: lock_fb_info failed\n");
+
+       dispc_runtime_put();
+
+       return r;
+}
+
 int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
 {
        struct omapfb_info *ofbi = FB2OFB(fbi);
@@ -612,6 +676,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
                struct omapfb_tearsync_info     tearsync_info;
                struct omapfb_display_info      display_info;
                u32                             crt;
+               u32                             frame;
        } p;
 
        int r = 0;
@@ -779,7 +844,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
                        r = -ENODEV;
                        break;
                }
-               /* FALLTHROUGH */
+               r = omapfb_do_vsync(fbi, display, NULL, true);
+               break;
 
        case OMAPFB_WAITFORVSYNC:
                DBG("ioctl WAITFORVSYNC\n");
@@ -801,6 +867,28 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
                r = omapfb_wait_for_go(fbi);
                break;
 
+       case OMAPFB_WAITFORVSYNC_FRAME:
+               if (get_user(p.frame, (__u32 __user *)arg)) {
+                       r = -EFAULT;
+                       break;
+               }
+               r = omapfb_do_vsync(fbi, display, &p.frame, false);
+               /* report the frame # regardless */
+               if (copy_to_user((void __user *)arg, &p.frame,
+                                sizeof(p.frame)))
+                       r = -EFAULT;
+               break;
+
+       case OMAPFB_GET_LINE_STATUS:
+               r = omap_dispc_get_line_status();
+               if (r < 0)
+                       break;
+               if (copy_to_user((void __user *)arg, &r, sizeof(r)))
+                       r = -EFAULT;
+               else
+                       r = 0;
+               break;
+
        /* LCD and CTRL tests do the same thing for backward
         * compatibility */
        case OMAPFB_LCD_TEST:
@@ -847,14 +935,15 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
                break;
 
        case OMAPFB_GET_VRAM_INFO: {
-               unsigned long vram, free, largest;
-
                DBG("ioctl GET_VRAM_INFO\n");
 
-               omap_vram_get_info(&vram, &free, &largest);
-               p.vram_info.total = vram;
-               p.vram_info.free = free;
-               p.vram_info.largest_free_block = largest;
+               /*
+                * We don't have the ability to get this vram info anymore.
+                * Fill in something that should keep the applications working.
+                */
+               p.vram_info.total = SZ_1M * 64;
+               p.vram_info.free = SZ_1M * 64;
+               p.vram_info.largest_free_block = SZ_1M * 64;
 
                if (copy_to_user((void __user *)arg, &p.vram_info,
                                        sizeof(p.vram_info)))
index f7c1753..46a2c11 100644 (file)
@@ -48,6 +48,7 @@ static int def_rotate;
 static int def_mirror;
 static bool auto_update;
 static unsigned int auto_update_freq;
+static bool def_vram_cache = true;
 module_param(auto_update, bool, 0);
 module_param(auto_update_freq, uint, 0644);
 
@@ -1119,16 +1120,22 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 
        vma->vm_pgoff = off >> PAGE_SHIFT;
        vma->vm_flags |= VM_IO | VM_RESERVED;
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       if (def_vram_cache)
+               vma->vm_page_prot = pgprot_writethrough(vma->vm_page_prot);
+       else
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        vma->vm_ops = &mmap_user_ops;
        vma->vm_private_data = rg;
-       if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot)) {
+       if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+                           vma->vm_end - vma->vm_start,
+                           vma->vm_page_prot)) {
                r = -EAGAIN;
                goto error;
        }
 
+       /* not IO memory */
+       vma->vm_flags &= ~VM_IO;
+
        /* vm_ops.open won't be called for mmap itself. */
        atomic_inc(&rg->map_count);
 
@@ -1326,24 +1333,25 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
 
        rg = ofbi->region;
 
-       WARN_ON(atomic_read(&rg->map_count));
-
-       if (rg->paddr)
-               if (omap_vram_free(rg->paddr, rg->size))
-                       dev_err(fbdev->dev, "VRAM FREE failed\n");
+       if (rg->token == NULL)
+               return;
 
-       if (rg->vaddr)
-               iounmap(rg->vaddr);
+       WARN_ON(atomic_read(&rg->map_count));
 
        if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
                /* unmap the 0 angle rotation */
                if (rg->vrfb.vaddr[0]) {
                        iounmap(rg->vrfb.vaddr[0]);
-                       omap_vrfb_release_ctx(&rg->vrfb);
                        rg->vrfb.vaddr[0] = NULL;
                }
+
+               omap_vrfb_release_ctx(&rg->vrfb);
        }
 
+       dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
+                       &rg->attrs);
+
+       rg->token = NULL;
        rg->vaddr = NULL;
        rg->paddr = 0;
        rg->alloc = 0;
@@ -1378,7 +1386,9 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
        struct omapfb_info *ofbi = FB2OFB(fbi);
        struct omapfb2_device *fbdev = ofbi->fbdev;
        struct omapfb2_mem_region *rg;
-       void __iomem *vaddr;
+       void *token;
+       DEFINE_DMA_ATTRS(attrs);
+       dma_addr_t dma_handle;
        int r;
 
        rg = ofbi->region;
@@ -1393,42 +1403,40 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
 
        size = PAGE_ALIGN(size);
 
-       if (!paddr) {
-               DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
-               r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr);
-       } else {
-               DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
-                               ofbi->id);
-               r = omap_vram_reserve(paddr, size);
-       }
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 
-       if (r) {
+       if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+       DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
+
+       token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
+                       GFP_KERNEL, &attrs);
+
+       if (token == NULL) {
                dev_err(fbdev->dev, "failed to allocate framebuffer\n");
                return -ENOMEM;
        }
 
-       if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
-               vaddr = ioremap_wc(paddr, size);
-
-               if (!vaddr) {
-                       dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
-                       omap_vram_free(paddr, size);
-                       return -ENOMEM;
-               }
+       DBG("allocated VRAM paddr %lx, vaddr %p\n",
+                       (unsigned long)dma_handle, token);
 
-               DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
-       } else {
+       if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
                r = omap_vrfb_request_ctx(&rg->vrfb);
                if (r) {
+                       dma_free_attrs(fbdev->dev, size, token, dma_handle,
+                                       &attrs);
                        dev_err(fbdev->dev, "vrfb create ctx failed\n");
                        return r;
                }
-
-               vaddr = NULL;
        }
 
-       rg->paddr = paddr;
-       rg->vaddr = vaddr;
+       rg->attrs = attrs;
+       rg->token = token;
+       rg->dma_handle = dma_handle;
+
+       rg->paddr = (unsigned long)dma_handle;
+       rg->vaddr = (void __iomem *)token;
        rg->size = size;
        rg->alloc = 1;
 
@@ -1576,6 +1584,9 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
 
                }
 
+               WARN_ONCE(paddr,
+                       "reserving memory at predefined address not supported\n");
+
                paddrs[fbnum] = paddr;
                sizes[fbnum] = size;
 
@@ -2584,6 +2595,7 @@ module_param_named(vram, def_vram, charp, 0);
 module_param_named(rotate, def_rotate, int, 0);
 module_param_named(vrfb, def_vrfb, bool, 0);
 module_param_named(mirror, def_mirror, bool, 0);
+module_param_named(vram_cache, def_vram_cache, bool, 0644);
 
 /* late_initcall to let panel/ctrl drivers loaded first.
  * I guess better option would be a more dynamic approach,
index fdf0ede..259bf05 100644 (file)
@@ -28,6 +28,8 @@
 #endif
 
 #include <linux/rwsem.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
 
 #include <video/omapdss.h>
 
@@ -49,6 +51,9 @@ extern unsigned int omapfb_debug;
 
 struct omapfb2_mem_region {
        int             id;
+       struct dma_attrs attrs;
+       void            *token;
+       dma_addr_t      dma_handle;
        u32             paddr;
        void __iomem    *vaddr;
        struct vrfb     vrfb;
index b5045ca..39a3ef4 100644 (file)
@@ -271,5 +271,5 @@ module_exit(twl4030_wdt_exit);
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS("platform:twl4030_wdt");
+/* MODULE_ALIAS("platform:twl4030_wdt"); */
 
index 6ad58a5..ded2ffb 100644 (file)
@@ -215,6 +215,7 @@ source "fs/pstore/Kconfig"
 source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
 source "fs/exofs/Kconfig"
+source "fs/aufs/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
index d2c3353..680ad8a 100644 (file)
@@ -123,3 +123,4 @@ obj-$(CONFIG_GFS2_FS)           += gfs2/
 obj-y                          += exofs/ # Multiple modules
 obj-$(CONFIG_CEPH_FS)          += ceph/
 obj-$(CONFIG_PSTORE)           += pstore/
+obj-$(CONFIG_AUFS_FS)           += aufs/
diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig
new file mode 100644 (file)
index 0000000..7a96cb5
--- /dev/null
@@ -0,0 +1,203 @@
+config AUFS_FS
+       tristate "Aufs (Advanced multi layered unification filesystem) support"
+       depends on EXPERIMENTAL
+       help
+       Aufs is a stackable unification filesystem such as Unionfs,
+       which unifies several directories and provides a merged single
+       directory.
+       In the early days, aufs was entirely re-designed and
+       re-implemented Unionfs Version 1.x series. Introducing many
+       original ideas, approaches and improvements, it becomes totally
+       different from Unionfs while keeping the basic features.
+
+if AUFS_FS
+choice
+       prompt "Maximum number of branches"
+       default AUFS_BRANCH_MAX_127
+       help
+       Specifies the maximum number of branches (or member directories)
+       in a single aufs. The larger value consumes more system
+       resources and has a minor impact to performance.
+config AUFS_BRANCH_MAX_127
+       bool "127"
+       help
+       Specifies the maximum number of branches (or member directories)
+       in a single aufs. The larger value consumes more system
+       resources and has a minor impact to performance.
+config AUFS_BRANCH_MAX_511
+       bool "511"
+       help
+       Specifies the maximum number of branches (or member directories)
+       in a single aufs. The larger value consumes more system
+       resources and has a minor impact to performance.
+config AUFS_BRANCH_MAX_1023
+       bool "1023"
+       help
+       Specifies the maximum number of branches (or member directories)
+       in a single aufs. The larger value consumes more system
+       resources and has a minor impact to performance.
+config AUFS_BRANCH_MAX_32767
+       bool "32767"
+       help
+       Specifies the maximum number of branches (or member directories)
+       in a single aufs. The larger value consumes more system
+       resources and has a minor impact to performance.
+endchoice
+
+config AUFS_SBILIST
+       bool
+       depends on AUFS_MAGIC_SYSRQ || PROC_FS
+       default y
+       help
+       Automatic configuration for internal use.
+       When aufs supports Magic SysRq or /proc, enabled automatically.
+
+config AUFS_HNOTIFY
+       bool "Detect direct branch access (bypassing aufs)"
+       help
+       If you want to modify files on branches directly, eg. bypassing aufs,
+       and want aufs to detect the changes of them fully, then enable this
+       option and use 'udba=notify' mount option.
+       Currently there is only one available configuration, "fsnotify".
+       It will have a negative impact to the performance.
+       See detail in aufs.5.
+
+choice
+       prompt "method" if AUFS_HNOTIFY
+       default AUFS_HFSNOTIFY
+config AUFS_HFSNOTIFY
+       bool "fsnotify"
+       select FSNOTIFY
+endchoice
+
+config AUFS_EXPORT
+       bool "NFS-exportable aufs"
+       depends on EXPORTFS
+       help
+       If you want to export your mounted aufs via NFS, then enable this
+       option. There are several requirements for this configuration.
+       See detail in aufs.5.
+
+config AUFS_INO_T_64
+       bool
+       depends on AUFS_EXPORT
+       depends on 64BIT && !(ALPHA || S390)
+       default y
+       help
+       Automatic configuration for internal use.
+       /* typedef unsigned long/int __kernel_ino_t */
+       /* alpha and s390x are int */
+
+config AUFS_RDU
+       bool "Readdir in userspace"
+       help
+       Aufs has two methods to provide a merged view for a directory,
+       by a user-space library and by kernel-space natively. The latter
+       is always enabled but sometimes large and slow.
+       If you enable this option, install the library in aufs2-util
+       package, and set some environment variables for your readdir(3),
+       then the work will be handled in user-space which generally
+       shows better performance in most cases.
+       See detail in aufs.5.
+
+config AUFS_PROC_MAP
+       bool "support for /proc/maps and lsof(1)"
+       depends on PROC_FS
+       help
+       When you issue mmap(2) in aufs, it is actually a direct mmap(2)
+       call to the file on the branch fs since the file in aufs is
+       purely virtual. And the file path printed in /proc/maps (and
+       others) will be the path on the branch fs. In most cases, it
+       does no harm. But some utilities like lsof(1) may confuse since
+       the utility or user may expect the file path in aufs to be
+       printed.
+       To address this issue, aufs provides a patch which introduces a
+       new member called vm_prfile into struct vm_area_struct. The patch
+       is meaningless without enabling this configuration since nobody
+       sets the new vm_prfile member.
+       If you don't apply the patch, then enabling this configuration
+       will cause a compile error.
+       This approach is fragile since if someone else makes some changes
+       around vm_file, then vm_prfile may not work anymore. As a
+       workaround such case, aufs provides this configuration. If you
+       disable it, then lsof(1) may produce incorrect result but the
+       problem will be gone even if the aufs patch is applied (I hope).
+
+config AUFS_SP_IATTR
+       bool "Respect the attributes (mtime/ctime mainly) of special files"
+       help
+       When you write something to a special file, some attributes of it
+       (mtime/ctime mainly) may be updated. Generally such updates are
+       less important (actually some device drivers and NFS ignore
+       it). But some applications (such like test program) requires
+       such updates. If you need these updates, then enable this
+       configuration which introduces some overhead.
+       Currently this configuration handles FIFO only.
+
+config AUFS_SHWH
+       bool "Show whiteouts"
+       help
+       If you want to make the whiteouts in aufs visible, then enable
+       this option and specify 'shwh' mount option. Although it may
+       sounds like philosophy or something, but in technically it
+       simply shows the name of whiteout with keeping its behaviour.
+
+config AUFS_BR_RAMFS
+       bool "Ramfs (initramfs/rootfs) as an aufs branch"
+       help
+       If you want to use ramfs as an aufs branch fs, then enable this
+       option. Generally tmpfs is recommended.
+       Aufs prohibits them from being a branch fs by default, because
+       initramfs becomes unusable after switch_root or something
+       generally. If you set initramfs as an aufs branch and boot your
+       system by switch_root, you will meet a problem easily since the
+       files in initramfs may be inaccessible.
+       Unless you are going to use ramfs as an aufs branch fs without
+       switch_root or something, leave it N.
+
+config AUFS_BR_FUSE
+       bool "Fuse fs as an aufs branch"
+       depends on FUSE_FS
+       select AUFS_POLL
+       help
+       If you want to use fuse-based userspace filesystem as an aufs
+       branch fs, then enable this option.
+       It implements the internal poll(2) operation which is
+       implemented by fuse only (currently).
+
+config AUFS_POLL
+       bool
+       help
+       Automatic configuration for internal use.
+
+config AUFS_BR_HFSPLUS
+       bool "Hfsplus as an aufs branch"
+       depends on HFSPLUS_FS
+       default y
+       help
+       If you want to use hfsplus fs as an aufs branch fs, then enable
+       this option. This option introduces a small overhead at
+       copying-up a file on hfsplus.
+
+config AUFS_BDEV_LOOP
+       bool
+       depends on BLK_DEV_LOOP
+       default y
+       help
+       Automatic configuration for internal use.
+       Convert =[ym] into =y.
+
+config AUFS_DEBUG
+       bool "Debug aufs"
+       help
+       Enable this to compile aufs internal debug code.
+       It will have a negative impact to the performance.
+
+config AUFS_MAGIC_SYSRQ
+       bool
+       depends on AUFS_DEBUG && MAGIC_SYSRQ
+       default y
+       help
+       Automatic configuration for internal use.
+       When aufs supports Magic SysRq, enabled automatically.
+endif
diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile
new file mode 100644 (file)
index 0000000..9b25bc8
--- /dev/null
@@ -0,0 +1,42 @@
+
+include ${src}/magic.mk
+ifeq (${CONFIG_AUFS_FS},m)
+include ${src}/conf.mk
+endif
+-include ${src}/priv_def.mk
+
+# cf. include/linux/kernel.h
+# enable pr_debug
+ccflags-y += -DDEBUG
+# sparse requires the full pathname
+ifdef M
+ccflags-y += -include ${M}/../../include/linux/aufs_type.h
+else
+ccflags-y += -include ${srctree}/include/linux/aufs_type.h
+endif
+
+obj-$(CONFIG_AUFS_FS) += aufs.o
+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
+       wkq.o vfsub.o dcsub.o \
+       cpup.o whout.o wbr_policy.o \
+       dinfo.o dentry.o \
+       dynop.o \
+       finfo.o file.o f_op.o \
+       dir.o vdir.o \
+       iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
+       ioctl.o
+
+# all are boolean
+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o
+aufs-$(CONFIG_SYSFS) += sysfs.o
+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o
+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o
+aufs-$(CONFIG_AUFS_EXPORT) += export.o
+aufs-$(CONFIG_AUFS_POLL) += poll.o
+aufs-$(CONFIG_AUFS_RDU) += rdu.o
+aufs-$(CONFIG_AUFS_SP_IATTR) += f_op_sp.o
+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o
+aufs-$(CONFIG_AUFS_DEBUG) += debug.o
+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h
new file mode 100644 (file)
index 0000000..8adb4f2
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * all header files
+ */
+
+#ifndef __AUFS_H__
+#define __AUFS_H__
+
+#ifdef __KERNEL__
+
+#define AuStub(type, name, body, ...) \
+       static inline type name(__VA_ARGS__) { body; }
+
+#define AuStubVoid(name, ...) \
+       AuStub(void, name, , __VA_ARGS__)
+#define AuStubInt0(name, ...) \
+       AuStub(int, name, return 0, __VA_ARGS__)
+
+#include "debug.h"
+
+#include "branch.h"
+#include "cpup.h"
+#include "dcsub.h"
+#include "dbgaufs.h"
+#include "dentry.h"
+#include "dir.h"
+#include "dynop.h"
+#include "file.h"
+#include "fstype.h"
+#include "inode.h"
+#include "loop.h"
+#include "module.h"
+#include "opts.h"
+#include "rwsem.h"
+#include "spl.h"
+#include "super.h"
+#include "sysaufs.h"
+#include "vfsub.h"
+#include "whout.h"
+#include "wkq.h"
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_H__ */
diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c
new file mode 100644 (file)
index 0000000..cca3117
--- /dev/null
@@ -0,0 +1,1206 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * branch management
+ */
+
+#include <linux/compat.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/*
+ * free a single branch
+ */
+
+/* prohibit rmdir to the root of the branch */
+/* todo: another new flag? */
+static void au_br_dflags_force(struct au_branch *br)
+{
+       struct dentry *h_dentry;
+
+       h_dentry = au_br_dentry(br);
+       spin_lock(&h_dentry->d_lock);
+       br->br_dflags = h_dentry->d_flags & DCACHE_MOUNTED;
+       h_dentry->d_flags |= DCACHE_MOUNTED;
+       spin_unlock(&h_dentry->d_lock);
+}
+
+/* restore its d_flags */
+static void au_br_dflags_restore(struct au_branch *br)
+{
+       struct dentry *h_dentry;
+
+       if (br->br_dflags)
+               return;
+
+       h_dentry = au_br_dentry(br);
+       spin_lock(&h_dentry->d_lock);
+       h_dentry->d_flags &= ~DCACHE_MOUNTED;
+       spin_unlock(&h_dentry->d_lock);
+}
+
+static void au_br_do_free(struct au_branch *br)
+{
+       int i;
+       struct au_wbr *wbr;
+       struct au_dykey **key;
+
+       au_hnotify_fin_br(br);
+
+       if (br->br_xino.xi_file)
+               fput(br->br_xino.xi_file);
+       mutex_destroy(&br->br_xino.xi_nondir_mtx);
+
+       AuDebugOn(atomic_read(&br->br_count));
+
+       wbr = br->br_wbr;
+       if (wbr) {
+               for (i = 0; i < AuBrWh_Last; i++)
+                       dput(wbr->wbr_wh[i]);
+               AuDebugOn(atomic_read(&wbr->wbr_wh_running));
+               AuRwDestroy(&wbr->wbr_wh_rwsem);
+       }
+
+       key = br->br_dykey;
+       for (i = 0; i < AuBrDynOp; i++, key++)
+               if (*key)
+                       au_dy_put(*key);
+               else
+                       break;
+
+       au_br_dflags_restore(br);
+
+       /* recursive lock, s_umount of branch's */
+       lockdep_off();
+       path_put(&br->br_path);
+       lockdep_on();
+       kfree(wbr);
+       kfree(br);
+}
+
+/*
+ * frees all branches
+ */
+void au_br_free(struct au_sbinfo *sbinfo)
+{
+       aufs_bindex_t bmax;
+       struct au_branch **br;
+
+       AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+       bmax = sbinfo->si_bend + 1;
+       br = sbinfo->si_branch;
+       while (bmax--)
+               au_br_do_free(*br++);
+}
+
+/*
+ * find the index of a branch which is specified by @br_id.
+ */
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
+{
+       aufs_bindex_t bindex, bend;
+
+       bend = au_sbend(sb);
+       for (bindex = 0; bindex <= bend; bindex++)
+               if (au_sbr_id(sb, bindex) == br_id)
+                       return bindex;
+       return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * add a branch
+ */
+
+static int test_overlap(struct super_block *sb, struct dentry *h_adding,
+                       struct dentry *h_root)
+{
+       if (unlikely(h_adding == h_root
+                    || au_test_loopback_overlap(sb, h_adding)))
+               return 1;
+       if (h_adding->d_sb != h_root->d_sb)
+               return 0;
+       return au_test_subdir(h_adding, h_root)
+               || au_test_subdir(h_root, h_adding);
+}
+
+/*
+ * returns a newly allocated branch. @new_nbranch is a number of branches
+ * after adding a branch.
+ */
+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
+                                    int perm)
+{
+       struct au_branch *add_branch;
+       struct dentry *root;
+       int err;
+
+       err = -ENOMEM;
+       root = sb->s_root;
+       add_branch = kmalloc(sizeof(*add_branch), GFP_NOFS);
+       if (unlikely(!add_branch))
+               goto out;
+
+       err = au_hnotify_init_br(add_branch, perm);
+       if (unlikely(err))
+               goto out_br;
+
+       add_branch->br_wbr = NULL;
+       if (au_br_writable(perm)) {
+               /* may be freed separately at changing the branch permission */
+               add_branch->br_wbr = kmalloc(sizeof(*add_branch->br_wbr),
+                                            GFP_NOFS);
+               if (unlikely(!add_branch->br_wbr))
+                       goto out_hnotify;
+       }
+
+       err = au_sbr_realloc(au_sbi(sb), new_nbranch);
+       if (!err)
+               err = au_di_realloc(au_di(root), new_nbranch);
+       if (!err)
+               err = au_ii_realloc(au_ii(root->d_inode), new_nbranch);
+       if (!err)
+               return add_branch; /* success */
+
+       kfree(add_branch->br_wbr);
+
+out_hnotify:
+       au_hnotify_fin_br(add_branch);
+out_br:
+       kfree(add_branch);
+out:
+       return ERR_PTR(err);
+}
+
+/*
+ * test if the branch permission is legal or not.
+ */
+static int test_br(struct inode *inode, int brperm, char *path)
+{
+       int err;
+
+       err = (au_br_writable(brperm) && IS_RDONLY(inode));
+       if (!err)
+               goto out;
+
+       err = -EINVAL;
+       pr_err("write permission for readonly mount or inode, %s\n", path);
+
+out:
+       return err;
+}
+
+/*
+ * returns:
+ * 0: success, the caller will add it
+ * plus: success, it is already unified, the caller should ignore it
+ * minus: error
+ */
+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+       int err;
+       aufs_bindex_t bend, bindex;
+       struct dentry *root;
+       struct inode *inode, *h_inode;
+
+       root = sb->s_root;
+       bend = au_sbend(sb);
+       if (unlikely(bend >= 0
+                    && au_find_dbindex(root, add->path.dentry) >= 0)) {
+               err = 1;
+               if (!remount) {
+                       err = -EINVAL;
+                       pr_err("%s duplicated\n", add->pathname);
+               }
+               goto out;
+       }
+
+       err = -ENOSPC; /* -E2BIG; */
+       if (unlikely(AUFS_BRANCH_MAX <= add->bindex
+                    || AUFS_BRANCH_MAX - 1 <= bend)) {
+               pr_err("number of branches exceeded %s\n", add->pathname);
+               goto out;
+       }
+
+       err = -EDOM;
+       if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) {
+               pr_err("bad index %d\n", add->bindex);
+               goto out;
+       }
+
+       inode = add->path.dentry->d_inode;
+       err = -ENOENT;
+       if (unlikely(!inode->i_nlink)) {
+               pr_err("no existence %s\n", add->pathname);
+               goto out;
+       }
+
+       err = -EINVAL;
+       if (unlikely(inode->i_sb == sb)) {
+               pr_err("%s must be outside\n", add->pathname);
+               goto out;
+       }
+
+       if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
+               pr_err("unsupported filesystem, %s (%s)\n",
+                      add->pathname, au_sbtype(inode->i_sb));
+               goto out;
+       }
+
+       err = test_br(add->path.dentry->d_inode, add->perm, add->pathname);
+       if (unlikely(err))
+               goto out;
+
+       if (bend < 0)
+               return 0; /* success */
+
+       err = -EINVAL;
+       for (bindex = 0; bindex <= bend; bindex++)
+               if (unlikely(test_overlap(sb, add->path.dentry,
+                                         au_h_dptr(root, bindex)))) {
+                       pr_err("%s is overlapped\n", add->pathname);
+                       goto out;
+               }
+
+       err = 0;
+       if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
+               h_inode = au_h_dptr(root, 0)->d_inode;
+               if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
+                   || h_inode->i_uid != inode->i_uid
+                   || h_inode->i_gid != inode->i_gid)
+                       pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
+                               add->pathname,
+                               inode->i_uid, inode->i_gid,
+                               (inode->i_mode & S_IALLUGO),
+                               h_inode->i_uid, h_inode->i_gid,
+                               (h_inode->i_mode & S_IALLUGO));
+       }
+
+out:
+       return err;
+}
+
+/*
+ * initialize or clean the whiteouts for an adding branch
+ */
+static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
+                        int new_perm)
+{
+       int err, old_perm;
+       aufs_bindex_t bindex;
+       struct mutex *h_mtx;
+       struct au_wbr *wbr;
+       struct au_hinode *hdir;
+
+       wbr = br->br_wbr;
+       old_perm = br->br_perm;
+       br->br_perm = new_perm;
+       hdir = NULL;
+       h_mtx = NULL;
+       bindex = au_br_index(sb, br->br_id);
+       if (0 <= bindex) {
+               hdir = au_hi(sb->s_root->d_inode, bindex);
+               au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+       } else {
+               h_mtx = &au_br_dentry(br)->d_inode->i_mutex;
+               mutex_lock_nested(h_mtx, AuLsc_I_PARENT);
+       }
+       if (!wbr)
+               err = au_wh_init(br, sb);
+       else {
+               wbr_wh_write_lock(wbr);
+               err = au_wh_init(br, sb);
+               wbr_wh_write_unlock(wbr);
+       }
+       if (hdir)
+               au_hn_imtx_unlock(hdir);
+       else
+               mutex_unlock(h_mtx);
+       br->br_perm = old_perm;
+
+       if (!err && wbr && !au_br_writable(new_perm)) {
+               kfree(wbr);
+               br->br_wbr = NULL;
+       }
+
+       return err;
+}
+
+static int au_wbr_init(struct au_branch *br, struct super_block *sb,
+                      int perm)
+{
+       int err;
+       struct kstatfs kst;
+       struct au_wbr *wbr;
+
+       wbr = br->br_wbr;
+       au_rw_init(&wbr->wbr_wh_rwsem);
+       memset(wbr->wbr_wh, 0, sizeof(wbr->wbr_wh));
+       atomic_set(&wbr->wbr_wh_running, 0);
+       wbr->wbr_bytes = 0;
+
+       /*
+        * a limit for rmdir/rename a dir
+        * cf. AUFS_MAX_NAMELEN in include/linux/aufs_type.h
+        */
+       err = vfs_statfs(&br->br_path, &kst);
+       if (unlikely(err))
+               goto out;
+       err = -EINVAL;
+       if (kst.f_namelen >= NAME_MAX)
+               err = au_br_init_wh(sb, br, perm);
+       else
+               pr_err("%.*s(%s), unsupported namelen %ld\n",
+                      AuDLNPair(au_br_dentry(br)),
+                      au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen);
+
+out:
+       return err;
+}
+
+/* initialize a new branch */
+static int au_br_init(struct au_branch *br, struct super_block *sb,
+                     struct au_opt_add *add)
+{
+       int err;
+
+       err = 0;
+       memset(&br->br_xino, 0, sizeof(br->br_xino));
+       mutex_init(&br->br_xino.xi_nondir_mtx);
+       br->br_perm = add->perm;
+       BUILD_BUG_ON(sizeof(br->br_dflags)
+                    != sizeof(br->br_path.dentry->d_flags));
+       br->br_dflags = DCACHE_MOUNTED;
+       br->br_path = add->path; /* set first, path_get() later */
+       spin_lock_init(&br->br_dykey_lock);
+       memset(br->br_dykey, 0, sizeof(br->br_dykey));
+       atomic_set(&br->br_count, 0);
+       atomic_set(&br->br_xino_running, 0);
+       br->br_id = au_new_br_id(sb);
+       AuDebugOn(br->br_id < 0);
+
+       if (au_br_writable(add->perm)) {
+               err = au_wbr_init(br, sb, add->perm);
+               if (unlikely(err))
+                       goto out_err;
+       }
+
+       if (au_opt_test(au_mntflags(sb), XINO)) {
+               err = au_xino_br(sb, br, add->path.dentry->d_inode->i_ino,
+                                au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1);
+               if (unlikely(err)) {
+                       AuDebugOn(br->br_xino.xi_file);
+                       goto out_err;
+               }
+       }
+
+       sysaufs_br_init(br);
+       path_get(&br->br_path);
+       goto out; /* success */
+
+out_err:
+       memset(&br->br_path, 0, sizeof(br->br_path));
+out:
+       return err;
+}
+
+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
+                            struct au_branch *br, aufs_bindex_t bend,
+                            aufs_bindex_t amount)
+{
+       struct au_branch **brp;
+
+       AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+       brp = sbinfo->si_branch + bindex;
+       memmove(brp + 1, brp, sizeof(*brp) * amount);
+       *brp = br;
+       sbinfo->si_bend++;
+       if (unlikely(bend < 0))
+               sbinfo->si_bend = 0;
+}
+
+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
+                            aufs_bindex_t bend, aufs_bindex_t amount)
+{
+       struct au_hdentry *hdp;
+
+       AuRwMustWriteLock(&dinfo->di_rwsem);
+
+       hdp = dinfo->di_hdentry + bindex;
+       memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
+       au_h_dentry_init(hdp);
+       dinfo->di_bend++;
+       if (unlikely(bend < 0))
+               dinfo->di_bstart = 0;
+}
+
+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
+                            aufs_bindex_t bend, aufs_bindex_t amount)
+{
+       struct au_hinode *hip;
+
+       AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+       hip = iinfo->ii_hinode + bindex;
+       memmove(hip + 1, hip, sizeof(*hip) * amount);
+       hip->hi_inode = NULL;
+       au_hn_init(hip);
+       iinfo->ii_bend++;
+       if (unlikely(bend < 0))
+               iinfo->ii_bstart = 0;
+}
+
+static void au_br_do_add(struct super_block *sb, struct au_branch *br,
+                        aufs_bindex_t bindex)
+{
+       struct dentry *root, *h_dentry;
+       struct inode *root_inode;
+       aufs_bindex_t bend, amount;
+
+       au_br_dflags_force(br);
+
+       root = sb->s_root;
+       root_inode = root->d_inode;
+       bend = au_sbend(sb);
+       amount = bend + 1 - bindex;
+       h_dentry = au_br_dentry(br);
+       au_sbilist_lock();
+       au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount);
+       au_br_do_add_hdp(au_di(root), bindex, bend, amount);
+       au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount);
+       au_set_h_dptr(root, bindex, dget(h_dentry));
+       au_set_h_iptr(root_inode, bindex, au_igrab(h_dentry->d_inode),
+                     /*flags*/0);
+       au_sbilist_unlock();
+}
+
+/*
+ * add the branch described by @add to @sb at index add->bindex.
+ * Returns 0 on success or a negative errno.  When @remount is set, an
+ * already-existing branch is tolerated (test_add() returns > 0) and the
+ * sysfs branch entries after the insertion point are re-created.
+ * The root inode must be locked by the caller (asserted by IMustLock).
+ */
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+       int err;
+       aufs_bindex_t bend, add_bindex;
+       struct dentry *root, *h_dentry;
+       struct inode *root_inode;
+       struct au_branch *add_branch;
+
+       root = sb->s_root;
+       root_inode = root->d_inode;
+       IMustLock(root_inode);
+       err = test_add(sb, add, remount);
+       if (unlikely(err < 0))
+               goto out;
+       if (err) {
+               err = 0;
+               goto out; /* success */
+       }
+
+       /* bend is the highest branch index; grow the arrays by one slot */
+       bend = au_sbend(sb);
+       add_branch = au_br_alloc(sb, bend + 2, add->perm);
+       err = PTR_ERR(add_branch);
+       if (IS_ERR(add_branch))
+               goto out;
+
+       err = au_br_init(add_branch, sb, add);
+       if (unlikely(err)) {
+               au_br_do_free(add_branch);
+               goto out;
+       }
+
+       add_bindex = add->bindex;
+       if (!remount)
+               au_br_do_add(sb, add_branch, add_bindex);
+       else {
+               /* on remount, refresh the sysfs entries around the new slot */
+               sysaufs_brs_del(sb, add_bindex);
+               au_br_do_add(sb, add_branch, add_bindex);
+               sysaufs_brs_add(sb, add_bindex);
+       }
+
+       h_dentry = add->path.dentry;
+       if (!add_bindex) {
+               /* new topmost branch: root attrs and s_maxbytes follow it */
+               au_cpup_attr_all(root_inode, /*force*/1);
+               sb->s_maxbytes = h_dentry->d_sb->s_maxbytes;
+       } else
+               au_add_nlink(root_inode, h_dentry->d_inode);
+
+       /*
+        * this test/set prevents aufs from handling unnecessary notify events
+        * of xino files, in case of re-adding a writable branch which was
+        * once detached from aufs.
+        */
+       if (au_xino_brid(sb) < 0
+           && au_br_writable(add_branch->br_perm)
+           && !au_test_fs_bad_xino(h_dentry->d_sb)
+           && add_branch->br_xino.xi_file
+           && add_branch->br_xino.xi_file->f_dentry->d_parent == h_dentry)
+               au_xino_brid_set(sb, add_branch->br_id);
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * delete a branch
+ */
+
+/* to show the line number, do not make it inlined function */
+#define AuVerbose(do_info, fmt, ...) do { \
+       if (do_info) \
+               pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+/*
+ * an inode "blocks" the branch deletion when it is a non-directory
+ * (cannot be transparently re-looked-up on another branch), or when
+ * bstart == bend, i.e. it exists on this single branch only.
+ */
+static int au_test_ibusy(struct inode *inode, aufs_bindex_t bstart,
+                        aufs_bindex_t bend)
+{
+       return (inode && !S_ISDIR(inode->i_mode)) || bstart == bend;
+}
+
+/* dentry-level variant: delegates to the inode test via d_inode */
+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t bstart,
+                        aufs_bindex_t bend)
+{
+       return au_test_ibusy(dentry->d_inode, bstart, bend);
+}
+
+/*
+ * test if the branch is deletable or not.
+ */
+/*
+ * walk every cached dentry under @root and return -EBUSY if any of them
+ * pins the branch at @bindex (see au_test_dbusy()).  Dentries whose
+ * generation does not match @sigen are revalidated first.
+ */
+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
+                           unsigned int sigen, const unsigned int verbose)
+{
+       int err, i, j, ndentry;
+       aufs_bindex_t bstart, bend;
+       struct au_dcsub_pages dpages;
+       struct au_dpage *dpage;
+       struct dentry *d;
+
+       err = au_dpages_init(&dpages, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+       /* collect all cached dentries under root into dpages */
+       err = au_dcsub_pages(&dpages, root, NULL, NULL);
+       if (unlikely(err))
+               goto out_dpages;
+
+       for (i = 0; !err && i < dpages.ndpage; i++) {
+               dpage = dpages.dpages + i;
+               ndentry = dpage->ndentry;
+               for (j = 0; !err && j < ndentry; j++) {
+                       d = dpage->dentries[j];
+                       AuDebugOn(!d->d_count);
+                       if (!au_digen_test(d, sigen)) {
+                               /* generation is current: a read lock suffices */
+                               di_read_lock_child(d, AuLock_IR);
+                               if (unlikely(au_dbrange_test(d))) {
+                                       di_read_unlock(d, AuLock_IR);
+                                       continue;
+                               }
+                       } else {
+                               /* stale: revalidate the path under write lock,
+                                * then downgrade to the read lock expected by
+                                * the common unlock below */
+                               di_write_lock_child(d);
+                               if (unlikely(au_dbrange_test(d))) {
+                                       di_write_unlock(d);
+                                       continue;
+                               }
+                               err = au_reval_dpath(d, sigen);
+                               if (!err)
+                                       di_downgrade_lock(d, AuLock_IR);
+                               else {
+                                       di_write_unlock(d);
+                                       break;
+                               }
+                       }
+
+                       /* AuDbgDentry(d); */
+                       bstart = au_dbstart(d);
+                       bend = au_dbend(d);
+                       if (bstart <= bindex
+                           && bindex <= bend
+                           && au_h_dptr(d, bindex)
+                           && au_test_dbusy(d, bstart, bend)) {
+                               err = -EBUSY;
+                               AuVerbose(verbose, "busy %.*s\n", AuDLNPair(d));
+                               AuDbgDentry(d);
+                       }
+                       di_read_unlock(d, AuLock_IR);
+               }
+       }
+
+out_dpages:
+       au_dpages_free(&dpages);
+out:
+       return err;
+}
+
+/*
+ * inode-level counterpart of test_dentry_busy(): scan every aufs inode
+ * of @sb and return -EBUSY if one of them pins the branch at @bindex.
+ * Inodes with a stale generation are refreshed first.
+ */
+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
+                          unsigned int sigen, const unsigned int verbose)
+{
+       int err;
+       unsigned long long max, ull;
+       struct inode *i, **array;
+       aufs_bindex_t bstart, bend;
+
+       array = au_iarray_alloc(sb, &max);
+       err = PTR_ERR(array);
+       if (IS_ERR(array))
+               goto out;
+
+       err = 0;
+       AuDbg("b%d\n", bindex);
+       for (ull = 0; !err && ull < max; ull++) {
+               i = array[ull];
+               /* the root inode is handled by the caller, skip it */
+               if (i->i_ino == AUFS_ROOT_INO)
+                       continue;
+
+               /* AuDbgInode(i); */
+               if (au_iigen(i, NULL) == sigen)
+                       ii_read_lock_child(i);
+               else {
+                       /* stale inode: refresh under write lock, then
+                        * downgrade so the common unlock below applies */
+                       ii_write_lock_child(i);
+                       err = au_refresh_hinode_self(i);
+                       au_iigen_dec(i);
+                       if (!err)
+                               ii_downgrade_lock(i);
+                       else {
+                               ii_write_unlock(i);
+                               break;
+                       }
+               }
+
+               bstart = au_ibstart(i);
+               bend = au_ibend(i);
+               if (bstart <= bindex
+                   && bindex <= bend
+                   && au_h_iptr(i, bindex)
+                   && au_test_ibusy(i, bstart, bend)) {
+                       err = -EBUSY;
+                       AuVerbose(verbose, "busy i%lu\n", i->i_ino);
+                       AuDbgInode(i);
+               }
+               ii_read_unlock(i);
+       }
+       au_iarray_free(array, max);
+
+out:
+       return err;
+}
+
+/*
+ * test whether any dentry or inode under @root still pins the branch at
+ * @bindex.  The root dentry write lock is temporarily released while the
+ * scans run and re-taken before returning, so the caller's lock state is
+ * unchanged on exit.
+ */
+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex,
+                             const unsigned int verbose)
+{
+       int err;
+       unsigned int sigen;
+
+       sigen = au_sigen(root->d_sb);
+       DiMustNoWaiters(root);
+       IiMustNoWaiters(root->d_inode);
+       di_write_unlock(root);
+       err = test_dentry_busy(root, bindex, sigen, verbose);
+       if (!err)
+               err = test_inode_busy(root->d_sb, bindex, sigen, verbose);
+       di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
+
+       return err;
+}
+
+/*
+ * remove the slot at @bindex from the superblock's branch array by
+ * shifting the tail down, then shrink the array by one entry.  A failing
+ * krealloc only wastes one slot, hence "harmless error".
+ */
+static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
+                            const aufs_bindex_t bindex,
+                            const aufs_bindex_t bend)
+{
+       struct au_branch **brp, **p;
+
+       AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+       brp = sbinfo->si_branch + bindex;
+       if (bindex < bend)
+               memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex));
+       sbinfo->si_branch[0 + bend] = NULL;
+       sbinfo->si_bend--;
+
+       p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, AuGFP_SBILIST);
+       if (p)
+               sbinfo->si_branch = p;
+       /* harmless error */
+}
+
+/* same shift-and-shrink operation on the dinfo's hidden-dentry array */
+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
+                            const aufs_bindex_t bend)
+{
+       struct au_hdentry *hdp, *p;
+
+       AuRwMustWriteLock(&dinfo->di_rwsem);
+
+       hdp = dinfo->di_hdentry;
+       if (bindex < bend)
+               memmove(hdp + bindex, hdp + bindex + 1,
+                       sizeof(*hdp) * (bend - bindex));
+       hdp[0 + bend].hd_dentry = NULL;
+       dinfo->di_bend--;
+
+       p = krealloc(hdp, sizeof(*p) * bend, AuGFP_SBILIST);
+       if (p)
+               dinfo->di_hdentry = p;
+       /* harmless error */
+}
+
+/* same shift-and-shrink operation on the iinfo's hidden-inode array */
+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
+                            const aufs_bindex_t bend)
+{
+       struct au_hinode *hip, *p;
+
+       AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+       hip = iinfo->ii_hinode + bindex;
+       if (bindex < bend)
+               memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex));
+       iinfo->ii_hinode[0 + bend].hi_inode = NULL;
+       au_hn_init(iinfo->ii_hinode + bend);
+       iinfo->ii_bend--;
+
+       p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, AuGFP_SBILIST);
+       if (p)
+               iinfo->ii_hinode = p;
+       /* harmless error */
+}
+
+/*
+ * actually detach branch @br at @bindex: drop its root-level hidden
+ * dentry/inode references and shrink the three per-branch arrays under
+ * the sbilist lock, then free the branch object itself.
+ */
+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
+                        struct au_branch *br)
+{
+       aufs_bindex_t bend;
+       struct au_sbinfo *sbinfo;
+       struct dentry *root, *h_root;
+       struct inode *inode, *h_inode;
+       struct au_hinode *hinode;
+
+       SiMustWriteLock(sb);
+
+       root = sb->s_root;
+       inode = root->d_inode;
+       sbinfo = au_sbi(sb);
+       bend = sbinfo->si_bend;
+
+       /* grab the hidden objects so they outlive the array shrink below */
+       h_root = au_h_dptr(root, bindex);
+       hinode = au_hi(inode, bindex);
+       h_inode = au_igrab(hinode->hi_inode);
+       au_hiput(hinode);
+
+       au_sbilist_lock();
+       au_br_do_del_brp(sbinfo, bindex, bend);
+       au_br_do_del_hdp(au_di(root), bindex, bend);
+       au_br_do_del_hip(au_ii(inode), bindex, bend);
+       au_sbilist_unlock();
+
+       dput(h_root);
+       iput(h_inode);
+       au_br_do_free(br);
+}
+
+/*
+ * delete the branch described by @del from @sb.
+ * Fails with -EBUSY when the branch has open files or when a cached
+ * dentry/inode exists only on that branch; -ENOENT when the branch is
+ * unknown (silently ignored on remount).  If the whiteout base had to be
+ * dropped and the deletion then fails, the whiteouts are re-created.
+ */
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
+{
+       int err, rerr, i;
+       unsigned int mnt_flags;
+       aufs_bindex_t bindex, bend, br_id;
+       unsigned char do_wh, verbose;
+       struct au_branch *br;
+       struct au_wbr *wbr;
+
+       err = 0;
+       bindex = au_find_dbindex(sb->s_root, del->h_path.dentry);
+       if (bindex < 0) {
+               if (remount)
+                       goto out; /* success */
+               err = -ENOENT;
+               pr_err("%s no such branch\n", del->pathname);
+               goto out;
+       }
+       AuDbg("bindex b%d\n", bindex);
+
+       err = -EBUSY;
+       mnt_flags = au_mntflags(sb);
+       verbose = !!au_opt_test(mnt_flags, VERBOSE);
+       bend = au_sbend(sb);
+       /* bend == 0 means this is the last remaining branch */
+       if (unlikely(!bend)) {
+               AuVerbose(verbose, "no more branches left\n");
+               goto out;
+       }
+       br = au_sbr(sb, bindex);
+       AuDebugOn(!path_equal(&br->br_path, &del->h_path));
+       i = atomic_read(&br->br_count);
+       if (unlikely(i)) {
+               AuVerbose(verbose, "%d file(s) opened\n", i);
+               goto out;
+       }
+
+       /* drop the whiteout base/plink/orphan dentries up-front; they are
+        * restored via out_wh if the busy test below fails */
+       wbr = br->br_wbr;
+       do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
+       if (do_wh) {
+               /* instead of WbrWhMustWriteLock(wbr) */
+               SiMustWriteLock(sb);
+               for (i = 0; i < AuBrWh_Last; i++) {
+                       dput(wbr->wbr_wh[i]);
+                       wbr->wbr_wh[i] = NULL;
+               }
+       }
+
+       err = test_children_busy(sb->s_root, bindex, verbose);
+       if (unlikely(err)) {
+               if (do_wh)
+                       goto out_wh;
+               goto out;
+       }
+
+       err = 0;
+       br_id = br->br_id;
+       if (!remount)
+               au_br_do_del(sb, bindex, br);
+       else {
+               /* on remount, refresh the sysfs entries around the slot */
+               sysaufs_brs_del(sb, bindex);
+               au_br_do_del(sb, bindex, br);
+               sysaufs_brs_add(sb, bindex);
+       }
+
+       if (!bindex) {
+               /* the topmost branch changed: refresh root attrs */
+               au_cpup_attr_all(sb->s_root->d_inode, /*force*/1);
+               sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes;
+       } else
+               au_sub_nlink(sb->s_root->d_inode, del->h_path.dentry->d_inode);
+       if (au_opt_test(mnt_flags, PLINK))
+               au_plink_half_refresh(sb, br_id);
+
+       if (au_xino_brid(sb) == br_id)
+               au_xino_brid_set(sb, -1);
+       goto out; /* success */
+
+out_wh:
+       /* revert */
+       rerr = au_br_init_wh(sb, br, br->br_perm);
+       if (rerr)
+               pr_warn("failed re-creating base whiteout, %s. (%d)\n",
+                       del->pathname, rerr);
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * AUFS_CTL_IBUSY ioctl backend: given {ino, bindex} from userspace,
+ * write back the hidden inode number (h_ino) when that inode pins the
+ * given branch, or 0 otherwise.  Requires CAP_SYS_ADMIN.
+ */
+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
+{
+       int err;
+       aufs_bindex_t bstart, bend;
+       struct aufs_ibusy ibusy;
+       struct inode *inode, *h_inode;
+
+       err = -EPERM;
+       if (unlikely(!capable(CAP_SYS_ADMIN)))
+               goto out;
+
+       err = copy_from_user(&ibusy, arg, sizeof(ibusy));
+       if (!err)
+               /* verify writability early, before doing any work */
+               err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino));
+       if (unlikely(err)) {
+               err = -EFAULT;
+               AuTraceErr(err);
+               goto out;
+       }
+
+       err = -EINVAL;
+       si_read_lock(sb, AuLock_FLUSH);
+       if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb)))
+               goto out_unlock;
+
+       err = 0;
+       ibusy.h_ino = 0; /* invalid */
+       inode = ilookup(sb, ibusy.ino);
+       if (!inode
+           || inode->i_ino == AUFS_ROOT_INO
+           || is_bad_inode(inode))
+               goto out_unlock;
+
+       ii_read_lock_child(inode);
+       bstart = au_ibstart(inode);
+       bend = au_ibend(inode);
+       if (bstart <= ibusy.bindex && ibusy.bindex <= bend) {
+               h_inode = au_h_iptr(inode, ibusy.bindex);
+               if (h_inode && au_test_ibusy(inode, bstart, bend))
+                       ibusy.h_ino = h_inode->i_ino;
+       }
+       ii_read_unlock(inode);
+       iput(inode);
+
+out_unlock:
+       si_read_unlock(sb);
+       if (!err) {
+               err = __put_user(ibusy.h_ino, &arg->h_ino);
+               if (unlikely(err)) {
+                       err = -EFAULT;
+                       AuTraceErr(err);
+               }
+       }
+out:
+       return err;
+}
+
+/* native ioctl entry point: @arg is a userspace struct aufs_ibusy pointer */
+long au_ibusy_ioctl(struct file *file, unsigned long arg)
+{
+       return au_ibusy(file->f_dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat entry point: translate the pointer via compat_ptr() */
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg)
+{
+       return au_ibusy(file->f_dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * change a branch permission
+ */
+
+/* warn once when IMA is enabled; an RW->RO branch change confuses it */
+static void au_warn_ima(void)
+{
+#ifdef CONFIG_IMA
+       /* since it doesn't support mark_files_ro() */
+       AuWarn1("RW -> RO makes IMA to produce wrong message\n");
+#endif
+}
+
+/* whiteout-ability lost going from @a to @b? */
+static int do_need_sigen_inc(int a, int b)
+{
+       return au_br_whable(a) && !au_br_whable(b);
+}
+
+/* a permission change in either direction that toggles whiteout-ability
+ * requires bumping the superblock generation */
+static int need_sigen_inc(int old, int new)
+{
+       return do_need_sigen_inc(old, new)
+               || do_need_sigen_inc(new, old);
+}
+
+/*
+ * au_array_alloc() callback: fill @a with a referenced struct file for
+ * every open non-special aufs file of @sb, returning the count.
+ */
+static unsigned long long au_farray_cb(void *a,
+                                      unsigned long long max __maybe_unused,
+                                      void *arg)
+{
+       unsigned long long n;
+       struct file **p, *f;
+       struct super_block *sb = arg;
+
+       n = 0;
+       p = a;
+       lg_global_lock(files_lglock);
+       do_file_list_for_each_entry(sb, f) {
+               if (au_fi(f)
+                   && file_count(f)
+                   && !special_file(f->f_dentry->d_inode->i_mode)) {
+                       /* hold a reference; released by au_farray_free() */
+                       get_file(f);
+                       *p++ = f;
+                       n++;
+                       AuDebugOn(n > max);
+               }
+       } while_file_list_for_each_entry;
+       lg_global_unlock(files_lglock);
+
+       return n;
+}
+
+/* allocate an array snapshot of all open files on @sb; *max gets the size */
+static struct file **au_farray_alloc(struct super_block *sb,
+                                    unsigned long long *max)
+{
+       *max = atomic_long_read(&au_sbi(sb)->si_nfiles);
+       return au_array_alloc(max, au_farray_cb, sb);
+}
+
+/* drop the references taken in au_farray_cb() and free the array */
+static void au_farray_free(struct file **a, unsigned long long max)
+{
+       unsigned long long ull;
+
+       for (ull = 0; ull < max; ull++)
+               if (a[ull])
+                       fput(a[ull]);
+       au_array_free(a);
+}
+
+/*
+ * when a branch goes RW -> RO, force every regular file opened for write
+ * on that branch into read-only mode (clear FMODE_WRITE on the hidden
+ * file and drop the write access/mnt reference).  Fails with -EBUSY if
+ * any such file is mmapped.  Modelled on fs/super.c:mark_files_ro().
+ */
+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
+{
+       int err, do_warn;
+       unsigned int mnt_flags;
+       unsigned long long ull, max;
+       aufs_bindex_t br_id;
+       unsigned char verbose;
+       struct file *file, *hf, **array;
+       struct inode *inode;
+       struct au_hfile *hfile;
+
+       mnt_flags = au_mntflags(sb);
+       verbose = !!au_opt_test(mnt_flags, VERBOSE);
+
+       array = au_farray_alloc(sb, &max);
+       err = PTR_ERR(array);
+       if (IS_ERR(array))
+               goto out;
+
+       /*
+        * first pass: keep only writable regular files on this branch in
+        * the array (with an extra reference); NULL out the rest.
+        */
+       do_warn = 0;
+       br_id = au_sbr_id(sb, bindex);
+       for (ull = 0; ull < max; ull++) {
+               file = array[ull];
+
+               /* AuDbg("%.*s\n", AuDLNPair(file->f_dentry)); */
+               fi_read_lock(file);
+               if (unlikely(au_test_mmapped(file))) {
+                       err = -EBUSY;
+                       AuVerbose(verbose, "mmapped %.*s\n",
+                                 AuDLNPair(file->f_dentry));
+                       AuDbgFile(file);
+                       FiMustNoWaiters(file);
+                       fi_read_unlock(file);
+                       goto out_array;
+               }
+
+               inode = file->f_dentry->d_inode;
+               hfile = &au_fi(file)->fi_htop;
+               hf = hfile->hf_file;
+               if (!S_ISREG(inode->i_mode)
+                   || !(file->f_mode & FMODE_WRITE)
+                   || hfile->hf_br->br_id != br_id
+                   || !(hf->f_mode & FMODE_WRITE))
+                       array[ull] = NULL;
+               else {
+                       do_warn = 1;
+                       get_file(file);
+               }
+
+               FiMustNoWaiters(file);
+               fi_read_unlock(file);
+               fput(file);
+       }
+
+       err = 0;
+       if (do_warn)
+               au_warn_ima();
+
+       /* second pass: strip write mode from the surviving hidden files */
+       for (ull = 0; ull < max; ull++) {
+               file = array[ull];
+               if (!file)
+                       continue;
+
+               /* todo: already flushed? */
+               /* cf. fs/super.c:mark_files_ro() */
+               /* fi_read_lock(file); */
+               hfile = &au_fi(file)->fi_htop;
+               hf = hfile->hf_file;
+               /* fi_read_unlock(file); */
+               spin_lock(&hf->f_lock);
+               hf->f_mode &= ~FMODE_WRITE;
+               spin_unlock(&hf->f_lock);
+               if (!file_check_writeable(hf)) {
+                       file_release_write(hf);
+                       mnt_drop_write(hf->f_vfsmnt);
+               }
+       }
+
+out_array:
+       au_farray_free(array, max);
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * change the permission of an existing branch to mod->perm.
+ * Handles the rw->ro transition (remove whiteout base, force open files
+ * read-only, with a best-effort revert on failure) and ro->rw (create
+ * the writable-branch state).  *do_refresh is OR-ed with whether the
+ * change requires a superblock generation bump.
+ */
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+             int *do_refresh)
+{
+       int err, rerr;
+       aufs_bindex_t bindex;
+       struct dentry *root;
+       struct au_branch *br;
+
+       root = sb->s_root;
+       bindex = au_find_dbindex(root, mod->h_root);
+       if (bindex < 0) {
+               if (remount)
+                       return 0; /* success */
+               err = -ENOENT;
+               pr_err("%s no such branch\n", mod->path);
+               goto out;
+       }
+       AuDbg("bindex b%d\n", bindex);
+
+       err = test_br(mod->h_root->d_inode, mod->perm, mod->path);
+       if (unlikely(err))
+               goto out;
+
+       br = au_sbr(sb, bindex);
+       AuDebugOn(mod->h_root != au_br_dentry(br));
+       if (br->br_perm == mod->perm)
+               return 0; /* success */
+
+       if (au_br_writable(br->br_perm)) {
+               /* remove whiteout base */
+               err = au_br_init_wh(sb, br, mod->perm);
+               if (unlikely(err))
+                       goto out;
+
+               if (!au_br_writable(mod->perm)) {
+                       /* rw --> ro, file might be mmapped */
+                       DiMustNoWaiters(root);
+                       IiMustNoWaiters(root->d_inode);
+                       di_write_unlock(root);
+                       err = au_br_mod_files_ro(sb, bindex);
+                       /* aufs_write_lock() calls ..._child() */
+                       di_write_lock_child(root);
+
+                       if (unlikely(err)) {
+                               /* revert: re-create the writable-branch
+                                * state dropped by au_br_init_wh() above */
+                               rerr = -ENOMEM;
+                               br->br_wbr = kmalloc(sizeof(*br->br_wbr),
+                                                    GFP_NOFS);
+                               if (br->br_wbr)
+                                       rerr = au_wbr_init(br, sb, br->br_perm);
+                               if (unlikely(rerr)) {
+                                       AuIOErr("nested error %d (%d)\n",
+                                               rerr, err);
+                                       br->br_perm = mod->perm;
+                               }
+                       }
+               }
+       } else if (au_br_writable(mod->perm)) {
+               /* ro --> rw */
+               err = -ENOMEM;
+               br->br_wbr = kmalloc(sizeof(*br->br_wbr), GFP_NOFS);
+               if (br->br_wbr) {
+                       err = au_wbr_init(br, sb, mod->perm);
+                       if (unlikely(err)) {
+                               kfree(br->br_wbr);
+                               br->br_wbr = NULL;
+                       }
+               }
+       }
+
+       if (!err) {
+               /* keep the UNPIN dentry flags in sync with the new perm */
+               if ((br->br_perm & AuBrAttr_UNPIN)
+                   && !(mod->perm & AuBrAttr_UNPIN))
+                       au_br_dflags_force(br);
+               else if (!(br->br_perm & AuBrAttr_UNPIN)
+                        && (mod->perm & AuBrAttr_UNPIN))
+                       au_br_dflags_restore(br);
+               *do_refresh |= need_sigen_inc(br->br_perm, mod->perm);
+               br->br_perm = mod->perm;
+       }
+
+out:
+       AuTraceErr(err);
+       return err;
+}
diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h
new file mode 100644 (file)
index 0000000..998a92e
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * branch filesystems and xino for them
+ */
+
+#ifndef __AUFS_BRANCH_H__
+#define __AUFS_BRANCH_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+#include "dynop.h"
+#include "rwsem.h"
+#include "super.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* a xino file: the external inode-number translation table of a branch */
+struct au_xino_file {
+       struct file             *xi_file;
+       /* serializes xino updates for non-directories */
+       struct mutex            xi_nondir_mtx;
+
+       /* todo: make xino files an array to support huge inode number */
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry            *xi_dbgaufs;
+#endif
+};
+
+/* members for writable branch only */
+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
+struct au_wbr {
+       struct au_rwsem         wbr_wh_rwsem;
+       struct dentry           *wbr_wh[AuBrWh_Last];
+       atomic_t                wbr_wh_running;
+#define wbr_whbase             wbr_wh[AuBrWh_BASE]     /* whiteout base */
+#define wbr_plink              wbr_wh[AuBrWh_PLINK]    /* pseudo-link dir */
+#define wbr_orph               wbr_wh[AuBrWh_ORPH]     /* dir for orphans */
+
+       /* mfs mode */
+       unsigned long long      wbr_bytes;
+};
+
+/* ext2 has 3 types of operations at least, ext3 has 4 */
+#define AuBrDynOp (AuDyLast * 4)
+
+/* sysfs entries */
+struct au_brsysfs {
+       char                    name[16];
+       struct attribute        attr;
+};
+
+enum {
+       AuBrSysfs_BR,
+       AuBrSysfs_BRID,
+       AuBrSysfs_Last
+};
+
+/* protected by superblock rwsem */
+struct au_branch {
+       struct au_xino_file     br_xino;
+
+       /* stable id, unlike the bindex which shifts on add/del */
+       aufs_bindex_t           br_id;
+
+       int                     br_perm;        /* AuBrPerm_* | attr bits */
+       unsigned int            br_dflags;
+       struct path             br_path;        /* mnt + dentry of branch root */
+       spinlock_t              br_dykey_lock;
+       struct au_dykey         *br_dykey[AuBrDynOp];
+       atomic_t                br_count;       /* users pinning this branch */
+
+       /* non-NULL only for writable branches */
+       struct au_wbr           *br_wbr;
+
+       /* xino truncation */
+       atomic_t                br_xino_running;
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+       struct fsnotify_group   *br_hfsn_group;
+       struct fsnotify_ops     br_hfsn_ops;
+#endif
+
+#ifdef CONFIG_SYSFS
+       /* entries under sysfs per mount-point */
+       struct au_brsysfs       br_sysfs[AuBrSysfs_Last];
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* vfsmount of the branch root */
+static inline struct vfsmount *au_br_mnt(struct au_branch *br)
+{
+       return br->br_path.mnt;
+}
+
+/* dentry of the branch root */
+static inline struct dentry *au_br_dentry(struct au_branch *br)
+{
+       return br->br_path.dentry;
+}
+
+/* superblock of the underlying (hidden) filesystem */
+static inline struct super_block *au_br_sb(struct au_branch *br)
+{
+       return au_br_mnt(br)->mnt_sb;
+}
+
+/* branch permissions and attributes */
+#define AuBrPerm_RW            1               /* writable, hardlinkable wh */
+#define AuBrPerm_RO            (1 << 1)        /* readonly */
+#define AuBrPerm_RR            (1 << 2)        /* natively readonly */
+#define AuBrPerm_Mask          (AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR)
+
+#define AuBrRAttr_WH           (1 << 3)        /* whiteout-able */
+
+#define AuBrWAttr_NoLinkWH     (1 << 4)        /* un-hardlinkable whiteouts */
+
+#define AuBrAttr_UNPIN         (1 << 5)        /* rename-able top dir of
+                                                  branch */
+
+/* may aufs write to this branch? */
+static inline int au_br_writable(int brperm)
+{
+       return brperm & (AuBrPerm_RW);
+}
+
+/* may aufs create whiteouts on this branch? (RW, or RO with +wh attr) */
+static inline int au_br_whable(int brperm)
+{
+       return brperm & (AuBrPerm_RW | AuBrRAttr_WH);
+}
+
+/* may whiteouts be hardlinked on this branch? */
+static inline int au_br_wh_linkable(int brperm)
+{
+       return !(brperm & AuBrWAttr_NoLinkWH);
+}
+
+/* 0 when the branch accepts writes, -EROFS otherwise */
+static inline int au_br_rdonly(struct au_branch *br)
+{
+       return ((au_br_sb(br)->s_flags & MS_RDONLY)
+               || !au_br_writable(br->br_perm))
+               ? -EROFS : 0;
+}
+
+/* hnotify is pointless on natively-readonly branches */
+static inline int au_br_hnotifyable(int brperm __maybe_unused)
+{
+#ifdef CONFIG_AUFS_HNOTIFY
+       return !(brperm & AuBrPerm_RR);
+#else
+       return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* branch.c */
+struct au_sbinfo;
+void au_br_free(struct au_sbinfo *sinfo);
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
+struct au_opt_add;
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
+struct au_opt_del;
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
+long au_ibusy_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+struct au_opt_mod;
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+             int *do_refresh);
+
+/* xino.c */
+static const loff_t au_loff_max = LLONG_MAX;
+
+int au_xib_trunc(struct super_block *sb);
+ssize_t xino_fread(au_readf_t func, struct file *file, void *buf, size_t size,
+                  loff_t *pos);
+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size,
+                   loff_t *pos);
+struct file *au_xino_create2(struct file *base_file, struct file *copy_src);
+struct file *au_xino_create(struct super_block *sb, char *fname, int silent);
+ino_t au_xino_new_ino(struct super_block *sb);
+void au_xino_delete_inode(struct inode *inode, const int unlinked);
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+                 ino_t ino);
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+                ino_t *ino);
+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino,
+              struct file *base_file, int do_test);
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex);
+
+struct au_opt_xino;
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount);
+void au_xino_clr(struct super_block *sb);
+struct file *au_xino_def(struct super_block *sb);
+int au_xino_path(struct seq_file *seq, struct file *file);
+
+/* ---------------------------------------------------------------------- */
+
+/* Superblock to branch: thin convenience wrappers over au_sbr() */
+static inline
+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
+{
+       return au_sbr(sb, bindex)->br_id;
+}
+
+static inline
+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
+{
+       return au_br_mnt(au_sbr(sb, bindex));
+}
+
+static inline
+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
+{
+       return au_br_sb(au_sbr(sb, bindex));
+}
+
+/* release one br_count reference on the branch at @bindex */
+static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex)
+{
+       atomic_dec(&au_sbr(sb, bindex)->br_count);
+}
+
+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
+{
+       return au_sbr(sb, bindex)->br_perm;
+}
+
+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
+{
+       return au_br_whable(au_sbr_perm(sb, bindex));
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * wbr_wh_read_lock, wbr_wh_write_lock
+ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock
+ * (generated by the macro below for struct au_wbr's whiteout rwsem)
+ */
+AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem);
+
+#define WbrWhMustNoWaiters(wbr)        AuRwMustNoWaiters(&wbr->wbr_wh_rwsem)
+#define WbrWhMustAnyLock(wbr)  AuRwMustAnyLock(&wbr->wbr_wh_rwsem)
+#define WbrWhMustWriteLock(wbr)        AuRwMustWriteLock(&wbr->wbr_wh_rwsem)
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_BRANCH_H__ */
diff --git a/fs/aufs/conf.mk b/fs/aufs/conf.mk
new file mode 100644 (file)
index 0000000..6c5108d
--- /dev/null
@@ -0,0 +1,38 @@
+
+# Collect the active aufs Kconfig options into AuConfStr, then render them
+# into ${obj}/conf.str which fs/aufs/sysfs.o includes (see the dependency
+# at the bottom), so the configuration can be shown at run-time.
+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS}
+
+# append "NAME=value" to AuConfStr iff the option ${1} is set
+define AuConf
+ifdef ${1}
+AuConfStr += ${1}=${${1}}
+endif
+endef
+
+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \
+       SBILIST \
+       HNOTIFY HFSNOTIFY \
+       EXPORT INO_T_64 \
+       RDU \
+       PROC_MAP \
+       SP_IATTR \
+       SHWH \
+       BR_RAMFS \
+       BR_FUSE POLL \
+       BR_HFSPLUS \
+       BDEV_LOOP \
+       DEBUG MAGIC_SYSRQ
+$(foreach i, ${AuConfAll}, \
+       $(eval $(call AuConf,CONFIG_AUFS_${i})))
+
+# regenerate conf.str only when its content actually changes, to avoid
+# needless rebuilds of sysfs.o
+AuConfName = ${obj}/conf.str
+${AuConfName}.tmp: FORCE
+       @echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@
+${AuConfName}: ${AuConfName}.tmp
+       @diff -q $< $@ > /dev/null 2>&1 || { \
+       echo '  GEN    ' $@; \
+       cp -p $< $@; \
+       }
+FORCE:
+clean-files += ${AuConfName} ${AuConfName}.tmp
+${obj}/sysfs.o: ${AuConfName}
+
+-include ${srctree}/${src}/conf_priv.mk
diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c
new file mode 100644 (file)
index 0000000..7c0085f
--- /dev/null
@@ -0,0 +1,1236 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * copy-up functions, see wbr_policy.c for copy-down
+ */
+
+#include <linux/fs_stack.h>
+#include <linux/mm.h>
+#include "aufs.h"
+
+/*
+ * propagate a lower inode's i_flags (@iflags) to the aufs inode @dst,
+ * masking out the flags that must not be copied across layers; on a
+ * filesystem which does not maintain times, force NOATIME/NOCMTIME.
+ */
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags)
+{
+       const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
+               | S_NOATIME | S_NOCMTIME | S_AUTOMOUNT;
+
+       /* both are unsigned int; guard against a future type change */
+       BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags));
+
+       dst->i_flags |= iflags & ~mask;
+       if (au_test_fs_notime(dst->i_sb))
+               dst->i_flags |= S_NOATIME | S_NOCMTIME;
+}
+
+/* copy timestamps and i_size from the topmost lower inode to @inode */
+void au_cpup_attr_timesizes(struct inode *inode)
+{
+       struct inode *h_inode;
+
+       h_inode = au_h_iptr(inode, au_ibstart(inode));
+       fsstack_copy_attr_times(inode, h_inode);
+       fsstack_copy_inode_size(inode, h_inode);
+}
+
+/*
+ * recompute @inode->i_nlink from the lower inodes.
+ * for a pseudo-linked non-dir inode the link count is managed by the
+ * plink code, so do nothing unless @force is set.  for a directory,
+ * accumulate the nlink of every lower directory below the top branch.
+ */
+void au_cpup_attr_nlink(struct inode *inode, int force)
+{
+       struct inode *h_inode;
+       struct super_block *sb;
+       aufs_bindex_t bindex, bend;
+
+       sb = inode->i_sb;
+       bindex = au_ibstart(inode);
+       h_inode = au_h_iptr(inode, bindex);
+       if (!force
+           && !S_ISDIR(h_inode->i_mode)
+           && au_opt_test(au_mntflags(sb), PLINK)
+           && au_plink_test(inode))
+               return;
+
+       set_nlink(inode, h_inode->i_nlink);
+
+       /*
+        * fewer nlink makes find(1) noisy, but larger nlink doesn't.
+        * it may include the whplink directory.
+        */
+       if (S_ISDIR(h_inode->i_mode)) {
+               bend = au_ibend(inode);
+               for (bindex++; bindex <= bend; bindex++) {
+                       h_inode = au_h_iptr(inode, bindex);
+                       if (h_inode)
+                               au_add_nlink(inode, h_inode);
+               }
+       }
+}
+
+/* copy the user-changeable attributes (mode/uid/gid/times/sizes/flags)
+ * from the topmost lower inode to @inode */
+void au_cpup_attr_changeable(struct inode *inode)
+{
+       struct inode *h_inode;
+
+       h_inode = au_h_iptr(inode, au_ibstart(inode));
+       inode->i_mode = h_inode->i_mode;
+       inode->i_uid = h_inode->i_uid;
+       inode->i_gid = h_inode->i_gid;
+       au_cpup_attr_timesizes(inode);
+       au_cpup_attr_flags(inode, h_inode->i_flags);
+}
+
+/* record the lower inode's generation and superblock in the aufs inode
+ * info, used later to detect a stale lower inode.  caller must hold the
+ * iinfo write lock (asserted below). */
+void au_cpup_igen(struct inode *inode, struct inode *h_inode)
+{
+       struct au_iinfo *iinfo = au_ii(inode);
+
+       IiMustWriteLock(inode);
+
+       iinfo->ii_higen = h_inode->i_generation;
+       iinfo->ii_hsb1 = h_inode->i_sb;
+}
+
+/* copy every attribute (changeable attrs, nlink, rdev, blkbits, igen)
+ * from the topmost lower inode to @inode; @force is passed through to
+ * au_cpup_attr_nlink() */
+void au_cpup_attr_all(struct inode *inode, int force)
+{
+       struct inode *h_inode;
+
+       h_inode = au_h_iptr(inode, au_ibstart(inode));
+       au_cpup_attr_changeable(inode);
+       if (inode->i_nlink > 0)
+               au_cpup_attr_nlink(inode, force);
+       inode->i_rdev = h_inode->i_rdev;
+       inode->i_blkbits = h_inode->i_blkbits;
+       au_cpup_igen(inode, h_inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */
+
+/* keep the timestamps of the parent dir when cpup */
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+                   struct path *h_path)
+{
+       struct inode *h_inode;
+
+       dt->dt_dentry = dentry;
+       dt->dt_h_path = *h_path;
+       h_inode = h_path->dentry->d_inode;
+       dt->dt_atime = h_inode->i_atime;
+       dt->dt_mtime = h_inode->i_mtime;
+       /* smp_mb(); */
+}
+
+/* restore the timestamps previously saved by au_dtime_store();
+ * a failure is only logged, never propagated */
+void au_dtime_revert(struct au_dtime *dt)
+{
+       struct iattr attr;
+       int err;
+
+       attr.ia_atime = dt->dt_atime;
+       attr.ia_mtime = dt->dt_mtime;
+       /* ATTR_FORCE skips the permission check in notify_change() */
+       attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
+               | ATTR_ATIME | ATTR_ATIME_SET;
+
+       err = vfsub_notify_change(&dt->dt_h_path, &attr);
+       if (unlikely(err))
+               pr_warn("restoring timestamps failed(%d). ignored\n", err);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* internal use only */
+/* attributes of the copy-up source captured before the data copy,
+ * so cpup_iattr() can apply the pre-copy values to the destination */
+struct au_cpup_reg_attr {
+       int             valid;  /* non-zero when st/iflags were filled in */
+       struct kstat    st;
+       unsigned int    iflags; /* inode->i_flags */
+};
+
+/*
+ * apply the source attributes (owner, times, and mode if it differs)
+ * to the already-created destination of a copy-up on branch @bindex.
+ * prefers the snapshot in @h_src_attr when valid, otherwise reads the
+ * live source inode @h_src.
+ */
+static noinline_for_stack
+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src,
+              struct au_cpup_reg_attr *h_src_attr)
+{
+       int err, sbits;
+       struct iattr ia;
+       struct path h_path;
+       struct inode *h_isrc, *h_idst;
+       struct kstat *h_st;
+
+       h_path.dentry = au_h_dptr(dst, bindex);
+       h_idst = h_path.dentry->d_inode;
+       h_path.mnt = au_sbr_mnt(dst->d_sb, bindex);
+       h_isrc = h_src->d_inode;
+       ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
+               | ATTR_ATIME | ATTR_MTIME
+               | ATTR_ATIME_SET | ATTR_MTIME_SET;
+       if (h_src_attr && h_src_attr->valid) {
+               /* use the pre-copy snapshot of the source attributes */
+               h_st = &h_src_attr->st;
+               ia.ia_uid = h_st->uid;
+               ia.ia_gid = h_st->gid;
+               ia.ia_atime = h_st->atime;
+               ia.ia_mtime = h_st->mtime;
+               if (h_idst->i_mode != h_st->mode
+                   && !S_ISLNK(h_idst->i_mode)) {
+                       ia.ia_valid |= ATTR_MODE;
+                       ia.ia_mode = h_st->mode;
+               }
+               sbits = !!(h_st->mode & (S_ISUID | S_ISGID));
+               au_cpup_attr_flags(h_idst, h_src_attr->iflags);
+       } else {
+               ia.ia_uid = h_isrc->i_uid;
+               ia.ia_gid = h_isrc->i_gid;
+               ia.ia_atime = h_isrc->i_atime;
+               ia.ia_mtime = h_isrc->i_mtime;
+               if (h_idst->i_mode != h_isrc->i_mode
+                   && !S_ISLNK(h_idst->i_mode)) {
+                       ia.ia_valid |= ATTR_MODE;
+                       ia.ia_mode = h_isrc->i_mode;
+               }
+               sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
+               au_cpup_attr_flags(h_idst, h_isrc->i_flags);
+       }
+       err = vfsub_notify_change(&h_path, &ia);
+
+       /* is this nfs only? */
+       /* chown may have cleared suid/sgid; re-apply the mode.
+        * NOTE(review): this path uses h_isrc->i_mode even when sbits was
+        * derived from the h_src_attr snapshot above -- confirm intended. */
+       if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
+               ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+               ia.ia_mode = h_isrc->i_mode;
+               err = vfsub_notify_change(&h_path, &ia);
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * copy @len bytes from @src to @dst in @blksize chunks using @buf.
+ * all-zero blocks are skipped with llseek to create holes (sparse
+ * copy); the final hole, if any, is materialized via a 1-byte write
+ * (nfs) and/or a truncate to the final position.  returns 0 on
+ * success, negative errno on failure.  @buf is also reused as a
+ * struct iattr at the end to reduce stack usage.
+ */
+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
+                          char *buf, unsigned long blksize)
+{
+       int err;
+       size_t sz, rbytes, wbytes;
+       unsigned char all_zero;
+       char *p, *zp;
+       struct mutex *h_mtx;
+       /* reduce stack usage */
+       struct iattr *ia;
+
+       /* compare against the shared zero page to detect hole blocks */
+       zp = page_address(ZERO_PAGE(0));
+       if (unlikely(!zp))
+               return -ENOMEM; /* possible? */
+
+       err = 0;
+       all_zero = 0;
+       while (len) {
+               AuDbg("len %lld\n", len);
+               sz = blksize;
+               if (len < blksize)
+                       sz = len;
+
+               rbytes = 0;
+               /* todo: signal_pending? */
+               /* retry transient read failures until some data arrives */
+               while (!rbytes || err == -EAGAIN || err == -EINTR) {
+                       rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
+                       err = rbytes;
+               }
+               if (unlikely(err < 0))
+                       break;
+
+               all_zero = 0;
+               if (len >= rbytes && rbytes == blksize)
+                       all_zero = !memcmp(buf, zp, rbytes);
+               if (!all_zero) {
+                       /* write out the block, handling short writes */
+                       wbytes = rbytes;
+                       p = buf;
+                       while (wbytes) {
+                               size_t b;
+
+                               b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
+                               err = b;
+                               /* todo: signal_pending? */
+                               if (unlikely(err == -EAGAIN || err == -EINTR))
+                                       continue;
+                               if (unlikely(err < 0))
+                                       break;
+                               wbytes -= b;
+                               p += b;
+                       }
+                       if (unlikely(err < 0))
+                               break;
+               } else {
+                       /* zero block: seek forward instead of writing */
+                       loff_t res;
+
+                       AuLabel(hole);
+                       res = vfsub_llseek(dst, rbytes, SEEK_CUR);
+                       err = res;
+                       if (unlikely(res < 0))
+                               break;
+               }
+               len -= rbytes;
+               err = 0;
+       }
+
+       /* the last block may be a hole */
+       if (!err && all_zero) {
+               AuLabel(last hole);
+
+               err = 1;
+               if (au_test_nfs(dst->f_dentry->d_sb)) {
+                       /* nfs requires this step to make last hole */
+                       /* is this only nfs? */
+                       do {
+                               /* todo: signal_pending? */
+                               err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
+                       } while (err == -EAGAIN || err == -EINTR);
+                       if (err == 1)
+                               dst->f_pos--;
+               }
+
+               if (err == 1) {
+                       /* truncate to f_pos; buf doubles as the iattr */
+                       ia = (void *)buf;
+                       ia->ia_size = dst->f_pos;
+                       ia->ia_valid = ATTR_SIZE | ATTR_FILE;
+                       ia->ia_file = dst;
+                       h_mtx = &dst->f_dentry->d_inode->i_mutex;
+                       mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
+                       err = vfsub_notify_change(&dst->f_path, ia);
+                       mutex_unlock(h_mtx);
+               }
+       }
+
+       return err;
+}
+
+/*
+ * copy @len bytes from @src to @dst starting at offset 0.
+ * allocates a transfer buffer of the destination's block size (capped
+ * at PAGE_SIZE, and at least sizeof(struct iattr *) because
+ * au_do_copy_file() reuses the buffer as a struct iattr).
+ */
+int au_copy_file(struct file *dst, struct file *src, loff_t len)
+{
+       int err;
+       unsigned long blksize;
+       unsigned char do_kfree;
+       char *buf;
+
+       err = -ENOMEM;
+       blksize = dst->f_dentry->d_sb->s_blocksize;
+       if (!blksize || PAGE_SIZE < blksize)
+               blksize = PAGE_SIZE;
+       AuDbg("blksize %lu\n", blksize);
+       /* kmalloc for odd sizes, a whole page otherwise */
+       do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *));
+       if (do_kfree)
+               buf = kmalloc(blksize, GFP_NOFS);
+       else
+               buf = (void *)__get_free_page(GFP_NOFS);
+       if (unlikely(!buf))
+               goto out;
+
+       if (len > (1 << 22))
+               AuDbg("copying a large file %lld\n", (long long)len);
+
+       src->f_pos = 0;
+       dst->f_pos = 0;
+       err = au_do_copy_file(dst, src, len, buf, blksize);
+       if (do_kfree)
+               kfree(buf);
+       else
+               free_page((unsigned long)buf);
+
+out:
+       return err;
+}
+
+/*
+ * to support a sparse file which is opened with O_APPEND,
+ * we need to close the file.
+ */
+/*
+ * to support a sparse file which is opened with O_APPEND,
+ * we need to close the file.
+ */
+/*
+ * open the lower source (read-only) and destination (write-only) files
+ * and copy the data with au_copy_file().  cleanup uses gcc computed
+ * gotos stored per-entry in .label/.label_file so that a failure at
+ * either open unwinds exactly the files opened so far.
+ */
+static int au_cp_regular(struct au_cp_generic *cpg)
+{
+       int err, i;
+       enum { SRC, DST };
+       struct {
+               aufs_bindex_t bindex;
+               unsigned int flags;
+               struct dentry *dentry;
+               struct file *file;
+               void *label, *label_file;
+       } *f, file[] = {
+               {
+                       .bindex = cpg->bsrc,
+                       .flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
+                       .file = NULL,
+                       .label = &&out,
+                       .label_file = &&out_src
+               },
+               {
+                       .bindex = cpg->bdst,
+                       .flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
+                       .file = NULL,
+                       .label = &&out_src,
+                       .label_file = &&out_dst
+               }
+       };
+       struct super_block *sb;
+
+       /* bsrc branch can be ro/rw. */
+       sb = cpg->dentry->d_sb;
+       f = file;
+       for (i = 0; i < 2; i++, f++) {
+               f->dentry = au_h_dptr(cpg->dentry, f->bindex);
+               /* au_h_open also grabs a branch reference, dropped below */
+               f->file = au_h_open(cpg->dentry, f->bindex, f->flags,
+                                   /*file*/NULL);
+               err = PTR_ERR(f->file);
+               if (IS_ERR(f->file))
+                       goto *f->label;
+               err = -EINVAL;
+               if (unlikely(!f->file->f_op))
+                       goto *f->label_file;
+       }
+
+       /* try stopping to update while we copyup */
+       IMustLock(file[SRC].dentry->d_inode);
+       err = au_copy_file(file[DST].file, file[SRC].file, cpg->len);
+
+out_dst:
+       fput(file[DST].file);
+       au_sbr_put(sb, file[DST].bindex);
+out_src:
+       fput(file[SRC].file);
+       au_sbr_put(sb, file[SRC].bindex);
+out:
+       return err;
+}
+
+/*
+ * copy up a regular file's data.  clamps cpg->len to the source size
+ * (len == -1 means "whole file"), snapshots the source attributes into
+ * @h_src_attr under the source i_mutex, and drops/reacquires the
+ * pinned parent-dir lock around the actual data copy.
+ */
+static int au_do_cpup_regular(struct au_cp_generic *cpg,
+                             struct au_cpup_reg_attr *h_src_attr)
+{
+       int err, rerr;
+       loff_t l;
+       struct dentry *h_src_dentry;
+       struct inode *h_src_inode;
+       struct vfsmount *h_src_mnt;
+
+       err = 0;
+       h_src_inode = au_h_iptr(cpg->dentry->d_inode, cpg->bsrc);
+       l = i_size_read(h_src_inode);
+       if (cpg->len == -1 || l < cpg->len)
+               cpg->len = l;
+       if (cpg->len) {
+               /* try stopping to update while we are referencing */
+               mutex_lock_nested(&h_src_inode->i_mutex, AuLsc_I_CHILD);
+               au_pin_hdir_unlock(cpg->pin);
+
+               h_src_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+               h_src_mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc);
+               h_src_attr->iflags = h_src_inode->i_flags;
+               err = vfs_getattr(h_src_mnt, h_src_dentry, &h_src_attr->st);
+               if (unlikely(err)) {
+                       mutex_unlock(&h_src_inode->i_mutex);
+                       goto out;
+               }
+               h_src_attr->valid = 1;
+               err = au_cp_regular(cpg);
+               mutex_unlock(&h_src_inode->i_mutex);
+               /* re-take the parent dir lock; report its failure too */
+               rerr = au_pin_hdir_relock(cpg->pin);
+               if (!err && rerr)
+                       err = rerr;
+       }
+
+out:
+       return err;
+}
+
+/*
+ * copy up a symlink: read the target of @h_src via its ->readlink()
+ * under KERNEL_DS (the buffer is a kernel address) and recreate it at
+ * @h_path in directory @h_dir.
+ */
+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
+                             struct inode *h_dir)
+{
+       int err, symlen;
+       mm_segment_t old_fs;
+       union {
+               char *k;
+               char __user *u;
+       } sym;
+
+       err = -ENOSYS;
+       if (unlikely(!h_src->d_inode->i_op->readlink))
+               goto out;
+
+       err = -ENOMEM;
+       sym.k = __getname_gfp(GFP_NOFS);
+       if (unlikely(!sym.k))
+               goto out;
+
+       /* unnecessary to support mmap_sem since symlink is not mmap-able */
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       symlen = h_src->d_inode->i_op->readlink(h_src, sym.u, PATH_MAX);
+       err = symlen;
+       set_fs(old_fs);
+
+       if (symlen > 0) {
+               /* NOTE(review): if readlink returns exactly PATH_MAX, this
+                * writes one byte past a PATH_MAX-sized name buffer --
+                * confirm __getname_gfp()'s allocation size */
+               sym.k[symlen] = 0;
+               err = vfsub_symlink(h_dir, h_path, sym.k);
+       }
+       __putname(sym.k);
+
+out:
+       return err;
+}
+
+/*
+ * create the copy-up target entry on the destination branch, matching
+ * the source inode's file type: regular files also get their data
+ * copied, directories fix up parent nlink counts, symlinks are
+ * re-created, and device/fifo/socket nodes are mknod-ed.  optionally
+ * preserves the parent directory's timestamps (AuCpup_DTIME).
+ */
+static noinline_for_stack
+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent,
+              struct au_cpup_reg_attr *h_src_attr)
+{
+       int err;
+       umode_t mode;
+       unsigned int mnt_flags;
+       unsigned char isdir;
+       const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
+       struct au_dtime dt;
+       struct path h_path;
+       struct dentry *h_src, *h_dst, *h_parent;
+       struct inode *h_inode, *h_dir;
+       struct super_block *sb;
+
+       /* bsrc branch can be ro/rw. */
+       h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+       h_inode = h_src->d_inode;
+       AuDebugOn(h_inode != au_h_iptr(cpg->dentry->d_inode, cpg->bsrc));
+
+       /* try stopping to be referenced while we are creating */
+       h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+       if (au_ftest_cpup(cpg->flags, RENAME))
+               AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX,
+                                 AUFS_WH_PFX_LEN));
+       h_parent = h_dst->d_parent; /* dir inode is locked */
+       h_dir = h_parent->d_inode;
+       IMustLock(h_dir);
+       AuDebugOn(h_parent != h_dst->d_parent);
+
+       sb = cpg->dentry->d_sb;
+       h_path.mnt = au_sbr_mnt(sb, cpg->bdst);
+       if (do_dt) {
+               h_path.dentry = h_parent;
+               au_dtime_store(&dt, dst_parent, &h_path);
+       }
+       h_path.dentry = h_dst;
+
+       isdir = 0;
+       mode = h_inode->i_mode;
+       switch (mode & S_IFMT) {
+       case S_IFREG:
+               /* S_IWUSR so the data copy can write even to an ro mode */
+               err = vfsub_create(h_dir, &h_path, mode | S_IWUSR);
+               if (!err)
+                       err = au_do_cpup_regular(cpg, h_src_attr);
+               break;
+       case S_IFDIR:
+               isdir = 1;
+               err = vfsub_mkdir(h_dir, &h_path, mode);
+               if (!err) {
+                       /*
+                        * strange behaviour from the users view,
+                        * particularly setattr case
+                        */
+                       if (au_ibstart(dst_parent->d_inode) == cpg->bdst)
+                               au_cpup_attr_nlink(dst_parent->d_inode,
+                                                  /*force*/1);
+                       au_cpup_attr_nlink(cpg->dentry->d_inode, /*force*/1);
+               }
+               break;
+       case S_IFLNK:
+               err = au_do_cpup_symlink(&h_path, h_src, h_dir);
+               break;
+       case S_IFCHR:
+       case S_IFBLK:
+               AuDebugOn(!capable(CAP_MKNOD));
+               /*FALLTHROUGH*/
+       case S_IFIFO:
+       case S_IFSOCK:
+               err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
+               break;
+       default:
+               AuIOErr("Unknown inode type 0%o\n", mode);
+               err = -EIO;
+       }
+
+       /* drop the stale xino entry after copy-up toward an upper branch */
+       mnt_flags = au_mntflags(sb);
+       if (!au_opt_test(mnt_flags, UDBA_NONE)
+           && !isdir
+           && au_opt_test(mnt_flags, XINO)
+           && h_inode->i_nlink == 1
+           /* todo: unnecessary? */
+           /* && cpg->dentry->d_inode->i_nlink == 1 */
+           && cpg->bdst < cpg->bsrc
+           && !au_ftest_cpup(cpg->flags, KEEPLINO))
+               au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0);
+               /* ignore this error */
+
+       if (do_dt)
+               au_dtime_revert(&dt);
+       return err;
+}
+
+/*
+ * after a copy-up performed under a temporary (whiteout-prefixed)
+ * name, look up a negative dentry with the real name on @bdst and
+ * rename the temporary entry onto it.  on return, @h_path->dentry
+ * holds the renamed (real-name) dentry's reference lifecycle locally;
+ * dentry slot @bdst is restored to the original lower dentry.
+ */
+static int au_do_ren_after_cpup(struct dentry *dentry, aufs_bindex_t bdst,
+                               struct path *h_path)
+{
+       int err;
+       struct dentry *h_dentry, *h_parent;
+       struct inode *h_dir;
+
+       h_dentry = dget(au_h_dptr(dentry, bdst));
+       /* temporarily clear the slot so au_lkup_neg can fill it */
+       au_set_h_dptr(dentry, bdst, NULL);
+       err = au_lkup_neg(dentry, bdst, /*wh*/0);
+       if (unlikely(err)) {
+               au_set_h_dptr(dentry, bdst, h_dentry);
+               goto out;
+       }
+
+       h_path->dentry = dget(au_h_dptr(dentry, bdst));
+       au_set_h_dptr(dentry, bdst, h_dentry);
+       h_parent = h_dentry->d_parent; /* dir inode is locked */
+       h_dir = h_parent->d_inode;
+       IMustLock(h_dir);
+       AuDbg("%.*s %.*s\n", AuDLNPair(h_dentry), AuDLNPair(h_path->dentry));
+       err = vfsub_rename(h_dir, h_dentry, h_dir, h_path);
+       dput(h_path->dentry);
+
+out:
+       return err;
+}
+
+/*
+ * copyup the @dentry from @bsrc to @bdst.
+ * the caller must set the both of lower dentries.
+ * @len is for truncating when it is -1 copyup the entire file.
+ * in link/rename cases, @dst_parent may be different from the real one.
+ */
+/*
+ * copyup the @dentry from @bsrc to @bdst.
+ * the caller must set the both of lower dentries.
+ * @len is for truncating when it is -1 copyup the entire file.
+ * in link/rename cases, @dst_parent may be different from the real one.
+ *
+ * if a pseudo-linked inode already exists on @bdst, a hard link is
+ * created instead of a full copy; otherwise the entry is created via
+ * cpup_entry(), its attributes applied, and the aufs inode's branch
+ * mapping updated.  on failure the newly created entry is removed and
+ * the parent's timestamps restored.
+ */
+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+       int err, rerr;
+       aufs_bindex_t old_ibstart;
+       unsigned char isdir, plink;
+       struct dentry *h_src, *h_dst, *h_parent;
+       struct inode *dst_inode, *h_dir, *inode;
+       struct super_block *sb;
+       struct au_branch *br;
+       /* to reduce stack size */
+       struct {
+               struct au_dtime dt;
+               struct path h_path;
+               struct au_cpup_reg_attr h_src_attr;
+       } *a;
+
+       AuDebugOn(cpg->bsrc <= cpg->bdst);
+
+       err = -ENOMEM;
+       a = kmalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+       a->h_src_attr.valid = 0;
+
+       sb = cpg->dentry->d_sb;
+       br = au_sbr(sb, cpg->bdst);
+       a->h_path.mnt = au_br_mnt(br);
+       h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+       h_parent = h_dst->d_parent; /* dir inode is locked */
+       h_dir = h_parent->d_inode;
+       IMustLock(h_dir);
+
+       h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+       inode = cpg->dentry->d_inode;
+
+       if (!dst_parent)
+               dst_parent = dget_parent(cpg->dentry);
+       else
+               dget(dst_parent);
+
+       plink = !!au_opt_test(au_mntflags(sb), PLINK);
+       dst_inode = au_h_iptr(inode, cpg->bdst);
+       if (dst_inode) {
+               /* an inode on @bdst already exists: must be a pseudo-link */
+               if (unlikely(!plink)) {
+                       err = -EIO;
+                       AuIOErr("hi%lu(i%lu) exists on b%d "
+                               "but plink is disabled\n",
+                               dst_inode->i_ino, inode->i_ino, cpg->bdst);
+                       goto out_parent;
+               }
+
+               if (dst_inode->i_nlink) {
+                       const int do_dt = au_ftest_cpup(cpg->flags, DTIME);
+
+                       /* link to the plinked entry instead of copying */
+                       h_src = au_plink_lkup(inode, cpg->bdst);
+                       err = PTR_ERR(h_src);
+                       if (IS_ERR(h_src))
+                               goto out_parent;
+                       if (unlikely(!h_src->d_inode)) {
+                               err = -EIO;
+                               AuIOErr("i%lu exists on a upper branch "
+                                       "but not pseudo-linked\n",
+                                       inode->i_ino);
+                               dput(h_src);
+                               goto out_parent;
+                       }
+
+                       if (do_dt) {
+                               a->h_path.dentry = h_parent;
+                               au_dtime_store(&a->dt, dst_parent, &a->h_path);
+                       }
+
+                       a->h_path.dentry = h_dst;
+                       err = vfsub_link(h_src, h_dir, &a->h_path);
+                       if (!err && au_ftest_cpup(cpg->flags, RENAME))
+                               err = au_do_ren_after_cpup
+                                       (cpg->dentry, cpg->bdst, &a->h_path);
+                       if (do_dt)
+                               au_dtime_revert(&a->dt);
+                       dput(h_src);
+                       goto out_parent;
+               } else
+                       /* todo: cpup_wh_file? */
+                       /* udba work */
+                       au_update_ibrange(inode, /*do_put_zero*/1);
+       }
+
+       isdir = S_ISDIR(inode->i_mode);
+       old_ibstart = au_ibstart(inode);
+       err = cpup_entry(cpg, dst_parent, &a->h_src_attr);
+       if (unlikely(err))
+               goto out_rev;
+       dst_inode = h_dst->d_inode;
+       mutex_lock_nested(&dst_inode->i_mutex, AuLsc_I_CHILD2);
+       /* todo: necessary? */
+       /* au_pin_hdir_unlock(cpg->pin); */
+
+       err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr);
+       if (unlikely(err)) {
+               /* todo: necessary? */
+               /* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */
+               mutex_unlock(&dst_inode->i_mutex);
+               goto out_rev;
+       }
+
+       if (cpg->bdst < old_ibstart) {
+               /* the copy-up target becomes the new top branch */
+               if (S_ISREG(inode->i_mode)) {
+                       err = au_dy_iaop(inode, cpg->bdst, dst_inode);
+                       if (unlikely(err)) {
+                               /* ignore an error */
+                               /* au_pin_hdir_relock(cpg->pin); */
+                               mutex_unlock(&dst_inode->i_mutex);
+                               goto out_rev;
+                       }
+               }
+               au_set_ibstart(inode, cpg->bdst);
+       }
+       au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode),
+                     au_hi_flags(inode, isdir));
+
+       /* todo: necessary? */
+       /* err = au_pin_hdir_relock(cpg->pin); */
+       mutex_unlock(&dst_inode->i_mutex);
+       if (unlikely(err))
+               goto out_rev;
+
+       /* record a pseudo-link for a multiply-linked source */
+       if (!isdir
+           && h_src->d_inode->i_nlink > 1
+           && plink)
+               au_plink_append(inode, cpg->bdst, h_dst);
+
+       if (au_ftest_cpup(cpg->flags, RENAME)) {
+               a->h_path.dentry = h_dst;
+               err = au_do_ren_after_cpup(cpg->dentry, cpg->bdst, &a->h_path);
+       }
+       if (!err)
+               goto out_parent; /* success */
+
+       /* revert */
+out_rev:
+       a->h_path.dentry = h_parent;
+       au_dtime_store(&a->dt, dst_parent, &a->h_path);
+       a->h_path.dentry = h_dst;
+       rerr = 0;
+       if (h_dst->d_inode) {
+               if (!isdir)
+                       rerr = vfsub_unlink(h_dir, &a->h_path, /*force*/0);
+               else
+                       rerr = vfsub_rmdir(h_dir, &a->h_path);
+       }
+       au_dtime_revert(&a->dt);
+       if (rerr) {
+               AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
+               err = -EIO;
+       }
+out_parent:
+       dput(dst_parent);
+       kfree(a);
+out:
+       return err;
+}
+
+#if 0 /* unused */
+struct au_cpup_single_args {
+       int *errp;
+       struct au_cp_generic *cpg;
+       struct dentry *dst_parent;
+};
+
+static void au_call_cpup_single(void *args)
+{
+       struct au_cpup_single_args *a = args;
+
+       au_pin_hdir_acquire_nest(a->cpg->pin);
+       *a->errp = au_cpup_single(a->cpg, a->dst_parent);
+       au_pin_hdir_release(a->cpg->pin);
+}
+#endif
+
+/*
+ * prevent SIGXFSZ in copy-up.
+ * testing CAP_MKNOD is for generic fs,
+ * but CAP_FSETID is for xfs only, currently.
+ */
+/*
+ * prevent SIGXFSZ in copy-up.
+ * testing CAP_MKNOD is for generic fs,
+ * but CAP_FSETID is for xfs only, currently.
+ *
+ * decide whether the copy-up must run in the superuser workqueue:
+ * returns non-zero for regular files (RLIMIT_FSIZE), device nodes
+ * without CAP_MKNOD, suid/sgid files without CAP_FSETID, or sticky
+ * parent dirs.  never when already on the workqueue or during plink
+ * maintenance by another task.
+ */
+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode)
+{
+       int do_sio;
+       struct super_block *sb;
+       struct inode *h_dir;
+
+       do_sio = 0;
+       sb = au_pinned_parent(pin)->d_sb;
+       if (!au_wkq_test()
+           && (!au_sbi(sb)->si_plink_maint_pid
+               || au_plink_maint(sb, AuLock_NOPLM))) {
+               switch (mode & S_IFMT) {
+               case S_IFREG:
+                       /* no condition about RLIMIT_FSIZE and the file size */
+                       do_sio = 1;
+                       break;
+               case S_IFCHR:
+               case S_IFBLK:
+                       do_sio = !capable(CAP_MKNOD);
+                       break;
+               }
+               if (!do_sio)
+                       do_sio = ((mode & (S_ISUID | S_ISGID))
+                                 && !capable(CAP_FSETID));
+               /* this workaround may be removed in the future */
+               if (!do_sio) {
+                       h_dir = au_pinned_h_dir(pin);
+                       do_sio = h_dir->i_mode & S_ISVTX;
+               }
+       }
+
+       return do_sio;
+}
+
+#if 0 /* unused */
+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+       int err, wkq_err;
+       struct dentry *h_dentry;
+
+       h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+       if (!au_cpup_sio_test(pin, h_dentry->d_inode->i_mode))
+               err = au_cpup_single(cpg, dst_parent);
+       else {
+               struct au_cpup_single_args args = {
+                       .errp           = &err,
+                       .cpg            = cpg,
+                       .dst_parent     = dst_parent
+               };
+               wkq_err = au_wkq_wait(au_call_cpup_single, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       return err;
+}
+#endif
+
+/*
+ * copyup the @dentry from the first active lower branch to @bdst,
+ * using au_cpup_single().
+ */
+/*
+ * copyup the @dentry from the first active lower branch to @bdst,
+ * using au_cpup_single().
+ */
+static int au_cpup_simple(struct au_cp_generic *cpg)
+{
+       int err;
+       unsigned int flags_orig;
+       aufs_bindex_t bsrc, bend;
+       struct dentry *dentry, *h_dentry;
+
+       dentry = cpg->dentry;
+       DiMustWriteLock(dentry);
+
+       bend = au_dbend(dentry);
+       /* bsrc < 0 means "auto-detect": first positive lower dentry
+        * below the destination branch */
+       if (cpg->bsrc < 0) {
+               for (bsrc = cpg->bdst + 1; bsrc <= bend; bsrc++) {
+                       h_dentry = au_h_dptr(dentry, bsrc);
+                       if (h_dentry) {
+                               AuDebugOn(!h_dentry->d_inode);
+                               break;
+                       }
+               }
+               AuDebugOn(bsrc > bend);
+               cpg->bsrc = bsrc;
+       }
+
+       /* create a negative (whiteout-named) entry on bdst first */
+       err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1);
+       if (!err) {
+               flags_orig = cpg->flags;
+               au_fset_cpup(cpg->flags, RENAME);
+               err = au_cpup_single(cpg, NULL);
+               cpg->flags = flags_orig;
+               if (!err)
+                       return 0; /* success */
+
+               /* revert */
+               au_set_h_dptr(dentry, cpg->bdst, NULL);
+               au_set_dbstart(dentry, cpg->bsrc);
+       }
+
+       return err;
+}
+
+struct au_cpup_simple_args {
+       int *errp;
+       struct au_cp_generic *cpg;
+};
+
+/* workqueue trampoline: re-acquire the pinned dir lock in the worker
+ * context, run au_cpup_simple(), and release it */
+static void au_call_cpup_simple(void *args)
+{
+       struct au_cpup_simple_args *a = args;
+
+       au_pin_hdir_acquire_nest(a->cpg->pin);
+       *a->errp = au_cpup_simple(a->cpg);
+       au_pin_hdir_release(a->cpg->pin);
+}
+
+/*
+ * front-end of au_cpup_simple(): runs the copy-up directly when the
+ * current task has sufficient permission, otherwise dispatches it to
+ * the superuser workqueue.  optionally pre-opens the lower source file
+ * (AuCpup_HOPEN) around the operation.
+ */
+int au_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+       int err, wkq_err;
+       struct dentry *dentry, *parent;
+       struct file *h_file;
+       struct inode *h_dir;
+
+       dentry = cpg->dentry;
+       h_file = NULL;
+       if (au_ftest_cpup(cpg->flags, HOPEN)) {
+               AuDebugOn(cpg->bsrc < 0);
+               h_file = au_h_open_pre(dentry, cpg->bsrc);
+               err = PTR_ERR(h_file);
+               if (IS_ERR(h_file))
+                       goto out;
+       }
+
+       parent = dget_parent(dentry);
+       h_dir = au_h_iptr(parent->d_inode, cpg->bdst);
+       if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE)
+           && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode))
+               err = au_cpup_simple(cpg);
+       else {
+               /* need privileges: run on the superuser workqueue */
+               struct au_cpup_simple_args args = {
+                       .errp           = &err,
+                       .cpg            = cpg
+               };
+               wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       dput(parent);
+       if (h_file)
+               au_h_open_post(dentry, cpg->bsrc, h_file);
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * copyup the deleted file for writing.
+ */
+/*
+ * copyup the deleted file for writing.
+ *
+ * temporarily rewires the dinfo: the destination slot points at
+ * @wh_dentry (the whiteout-named temp entry) and, when @file is given,
+ * the source slot points at the open file's dentry.  au_cpup_single()
+ * then runs with DTIME cleared, and every field is restored afterwards.
+ */
+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry,
+                        struct file *file)
+{
+       int err;
+       unsigned int flags_orig;
+       aufs_bindex_t bsrc_orig;
+       struct dentry *h_d_dst, *h_d_start;
+       struct au_dinfo *dinfo;
+       struct au_hdentry *hdp;
+
+       dinfo = au_di(cpg->dentry);
+       AuRwMustWriteLock(&dinfo->di_rwsem);
+
+       bsrc_orig = cpg->bsrc;
+       cpg->bsrc = dinfo->di_bstart;
+       hdp = dinfo->di_hdentry;
+       h_d_dst = hdp[0 + cpg->bdst].hd_dentry;
+       dinfo->di_bstart = cpg->bdst;
+       hdp[0 + cpg->bdst].hd_dentry = wh_dentry;
+       h_d_start = NULL;
+       if (file) {
+               h_d_start = hdp[0 + cpg->bsrc].hd_dentry;
+               hdp[0 + cpg->bsrc].hd_dentry = au_hf_top(file)->f_dentry;
+       }
+       flags_orig = cpg->flags;
+       /* !AuCpup_DTIME evaluates to 0: clear all flags for this copy */
+       cpg->flags = !AuCpup_DTIME;
+       err = au_cpup_single(cpg, /*h_parent*/NULL);
+       cpg->flags = flags_orig;
+       if (file) {
+               if (!err)
+                       err = au_reopen_nondir(file);
+               hdp[0 + cpg->bsrc].hd_dentry = h_d_start;
+       }
+       hdp[0 + cpg->bdst].hd_dentry = h_d_dst;
+       dinfo->di_bstart = cpg->bsrc;
+       cpg->bsrc = bsrc_orig;
+
+       return err;
+}
+
+/*
+ * copy up cpg->dentry into a whiteout-tmp name on cpg->bdst, remember it
+ * via au_set_hi_wh(), and remove the temporary name again (the inode lives
+ * on through the saved reference).  Parent dir timestamps are preserved
+ * with au_dtime_store()/au_dtime_revert().
+ */
+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+       int err;
+       aufs_bindex_t bdst;
+       struct au_dtime dt;
+       struct dentry *dentry, *parent, *h_parent, *wh_dentry;
+       struct au_branch *br;
+       struct path h_path;
+
+       dentry = cpg->dentry;
+       bdst = cpg->bdst;
+       br = au_sbr(dentry->d_sb, bdst);
+       parent = dget_parent(dentry);
+       h_parent = au_h_dptr(parent, bdst);
+       wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out;
+
+       h_path.dentry = h_parent;
+       h_path.mnt = au_br_mnt(br);
+       au_dtime_store(&dt, parent, &h_path);
+       err = au_do_cpup_wh(cpg, wh_dentry, file);
+       if (unlikely(err))
+               goto out_wh;
+
+       /* unlink/rmdir the tmp name; the copied-up inode is kept via wh */
+       dget(wh_dentry);
+       h_path.dentry = wh_dentry;
+       if (!S_ISDIR(wh_dentry->d_inode->i_mode))
+               err = vfsub_unlink(h_parent->d_inode, &h_path, /*force*/0);
+       else
+               err = vfsub_rmdir(h_parent->d_inode, &h_path);
+       if (unlikely(err)) {
+               AuIOErr("failed remove copied-up tmp file %.*s(%d)\n",
+                       AuDLNPair(wh_dentry), err);
+               err = -EIO;
+       }
+       au_dtime_revert(&dt);
+       au_set_hi_wh(dentry->d_inode, bdst, wh_dentry);
+
+out_wh:
+       dput(wh_dentry);
+out:
+       dput(parent);
+       return err;
+}
+
+/* argument pack for running au_cpup_wh() on the aufs workqueue */
+struct au_cpup_wh_args {
+       int *errp;                      /* where the worker stores its result */
+       struct au_cp_generic *cpg;
+       struct file *file;
+};
+
+/*
+ * workqueue entry point: re-take the pinned parent-dir lock in the worker
+ * context and do the whiteout copy-up.
+ */
+static void au_call_cpup_wh(void *args)
+{
+       struct au_cpup_wh_args *a = args;
+
+       au_pin_hdir_acquire_nest(a->cpg->pin);
+       *a->errp = au_cpup_wh(a->cpg, a->file);
+       au_pin_hdir_release(a->cpg->pin);
+}
+
+/*
+ * au_cpup_wh() wrapper handling two special situations:
+ * - when the destination lower dir has already been removed (i_nlink == 0),
+ *   the copy-up is redirected under the branch's orphan dir (wbr_orph) by
+ *   temporarily swapping the parent's hidden dentry/inode and re-pinning;
+ * - when the caller has no permission on the destination dir, the work is
+ *   delegated to the aufs workqueue, like au_sio_cpup_simple().
+ * All temporary swaps are undone before returning.
+ */
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+       int err, wkq_err;
+       aufs_bindex_t bdst;
+       struct dentry *dentry, *parent, *h_orph, *h_parent, *h_dentry;
+       struct inode *dir, *h_dir, *h_tmpdir;
+       struct au_wbr *wbr;
+       struct au_pin wh_pin, *pin_orig;
+
+       dentry = cpg->dentry;
+       bdst = cpg->bdst;
+       parent = dget_parent(dentry);
+       dir = parent->d_inode;
+       h_orph = NULL;
+       h_parent = NULL;
+       h_dir = au_igrab(au_h_iptr(dir, bdst));
+       h_tmpdir = h_dir;
+       pin_orig = NULL;
+       if (!h_dir->i_nlink) {
+               /* dir is dead on the branch: copy up under the orphan dir */
+               wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
+               h_orph = wbr->wbr_orph;
+
+               h_parent = dget(au_h_dptr(parent, bdst));
+               au_set_h_dptr(parent, bdst, dget(h_orph));
+               h_tmpdir = h_orph->d_inode;
+               au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
+
+               if (file)
+                       h_dentry = au_hf_top(file)->f_dentry;
+               else
+                       h_dentry = au_h_dptr(dentry, au_dbstart(dentry));
+               mutex_lock_nested(&h_tmpdir->i_mutex, AuLsc_I_PARENT3);
+               /* todo: au_h_open_pre()? */
+
+               /* re-pin against the orphan dir for the duration */
+               pin_orig = cpg->pin;
+               au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT,
+                           AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED);
+               cpg->pin = &wh_pin;
+       }
+
+       if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE)
+           && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode))
+               err = au_cpup_wh(cpg, file);
+       else {
+               struct au_cpup_wh_args args = {
+                       .errp   = &err,
+                       .cpg    = cpg,
+                       .file   = file
+               };
+               wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       if (h_orph) {
+               /* undo the orphan-dir redirection */
+               mutex_unlock(&h_tmpdir->i_mutex);
+               /* todo: au_h_open_post()? */
+               au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
+               au_set_h_dptr(parent, bdst, h_parent);
+               AuDebugOn(!pin_orig);
+               cpg->pin = pin_orig;
+       }
+       iput(h_dir);
+       dput(parent);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generic routine for both of copy-up and copy-down.
+ *
+ * Walks up from @dentry's parent until an ancestor already exists on the
+ * branch @bdst, then applies @cp (with @arg) to each missing directory on
+ * the way back down, pinning each target and re-checking after sleeps in
+ * case another task created it meanwhile.  Stops at the mount root.
+ */
+/* cf. revalidate function in file.c */
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+              int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+                        struct au_pin *pin,
+                        struct dentry *h_parent, void *arg),
+              void *arg)
+{
+       int err;
+       struct au_pin pin;
+       struct dentry *d, *parent, *h_parent, *real_parent;
+
+       err = 0;
+       parent = dget_parent(dentry);
+       if (IS_ROOT(parent))
+               goto out;
+
+       au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
+                   au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
+
+       /* do not use au_dpage */
+       real_parent = parent;
+       while (1) {
+               dput(parent);
+               parent = dget_parent(dentry);
+               h_parent = au_h_dptr(parent, bdst);
+               if (h_parent)
+                       goto out; /* success */
+
+               /* find top dir which is necessary to cpup */
+               do {
+                       d = parent;
+                       dput(parent);
+                       parent = dget_parent(d);
+                       di_read_lock_parent3(parent, !AuLock_IR);
+                       h_parent = au_h_dptr(parent, bdst);
+                       di_read_unlock(parent, !AuLock_IR);
+               } while (!h_parent);
+
+               if (d != real_parent)
+                       di_write_lock_child3(d);
+
+               /* somebody else might create while we were sleeping */
+               if (!au_h_dptr(d, bdst) || !au_h_dptr(d, bdst)->d_inode) {
+                       if (au_h_dptr(d, bdst))
+                               au_update_dbstart(d);
+
+                       au_pin_set_dentry(&pin, d);
+                       err = au_do_pin(&pin);
+                       if (!err) {
+                               err = cp(d, bdst, &pin, h_parent, arg);
+                               au_unpin(&pin);
+                       }
+               }
+
+               if (d != real_parent)
+                       di_write_unlock(d);
+               if (unlikely(err))
+                       break;
+       }
+
+out:
+       dput(parent);
+       return err;
+}
+
+/*
+ * au_cp_dirs() callback: copy up a single directory (metadata only,
+ * len == 0) to @bdst, preserving its timestamps.
+ */
+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
+                      struct au_pin *pin,
+                      struct dentry *h_parent __maybe_unused ,
+                      void *arg __maybe_unused)
+{
+       struct au_cp_generic cpg = {
+               .dentry = dentry,
+               .bdst   = bdst,
+               .bsrc   = -1,   /* let the cpup machinery pick the source */
+               .len    = 0,    /* dir: no data to copy */
+               .pin    = pin,
+               .flags  = AuCpup_DTIME
+       };
+       return au_sio_cpup_simple(&cpg);
+}
+
+/* copy up every missing ancestor directory of @dentry onto branch @bdst */
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+       return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
+}
+
+/*
+ * copy up the parent dirs only when the parent does not yet exist on
+ * @bdst.  Caller holds the parent's di lock with IR; it is temporarily
+ * upgraded to a write lock here and downgraded again before returning.
+ */
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+       int err;
+       struct dentry *parent;
+       struct inode *dir;
+
+       parent = dget_parent(dentry);
+       dir = parent->d_inode;
+       err = 0;
+       if (au_h_iptr(dir, bdst))
+               goto out;
+
+       di_read_unlock(parent, AuLock_IR);
+       di_write_lock_parent(parent);
+       /* someone else might change our inode while we were sleeping */
+       if (!au_h_iptr(dir, bdst))
+               err = au_cpup_dirs(dentry, bdst);
+       di_downgrade_lock(parent, AuLock_IR);
+
+out:
+       dput(parent);
+       return err;
+}
diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h
new file mode 100644 (file)
index 0000000..40eef8c
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * copy-up/down functions
+ */
+
+#ifndef __AUFS_CPUP_H__
+#define __AUFS_CPUP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct inode;
+struct file;
+struct au_pin;
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags);
+void au_cpup_attr_timesizes(struct inode *inode);
+void au_cpup_attr_nlink(struct inode *inode, int force);
+void au_cpup_attr_changeable(struct inode *inode);
+void au_cpup_igen(struct inode *inode, struct inode *h_inode);
+void au_cpup_attr_all(struct inode *inode, int force);
+
+/* ---------------------------------------------------------------------- */
+
+/* parameters shared by all copy-up/copy-down operations */
+struct au_cp_generic {
+       struct dentry   *dentry;        /* aufs dentry to copy */
+       aufs_bindex_t   bdst, bsrc;     /* dst/src branch indices; -1 = auto */
+       loff_t          len;            /* bytes to copy; 0 for metadata only */
+       struct au_pin   *pin;           /* pinned parent dir */
+       unsigned int    flags;          /* AuCpup_* flags below */
+};
+
+/* cpup flags */
+#define AuCpup_DTIME   1               /* do dtime_store/revert */
+#define AuCpup_KEEPLINO        (1 << 1)        /* do not clear the lower xino,
+                                          for link(2) */
+#define AuCpup_RENAME  (1 << 2)        /* rename after cpup */
+#define AuCpup_HOPEN   (1 << 3)        /* call h_open_pre/post() in cpup */
+
+#define au_ftest_cpup(flags, name)     ((flags) & AuCpup_##name)
+#define au_fset_cpup(flags, name) \
+       do { (flags) |= AuCpup_##name; } while (0)
+#define au_fclr_cpup(flags, name) \
+       do { (flags) &= ~AuCpup_##name; } while (0)
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len);
+int au_sio_cpup_simple(struct au_cp_generic *cpg);
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file);
+
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+              int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+                        struct au_pin *pin,
+                        struct dentry *h_parent, void *arg),
+              void *arg);
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+
+/* ---------------------------------------------------------------------- */
+
+/* keep timestamps when copyup */
+struct au_dtime {
+       struct dentry *dt_dentry;
+       struct path dt_h_path;
+       struct timespec dt_atime, dt_mtime;
+};
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+                   struct path *h_path);
+void au_dtime_revert(struct au_dtime *dt);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_CPUP_H__ */
diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c
new file mode 100644 (file)
index 0000000..6ba07ec
--- /dev/null
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debugfs interface
+ */
+
+#include <linux/debugfs.h>
+#include "aufs.h"
+
+#ifndef CONFIG_SYSFS
+#error DEBUG_FS depends upon SYSFS
+#endif
+
+static struct dentry *dbgaufs;
+static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+/* 20 is max digits length of ulong 64 */
+/* preformatted output buffer for one XINO debugfs file */
+struct dbgaufs_arg {
+       int n;          /* number of valid bytes in a[] */
+       char a[20 * 4]; /* room for up to four 64bit decimal numbers */
+};
+
+/*
+ * common function for all XINO files
+ */
+/* release: free the dbgaufs_arg allocated by dbgaufs_xi_open() */
+static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
+                             struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
+
+/*
+ * format the xino file @xf's size statistics (and, with @do_fcnt, its
+ * reference count) into a freshly allocated buffer stored in
+ * file->private_data, to be served later by dbgaufs_xi_read().
+ * A NULL @xf leaves the buffer empty.  getattr failures are reported in
+ * the buffer text, not as an open error.
+ */
+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt)
+{
+       int err;
+       struct kstat st;
+       struct dbgaufs_arg *p;
+
+       err = -ENOMEM;
+       p = kmalloc(sizeof(*p), GFP_NOFS);
+       if (unlikely(!p))
+               goto out;
+
+       err = 0;
+       p->n = 0;
+       file->private_data = p;
+       if (!xf)
+               goto out;
+
+       /* 2-arg vfs_getattr: the pre-3.9 kernel API */
+       err = vfs_getattr(xf->f_vfsmnt, xf->f_dentry, &st);
+       if (!err) {
+               if (do_fcnt)
+                       p->n = snprintf
+                               (p->a, sizeof(p->a), "%ld, %llux%lu %lld\n",
+                                (long)file_count(xf), st.blocks, st.blksize,
+                                (long long)st.size);
+               else
+                       p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n",
+                                       st.blocks, st.blksize,
+                                       (long long)st.size);
+               AuDebugOn(p->n >= sizeof(p->a));
+       } else {
+               p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
+               err = 0;
+       }
+
+out:
+       return err;
+
+}
+
+/* serve the buffer that dbgaufs_xi_open() preformatted */
+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
+                              size_t count, loff_t *ppos)
+{
+       struct dbgaufs_arg *arg = file->private_data;
+
+       return simple_read_from_buffer(buf, count, ppos, arg->a, arg->n);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dbgaufs_plink_arg {
+       int n;
+       char a[];
+};
+
+static int dbgaufs_plink_release(struct inode *inode __maybe_unused,
+                                struct file *file)
+{
+       free_page((unsigned long)file->private_data);
+       return 0;
+}
+
+static int dbgaufs_plink_open(struct inode *inode, struct file *file)
+{
+       int err, i, limit;
+       unsigned long n, sum;
+       struct dbgaufs_plink_arg *p;
+       struct au_sbinfo *sbinfo;
+       struct super_block *sb;
+       struct au_sphlhead *sphl;
+
+       err = -ENOMEM;
+       p = (void *)get_zeroed_page(GFP_NOFS);
+       if (unlikely(!p))
+               goto out;
+
+       err = -EFBIG;
+       sbinfo = inode->i_private;
+       sb = sbinfo->si_sb;
+       si_noflush_read_lock(sb);
+       if (au_opt_test(au_mntflags(sb), PLINK)) {
+               limit = PAGE_SIZE - sizeof(p->n);
+
+               /* the number of buckets */
+               n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH);
+               p->n += n;
+               limit -= n;
+
+               sum = 0;
+               for (i = 0, sphl = sbinfo->si_plink;
+                    i < AuPlink_NHASH;
+                    i++, sphl++) {
+                       n = au_sphl_count(sphl);
+                       sum += n;
+
+                       n = snprintf(p->a + p->n, limit, "%lu ", n);
+                       p->n += n;
+                       limit -= n;
+                       if (unlikely(limit <= 0))
+                               goto out_free;
+               }
+               p->a[p->n - 1] = '\n';
+
+               /* the sum of plinks */
+               n = snprintf(p->a + p->n, limit, "%lu\n", sum);
+               p->n += n;
+               limit -= n;
+               if (unlikely(limit <= 0))
+                       goto out_free;
+       } else {
+#define str "1\n0\n0\n"
+               p->n = sizeof(str) - 1;
+               strcpy(p->a, str);
+#undef str
+       }
+       si_read_unlock(sb);
+
+       err = 0;
+       file->private_data = p;
+       goto out; /* success */
+
+out_free:
+       free_page((unsigned long)p);
+out:
+       return err;
+}
+
+/* serve the page that dbgaufs_plink_open() preformatted */
+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct dbgaufs_plink_arg *p;
+
+       p = file->private_data;
+       return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+/* debugfs "plink" file operations */
+static const struct file_operations dbgaufs_plink_fop = {
+       .owner          = THIS_MODULE,
+       .open           = dbgaufs_plink_open,
+       .release        = dbgaufs_plink_release,
+       .read           = dbgaufs_plink_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* open the "xib" file: snapshot the external inode bitmap file's stats */
+static int dbgaufs_xib_open(struct inode *inode, struct file *file)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+       struct super_block *sb;
+
+       sbinfo = inode->i_private;
+       sb = sbinfo->si_sb;
+       si_noflush_read_lock(sb);
+       err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0);
+       si_read_unlock(sb);
+       return err;
+}
+
+/* debugfs "xib" file operations */
+static const struct file_operations dbgaufs_xib_fop = {
+       .owner          = THIS_MODULE,
+       .open           = dbgaufs_xib_open,
+       .release        = dbgaufs_xi_release,
+       .read           = dbgaufs_xi_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+#define DbgaufsXi_PREFIX "xi"
+
+/*
+ * open a per-branch "xi<N>" file: parse the branch index from the file
+ * name and snapshot that branch's xino file stats (incl. file count).
+ * Returns -ENOENT for a malformed name or an out-of-range branch index.
+ */
+static int dbgaufs_xino_open(struct inode *inode, struct file *file)
+{
+       int err;
+       long l;
+       struct au_sbinfo *sbinfo;
+       struct super_block *sb;
+       struct file *xf;
+       struct qstr *name;
+
+       err = -ENOENT;
+       xf = NULL;
+       name = &file->f_dentry->d_name;
+       if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
+                    || memcmp(name->name, DbgaufsXi_PREFIX,
+                              sizeof(DbgaufsXi_PREFIX) - 1)))
+               goto out;
+       err = kstrtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
+       if (unlikely(err))
+               goto out;
+
+       sbinfo = inode->i_private;
+       sb = sbinfo->si_sb;
+       si_noflush_read_lock(sb);
+       if (l <= au_sbend(sb)) {
+               xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file;
+               err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1);
+       } else
+               err = -ENOENT;
+       si_read_unlock(sb);
+
+out:
+       return err;
+}
+
+/* debugfs "xi<N>" file operations */
+static const struct file_operations dbgaufs_xino_fop = {
+       .owner          = THIS_MODULE,
+       .open           = dbgaufs_xino_open,
+       .release        = dbgaufs_xi_release,
+       .read           = dbgaufs_xi_read
+};
+
+/* remove the "xi<N>" debugfs entries for branches @bindex..bend */
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+       aufs_bindex_t bend;
+       struct au_branch *br;
+       struct au_xino_file *xi;
+
+       if (!au_sbi(sb)->si_dbgaufs)
+               return;
+
+       bend = au_sbend(sb);
+       for (; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               xi = &br->br_xino;
+               debugfs_remove(xi->xi_dbgaufs);
+               xi->xi_dbgaufs = NULL;
+       }
+}
+
+/*
+ * create read-only "xi<N>" debugfs entries for branches @bindex..bend
+ * under the per-sb dbgaufs dir.  Creation failures are logged but not
+ * treated as errors.
+ */
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+       struct au_sbinfo *sbinfo;
+       struct dentry *parent;
+       struct au_branch *br;
+       struct au_xino_file *xi;
+       aufs_bindex_t bend;
+       char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" bindex NULL */
+
+       sbinfo = au_sbi(sb);
+       parent = sbinfo->si_dbgaufs;
+       if (!parent)
+               return;
+
+       bend = au_sbend(sb);
+       for (; bindex <= bend; bindex++) {
+               snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
+               br = au_sbr(sb, bindex);
+               xi = &br->br_xino;
+               AuDebugOn(xi->xi_dbgaufs);
+               xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
+                                                    sbinfo, &dbgaufs_xino_fop);
+               /* ignore an error */
+               if (unlikely(!xi->xi_dbgaufs))
+                       AuWarn1("failed %s under debugfs\n", name);
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+/* open the "xigen" file: snapshot the inode-generation file's stats */
+static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+       struct super_block *sb;
+
+       sbinfo = inode->i_private;
+       sb = sbinfo->si_sb;
+       si_noflush_read_lock(sb);
+       err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0);
+       si_read_unlock(sb);
+       return err;
+}
+
+/* debugfs "xigen" file operations (CONFIG_AUFS_EXPORT only) */
+static const struct file_operations dbgaufs_xigen_fop = {
+       .owner          = THIS_MODULE,
+       .open           = dbgaufs_xigen_open,
+       .release        = dbgaufs_xi_release,
+       .read           = dbgaufs_xi_read
+};
+
+/* create the "xigen" entry under the per-sb dbgaufs dir; -EIO on failure */
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+       int err;
+
+       /*
+        * This function is a dynamic '__init' function actually,
+        * so the tiny check for si_rwsem is unnecessary.
+        */
+       /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+       err = -EIO;
+       sbinfo->si_dbgaufs_xigen = debugfs_create_file
+               ("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+                &dbgaufs_xigen_fop);
+       if (sbinfo->si_dbgaufs_xigen)
+               err = 0;
+
+       return err;
+}
+#else
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+       return 0;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * tear down the per-sb dbgaufs dir (and everything under it) and drop
+ * the sbinfo kobject reference taken by dbgaufs_si_init().
+ */
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
+{
+       /*
+        * This function is a dynamic '__init' function actually,
+        * so the tiny check for si_rwsem is unnecessary.
+        */
+       /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+       debugfs_remove_recursive(sbinfo->si_dbgaufs);
+       sbinfo->si_dbgaufs = NULL;
+       kobject_put(&sbinfo->si_kobj);
+}
+
+/*
+ * create the per-sb dbgaufs dir and its "xib", "plink" and (optionally)
+ * "xigen" entries.  Holds a kobject reference on sbinfo while the dir
+ * exists; any partial creation is rolled back via dbgaufs_si_fin().
+ */
+int dbgaufs_si_init(struct au_sbinfo *sbinfo)
+{
+       int err;
+       char name[SysaufsSiNameLen];
+
+       /*
+        * This function is a dynamic '__init' function actually,
+        * so the tiny check for si_rwsem is unnecessary.
+        */
+       /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+       err = -ENOENT;
+       if (!dbgaufs) {
+               AuErr1("/debug/aufs is uninitialized\n");
+               goto out;
+       }
+
+       err = -EIO;
+       sysaufs_name(sbinfo, name);
+       sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
+       if (unlikely(!sbinfo->si_dbgaufs))
+               goto out;
+       kobject_get(&sbinfo->si_kobj);
+
+       sbinfo->si_dbgaufs_xib = debugfs_create_file
+               ("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+                &dbgaufs_xib_fop);
+       if (unlikely(!sbinfo->si_dbgaufs_xib))
+               goto out_dir;
+
+       sbinfo->si_dbgaufs_plink = debugfs_create_file
+               ("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+                &dbgaufs_plink_fop);
+       if (unlikely(!sbinfo->si_dbgaufs_plink))
+               goto out_dir;
+
+       err = dbgaufs_xigen_init(sbinfo);
+       if (!err)
+               goto out; /* success */
+
+out_dir:
+       dbgaufs_si_fin(sbinfo);
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module exit: remove the top-level debugfs "aufs" dir */
+void dbgaufs_fin(void)
+{
+       debugfs_remove(dbgaufs);
+}
+
+/* module init: create the top-level debugfs "aufs" dir; -EIO on failure */
+int __init dbgaufs_init(void)
+{
+       int err;
+
+       err = -EIO;
+       dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
+       if (dbgaufs)
+               err = 0;
+       return err;
+}
diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h
new file mode 100644 (file)
index 0000000..f418c92
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debugfs interface
+ */
+
+#ifndef __DBGAUFS_H__
+#define __DBGAUFS_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+struct au_sbinfo;
+
+#ifdef CONFIG_DEBUG_FS
+/* dbgaufs.c */
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
+int dbgaufs_si_init(struct au_sbinfo *sbinfo);
+void dbgaufs_fin(void);
+int __init dbgaufs_init(void);
+#else
+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo)
+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo)
+AuStubVoid(dbgaufs_fin, void)
+AuStubInt0(__init dbgaufs_init, void)
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __KERNEL__ */
+#endif /* __DBGAUFS_H__ */
diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c
new file mode 100644 (file)
index 0000000..bb02c70
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#include "aufs.h"
+
+/* drop every dentry reference held in @dpage, then its backing page */
+static void au_dpage_free(struct au_dpage *dpage)
+{
+       int i;
+
+       for (i = 0; i < dpage->ndentry; i++)
+               dput(dpage->dentries[i]);
+       free_page((unsigned long)dpage->dentries);
+}
+
+/*
+ * initialize @dpages with a single, empty dpage whose dentry array is one
+ * fresh page.  Returns 0 on success, -ENOMEM otherwise (nothing leaked).
+ */
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
+{
+       int err;
+       void *p;
+
+       err = -ENOMEM;
+       dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
+       if (unlikely(!dpages->dpages))
+               goto out;
+
+       p = (void *)__get_free_page(gfp);
+       if (unlikely(!p))
+               goto out_dpages;
+
+       dpages->dpages[0].ndentry = 0;
+       dpages->dpages[0].dentries = p;
+       dpages->ndpage = 1;
+       return 0; /* success */
+
+out_dpages:
+       kfree(dpages->dpages);
+out:
+       return err;
+}
+
+/* free every dpage collected in @dpages, then the dpage array itself */
+void au_dpages_free(struct au_dcsub_pages *dpages)
+{
+       int i;
+
+       for (i = 0; i < dpages->ndpage; i++)
+               au_dpage_free(dpages->dpages + i);
+       kfree(dpages->dpages);
+}
+
+/*
+ * append a reference to @dentry to the last dpage, growing the dpage
+ * array by one (and allocating a fresh page of dentry slots) when the
+ * current page is full.  Caller must hold dentry->d_lock (dget_dlock).
+ * Returns 0 on success, -ENOMEM otherwise.
+ */
+static int au_dpages_append(struct au_dcsub_pages *dpages,
+                           struct dentry *dentry, gfp_t gfp)
+{
+       int err, sz;
+       struct au_dpage *dpage;
+       void *p;
+
+       dpage = dpages->dpages + dpages->ndpage - 1;
+       /* slots per page; sizeof(dentry) is the size of a pointer here */
+       sz = PAGE_SIZE / sizeof(dentry);
+       if (unlikely(dpage->ndentry >= sz)) {
+               AuLabel(new dpage);
+               err = -ENOMEM;
+               sz = dpages->ndpage * sizeof(*dpages->dpages);
+               p = au_kzrealloc(dpages->dpages, sz,
+                                sz + sizeof(*dpages->dpages), gfp);
+               if (unlikely(!p))
+                       goto out;
+
+               dpages->dpages = p;
+               dpage = dpages->dpages + dpages->ndpage;
+               p = (void *)__get_free_page(gfp);
+               if (unlikely(!p))
+                       goto out;
+
+               dpage->ndentry = 0;
+               dpage->dentries = p;
+               dpages->ndpage++;
+       }
+
+       AuDebugOn(!dentry->d_count);
+       dpage->dentries[dpage->ndentry++] = dget_dlock(dentry);
+       return 0; /* success */
+
+out:
+       return err;
+}
+
+/*
+ * collect (with references) every in-use aufs dentry in the subtree under
+ * @root that passes @test (NULL test accepts all), into @dpages.
+ * The walk mirrors the kernel's d_subdirs traversal under rename_lock and
+ * per-dentry d_lock; the spin_release/spin_acquire pair keeps lockdep's
+ * view consistent when descending.  Statement order here is lock-critical.
+ */
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+                  au_dpages_test test, void *arg)
+{
+       int err;
+       struct dentry *this_parent;
+       struct list_head *next;
+       struct super_block *sb = root->d_sb;
+
+       err = 0;
+       write_seqlock(&rename_lock);
+       this_parent = root;
+       spin_lock(&this_parent->d_lock);
+repeat:
+       next = this_parent->d_subdirs.next;
+resume:
+       if (this_parent->d_sb == sb
+           && !IS_ROOT(this_parent)
+           && au_di(this_parent)
+           && this_parent->d_count
+           && (!test || test(this_parent, arg))) {
+               err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto out;
+       }
+
+       while (next != &this_parent->d_subdirs) {
+               struct list_head *tmp = next;
+               struct dentry *dentry = list_entry(tmp, struct dentry,
+                                                  d_child);
+
+               next = tmp->next;
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+               if (dentry->d_count) {
+                       if (!list_empty(&dentry->d_subdirs)) {
+                               /* descend; swap locks without lockdep noise */
+                               spin_unlock(&this_parent->d_lock);
+                               spin_release(&dentry->d_lock.dep_map, 1,
+                                            _RET_IP_);
+                               this_parent = dentry;
+                               spin_acquire(&this_parent->d_lock.dep_map, 0, 1,
+                                            _RET_IP_);
+                               goto repeat;
+                       }
+                       if (dentry->d_sb == sb
+                           && au_di(dentry)
+                           && (!test || test(dentry, arg)))
+                               err = au_dpages_append(dpages, dentry,
+                                                      GFP_ATOMIC);
+               }
+               spin_unlock(&dentry->d_lock);
+               if (unlikely(err))
+                       goto out;
+       }
+
+       /* ascend back towards @root and resume the sibling scan */
+       if (this_parent != root) {
+               struct dentry *tmp;
+               struct dentry *child;
+
+               tmp = this_parent->d_parent;
+               rcu_read_lock();
+               spin_unlock(&this_parent->d_lock);
+               child = this_parent;
+               this_parent = tmp;
+               spin_lock(&this_parent->d_lock);
+               rcu_read_unlock();
+               next = child->d_child.next;
+               goto resume;
+       }
+
+out:
+       spin_unlock(&this_parent->d_lock);
+       write_sequnlock(&rename_lock);
+       return err;
+}
+
+/*
+ * collect (with references) @dentry (when @do_include) and all of its
+ * ancestors up to the mount root that are in use and pass @test, into
+ * @dpages.  rename_lock keeps the d_parent chain stable during the walk.
+ */
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+                      int do_include, au_dpages_test test, void *arg)
+{
+       int err;
+
+       err = 0;
+       write_seqlock(&rename_lock);
+       spin_lock(&dentry->d_lock);
+       if (do_include
+           && dentry->d_count
+           && (!test || test(dentry, arg)))
+               err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+       spin_unlock(&dentry->d_lock);
+       if (unlikely(err))
+               goto out;
+
+       /*
+        * vfsmount_lock is unnecessary since this is a traverse in a single
+        * mount
+        */
+       while (!IS_ROOT(dentry)) {
+               dentry = dentry->d_parent; /* rename_lock is locked */
+               spin_lock(&dentry->d_lock);
+               if (dentry->d_count
+                   && (!test || test(dentry, arg)))
+                       err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+               spin_unlock(&dentry->d_lock);
+               if (unlikely(err))
+                       break;
+       }
+
+out:
+       write_sequnlock(&rename_lock);
+       return err;
+}
+
+/* test: @dentry carries aufs dinfo and belongs to the sb passed as @arg */
+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg)
+{
+       if (!au_di(dentry))
+               return 0;
+       return dentry->d_sb == arg;
+}
+
+/* ancestor walk restricted to @dentry's own aufs superblock */
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+                           struct dentry *dentry, int do_include)
+{
+       struct super_block *sb = dentry->d_sb;
+
+       return au_dcsub_pages_rev(dpages, dentry, do_include,
+                                 au_dcsub_dpages_aufs, sb);
+}
+
+/* return non-zero when @d1 lies under @d2 (path_is_under, mnt ignored) */
+int au_test_subdir(struct dentry *d1, struct dentry *d2)
+{
+       struct path path1 = { .dentry = d1 };
+       struct path path2 = { .dentry = d2 };
+
+       return path_is_under(&path1, &path2);
+}
diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h
new file mode 100644 (file)
index 0000000..53dcbd7
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#ifndef __AUFS_DCSUB_H__
+#define __AUFS_DCSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+struct dentry;
+
+/* one page worth of collected dentry pointers */
+struct au_dpage {
+       int ndentry;            /* number of valid entries in dentries[] */
+       struct dentry **dentries;
+};
+
+/* growable array of au_dpage, filled by au_dcsub_pages*() collectors */
+struct au_dcsub_pages {
+       int ndpage;             /* number of dpages currently allocated */
+       struct au_dpage *dpages;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dcsub.c */
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
+void au_dpages_free(struct au_dcsub_pages *dpages);
+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+                  au_dpages_test test, void *arg);
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+                      int do_include, au_dpages_test test, void *arg);
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+                           struct dentry *dentry, int do_include);
+int au_test_subdir(struct dentry *d1, struct dentry *d2);
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * 0 when @d is hashed and positive (has an inode with links left),
+ * -ENOENT otherwise. Caller is expected to hold a reference on @d.
+ */
+static inline int au_d_hashed_positive(struct dentry *d)
+{
+       int err;
+       struct inode *inode = d->d_inode;
+       err = 0;
+       if (unlikely(d_unhashed(d) || !inode || !inode->i_nlink))
+               err = -ENOENT;
+       return err;
+}
+
+/*
+ * Like au_d_hashed_positive(), but a root dentry is never hashed, so
+ * test d_unlinked() instead for IS_ROOT() dentries.
+ * Returns 0 when alive, -ENOENT when dead.
+ */
+static inline int au_d_alive(struct dentry *d)
+{
+       int err;
+       struct inode *inode;
+       err = 0;
+       if (!IS_ROOT(d))
+               err = au_d_hashed_positive(d);
+       else {
+               inode = d->d_inode;
+               if (unlikely(d_unlinked(d) || !inode || !inode->i_nlink))
+                       err = -ENOENT;
+       }
+       return err;
+}
+
+/*
+ * Directory variant of au_d_alive(): additionally fails with -ENOENT when
+ * the directory inode is marked dead (IS_DEADDIR).
+ * Note: d->d_inode is only dereferenced when au_d_alive() returned 0,
+ * which guarantees the inode exists.
+ */
+static inline int au_alive_dir(struct dentry *d)
+{
+       int err;
+       err = au_d_alive(d);
+       if (unlikely(err || IS_DEADDIR(d->d_inode)))
+               err = -ENOENT;
+       return err;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DCSUB_H__ */
diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c
new file mode 100644 (file)
index 0000000..7d42feb
--- /dev/null
@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debug print functions
+ */
+
+#include <linux/vt_kern.h>
+#include "aufs.h"
+
+/* Returns 0, or -errno.  arg is in kp->arg. */
+static int param_atomic_t_set(const char *val, const struct kernel_param *kp)
+{
+       int err, n;
+
+       err = kstrtoint(val, 0, &n);
+       if (!err) {
+               if (n > 0)
+                       au_debug_on();
+               else
+                       au_debug_off();
+       }
+       return err;
+}
+
+/* Returns length written or -errno.  Buffer is 4k (ie. be short!) */
+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp)
+{
+       atomic_t *a;
+
+       a = kp->arg;
+       return sprintf(buffer, "%d", atomic_read(a));
+}
+
+/* ops table wiring the atomic_t type into the module_param machinery */
+static struct kernel_param_ops param_ops_atomic_t = {
+       .set = param_atomic_t_set,
+       .get = param_atomic_t_get
+       /* void (*free)(void *arg) */
+};
+
+/* global debug switch; >0 means debug printing is enabled */
+atomic_t aufs_debug = ATOMIC_INIT(0);
+MODULE_PARM_DESC(debug, "debug print");
+module_param_named(debug, aufs_debug, atomic_t, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* printk level used by dpri(); may be changed (e.g. by sysrq handler) */
+char *au_plevel = KERN_DEBUG;
+/*
+ * debug printf: emits when the level was raised above KERN_DEBUG or when
+ * the runtime debug switch (aufs_debug) is on.
+ */
+#define dpri(fmt, ...) do {                                    \
+       if ((au_plevel                                          \
+            && strcmp(au_plevel, KERN_DEBUG))                  \
+           || au_debug_test())                                 \
+               printk("%s" fmt, au_plevel, ##__VA_ARGS__);     \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* dump every whiteout entry of the hash table @whlist, bucket by bucket */
+void au_dpri_whlist(struct au_nhash *whlist)
+{
+       unsigned long ul, n;
+       struct hlist_head *head;
+       struct au_vdir_wh *tpos;
+       struct hlist_node *pos;
+
+       n = whlist->nh_num;
+       head = whlist->nh_head;
+       for (ul = 0; ul < n; ul++) {
+               /* 4-arg form: this kernel predates the 3.9 hlist API change */
+               hlist_for_each_entry(tpos, pos, head, wh_hash)
+                       dpri("b%d, %.*s, %d\n",
+                            tpos->wh_bindex,
+                            tpos->wh_str.len, tpos->wh_str.name,
+                            tpos->wh_str.len);
+               head++;
+       }
+}
+
+/* dump a virtual-dir object: its metadata plus each deblk pointer */
+void au_dpri_vdir(struct au_vdir *vdir)
+{
+       unsigned long ul;
+       union au_vdir_deblk_p p;
+       unsigned char *o;
+
+       if (!vdir || IS_ERR(vdir)) {
+               dpri("err %ld\n", PTR_ERR(vdir));
+               return;
+       }
+
+       dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n",
+            vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
+            vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
+       for (ul = 0; ul < vdir->vd_nblk; ul++) {
+               p.deblk = vdir->vd_deblk[ul];
+               o = p.deblk;
+               dpri("[%lu]: %p\n", ul, o);
+       }
+}
+
+/*
+ * Print one inode (branch index @bindex, -1 for the aufs-level inode),
+ * its hnotify flag @hn and optional whiteout dentry @wh.
+ * Returns 0, or -1 when @inode is NULL/ERR (so callers can stop early).
+ */
+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
+                       struct dentry *wh)
+{
+       char *n = NULL;
+       int l = 0;
+
+       if (!inode || IS_ERR(inode)) {
+               dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
+               return -1;
+       }
+
+       /* the type of i_blocks depends upon CONFIG_LBDAF */
+       BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
+                    && sizeof(inode->i_blocks) != sizeof(u64));
+       if (wh) {
+               n = (void *)wh->d_name.name;
+               l = wh->d_name.len;
+       }
+
+       /* ctime is masked to its low bits just to keep the line short */
+       dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
+            " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
+            bindex, inode,
+            inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
+            atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
+            i_size_read(inode), (unsigned long long)inode->i_blocks,
+            hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff,
+            inode->i_mapping ? inode->i_mapping->nrpages : 0,
+            inode->i_state, inode->i_flags, inode->i_version,
+            inode->i_generation,
+            l ? ", wh " : "", l, n);
+       return 0;
+}
+
+/* dump an aufs inode followed by every lower (per-branch) inode it holds */
+void au_dpri_inode(struct inode *inode)
+{
+       struct au_iinfo *iinfo;
+       aufs_bindex_t bindex;
+       int err, hn;
+
+       err = do_pri_inode(-1, inode, -1, NULL);
+       if (err || !au_test_aufs(inode->i_sb))
+               return;
+
+       iinfo = au_ii(inode);
+       if (!iinfo)
+               return;
+       dpri("i-1: bstart %d, bend %d, gen %d\n",
+            iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode, NULL));
+       if (iinfo->ii_bstart < 0)
+               return; /* no branch attached yet */
+       hn = 0;
+       for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++) {
+               hn = !!au_hn(iinfo->ii_hinode + bindex);
+               do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode, hn,
+                            iinfo->ii_hinode[0 + bindex].hi_whdentry);
+       }
+}
+
+/* dump every dentry alias of @inode, under i_lock to keep the list stable */
+void au_dpri_dalias(struct inode *inode)
+{
+       struct dentry *d;
+
+       spin_lock(&inode->i_lock);
+       list_for_each_entry(d, &inode->i_dentry, d_alias)
+               au_dpri_dentry(d);
+       spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Print one dentry (branch index @bindex, -1 for the aufs dentry) and then
+ * its inode via do_pri_inode().  Returns 0, or -1 on NULL/ERR input.
+ */
+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
+{
+       struct dentry *wh = NULL;
+       int hn;
+
+       if (!dentry || IS_ERR(dentry)) {
+               dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
+               return -1;
+       }
+       /* do not call dget_parent() here */
+       /* note: access d_xxx without d_lock */
+       dpri("d%d: %.*s?/%.*s, %s, cnt %d, flags 0x%x\n",
+            bindex,
+            AuDLNPair(dentry->d_parent), AuDLNPair(dentry),
+            dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
+            dentry->d_count, dentry->d_flags);
+       hn = -1;
+       if (bindex >= 0 && dentry->d_inode && au_test_aufs(dentry->d_sb)) {
+               struct au_iinfo *iinfo = au_ii(dentry->d_inode);
+               if (iinfo) {
+                       hn = !!au_hn(iinfo->ii_hinode + bindex);
+                       wh = iinfo->ii_hinode[0 + bindex].hi_whdentry;
+               }
+       }
+       do_pri_inode(bindex, dentry->d_inode, hn, wh);
+       return 0;
+}
+
+/* dump an aufs dentry followed by every lower (per-branch) dentry */
+void au_dpri_dentry(struct dentry *dentry)
+{
+       struct au_dinfo *dinfo;
+       aufs_bindex_t bindex;
+       int err;
+       struct au_hdentry *hdp;
+
+       err = do_pri_dentry(-1, dentry);
+       if (err || !au_test_aufs(dentry->d_sb))
+               return;
+
+       dinfo = au_di(dentry);
+       if (!dinfo)
+               return;
+       dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d\n",
+            dinfo->di_bstart, dinfo->di_bend,
+            dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry));
+       if (dinfo->di_bstart < 0)
+               return; /* no branch attached yet */
+       hdp = dinfo->di_hdentry;
+       for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++)
+               do_pri_dentry(bindex, hdp[0 + bindex].hd_dentry);
+}
+
+/*
+ * Print one struct file (branch index @bindex, -1 for the aufs-level file)
+ * and then its dentry.  Returns 0, or -1 on NULL/ERR input.
+ */
+static int do_pri_file(aufs_bindex_t bindex, struct file *file)
+{
+       char a[32];
+
+       a[0] = 0;
+       if (!file || IS_ERR(file)) {
+               dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
+               return -1;
+       }
+       a[0] = 0;
+       /* extra info (finfo generation, mmap count) only for aufs files */
+       if (bindex < 0
+           && file->f_dentry
+           && au_test_aufs(file->f_dentry->d_sb)
+           && au_fi(file))
+               snprintf(a, sizeof(a), ", gen %d, mmapped %d",
+                        au_figen(file), atomic_read(&au_fi(file)->fi_mmapped));
+       dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n",
+            bindex, file->f_mode, file->f_flags, (long)file_count(file),
+            file->f_version, file->f_pos, a);
+       if (file->f_dentry)
+               do_pri_dentry(bindex, file->f_dentry);
+       return 0;
+}
+
+/* dump an aufs file followed by each lower (per-branch) open file */
+void au_dpri_file(struct file *file)
+{
+       struct au_finfo *finfo;
+       struct au_fidir *fidir;
+       struct au_hfile *hfile;
+       aufs_bindex_t bindex;
+       int err;
+
+       err = do_pri_file(-1, file);
+       if (err || !file->f_dentry || !au_test_aufs(file->f_dentry->d_sb))
+               return;
+
+       finfo = au_fi(file);
+       if (!finfo)
+               return;
+       if (finfo->fi_btop < 0)
+               return;
+       fidir = finfo->fi_hdir;
+       if (!fidir)
+               /* non-dir: a single lower file at the top branch */
+               do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file);
+       else
+               /* dir: one lower file per branch in [fi_btop, fd_bbot] */
+               for (bindex = finfo->fi_btop;
+                    bindex >= 0 && bindex <= fidir->fd_bbot;
+                    bindex++) {
+                       hfile = fidir->fd_hfile + bindex;
+                       do_pri_file(bindex, hfile ? hfile->hf_file : NULL);
+               }
+}
+
+/*
+ * Print one branch (its permissions, id, refcount and the lower sb).
+ * Returns 0, or -1 when the branch, its mount or its sb is NULL/ERR.
+ */
+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
+{
+       struct vfsmount *mnt;
+       struct super_block *sb;
+
+       if (!br || IS_ERR(br))
+               goto out;
+       mnt = au_br_mnt(br);
+       if (!mnt || IS_ERR(mnt))
+               goto out;
+       sb = mnt->mnt_sb;
+       if (!sb || IS_ERR(sb))
+               goto out;
+
+       dpri("s%d: {perm 0x%x, id %d, cnt %d, wbr %p}, "
+            "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, "
+            "xino %d\n",
+            bindex, br->br_perm, br->br_id, atomic_read(&br->br_count),
+            br->br_wbr, au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
+            sb->s_flags, sb->s_count,
+            atomic_read(&sb->s_active), !!br->br_xino.xi_file);
+       return 0;
+
+out:
+       /*
+        * NOTE(review): PTR_ERR(br) is printed for all three failure paths,
+        * but when the error came from mnt or sb (br itself valid), the
+        * printed value is meaningless.  Harmless for a debug dump.
+        */
+       dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
+       return -1;
+}
+
+/* dump an aufs super block and every branch it stacks */
+void au_dpri_sb(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+       aufs_bindex_t bindex;
+       int err;
+       /* to reduce stack size */
+       struct {
+               struct vfsmount mnt;
+               struct au_branch fake;
+       } *a;
+
+       /* this function can be called from magic sysrq */
+       a = kzalloc(sizeof(*a), GFP_ATOMIC);
+       if (unlikely(!a)) {
+               dpri("no memory\n");
+               return;
+       }
+
+       /* build a fake branch wrapping @sb so do_pri_br() can print it */
+       a->mnt.mnt_sb = sb;
+       a->fake.br_perm = 0;
+       a->fake.br_path.mnt = &a->mnt;
+       a->fake.br_xino.xi_file = NULL;
+       atomic_set(&a->fake.br_count, 0);
+       smp_mb(); /* atomic_set */
+       err = do_pri_br(-1, &a->fake);
+       kfree(a);
+       dpri("dev 0x%x\n", sb->s_dev);
+       if (err || !au_test_aufs(sb))
+               return;
+
+       sbinfo = au_sbi(sb);
+       if (!sbinfo)
+               return;
+       dpri("nw %d, gen %u, kobj %d\n",
+            atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
+            atomic_read(&sbinfo->si_kobj.kref.refcount));
+       for (bindex = 0; bindex <= sbinfo->si_bend; bindex++)
+               do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* sleep for @jiffy jiffies; loops because the timeout may return early */
+void au_dbg_sleep_jiffy(int jiffy)
+{
+       while (jiffy)
+               jiffy = schedule_timeout_uninterruptible(jiffy);
+}
+
+/* print the name of every ATTR_* bit set in @ia->ia_valid */
+void au_dbg_iattr(struct iattr *ia)
+{
+#define AuBit(name)                                    \
+       do {                                            \
+               if (ia->ia_valid & ATTR_ ## name)       \
+                       dpri(#name "\n");               \
+       } while (0)
+       AuBit(MODE);
+       AuBit(UID);
+       AuBit(GID);
+       AuBit(SIZE);
+       AuBit(ATIME);
+       AuBit(MTIME);
+       AuBit(CTIME);
+       AuBit(ATIME_SET);
+       AuBit(MTIME_SET);
+       AuBit(FORCE);
+       AuBit(ATTR_FLAG);
+       AuBit(KILL_SUID);
+       AuBit(KILL_SGID);
+       AuBit(FILE);
+       AuBit(KILL_PRIV);
+       AuBit(OPEN);
+       AuBit(TIMES_SET);
+#undef AuBit
+       dpri("ia_file %p\n", ia->ia_file);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Sanity check: for every branch index shared by @dentry and its inode,
+ * the lower dentry's inode must match the lower inode aufs recorded.
+ * On mismatch, dump both objects and BUG().  @func/@line identify the
+ * caller (see the au_dbg_verify_dinode() wrapper macro).
+ */
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line)
+{
+       struct inode *h_inode, *inode = dentry->d_inode;
+       struct dentry *h_dentry;
+       aufs_bindex_t bindex, bend, bi;
+
+       if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */)
+               return;
+
+       /* intersect the branch ranges of the dentry and the inode */
+       bend = au_dbend(dentry);
+       bi = au_ibend(inode);
+       if (bi < bend)
+               bend = bi;
+       bindex = au_dbstart(dentry);
+       bi = au_ibstart(inode);
+       if (bi > bindex)
+               bindex = bi;
+
+       for (; bindex <= bend; bindex++) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (!h_dentry)
+                       continue;
+               h_inode = au_h_iptr(inode, bindex);
+               if (unlikely(h_inode != h_dentry->d_inode)) {
+                       au_debug_on();
+                       AuDbg("b%d, %s:%d\n", bindex, func, line);
+                       AuDbgDentry(dentry);
+                       AuDbgInode(inode);
+                       au_debug_off();
+                       BUG();
+               }
+       }
+}
+
+/*
+ * Assert that a non-root directory dentry has a parent whose generation
+ * matches @sigen (i.e. the parent has been revalidated).
+ */
+void au_dbg_verify_dir_parent(struct dentry *dentry, unsigned int sigen)
+{
+       struct dentry *parent;
+
+       parent = dget_parent(dentry);
+       AuDebugOn(!S_ISDIR(dentry->d_inode->i_mode));
+       AuDebugOn(IS_ROOT(dentry));
+       AuDebugOn(au_digen_test(parent, sigen));
+       dput(parent);
+}
+
+/*
+ * Assert that a non-directory (or negative) dentry has an up-to-date
+ * parent generation (@sigen).
+ */
+void au_dbg_verify_nondir_parent(struct dentry *dentry, unsigned int sigen)
+{
+       struct dentry *parent;
+       struct inode *inode;
+
+       parent = dget_parent(dentry);
+       inode = dentry->d_inode;
+       AuDebugOn(inode && S_ISDIR(dentry->d_inode->i_mode));
+       AuDebugOn(au_digen_test(parent, sigen));
+       dput(parent);
+}
+
+/*
+ * Assert that @parent and all its aufs descendants collected via
+ * au_dcsub_pages_rev_aufs() are at generation @sigen.
+ */
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
+{
+       int err, i, j;
+       struct au_dcsub_pages dpages;
+       struct au_dpage *dpage;
+       struct dentry **dentries;
+
+       err = au_dpages_init(&dpages, GFP_NOFS);
+       AuDebugOn(err);
+       err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1);
+       AuDebugOn(err);
+       /* iterate in reverse so ancestors are checked before descendants */
+       for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
+               dpage = dpages.dpages + i;
+               dentries = dpage->dentries;
+               for (j = dpage->ndentry - 1; !err && j >= 0; j--)
+                       AuDebugOn(au_digen_test(dentries[j], sigen));
+       }
+       au_dpages_free(&dpages);
+}
+
+/* warn (via au_dbg_blocked) when called from an aufs workqueue thread */
+void au_dbg_verify_kthread(void)
+{
+       if (au_wkq_test()) {
+               au_dbg_blocked();
+               /*
+                * It may be recursive, but udba=notify between two aufs mounts,
+                * where a single ro branch is shared, is not a problem.
+                */
+               /* WARN_ON(1); */
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Apply compile-time debug overrides (AuForce*) to a fresh sbinfo;
+ * a no-op unless one of the AuForce* macros is defined.
+ */
+void au_debug_sbinfo_init(struct au_sbinfo *sbinfo __maybe_unused)
+{
+#ifdef AuForceNoPlink
+       au_opt_clr(sbinfo->si_mntflags, PLINK);
+#endif
+#ifdef AuForceNoXino
+       au_opt_clr(sbinfo->si_mntflags, XINO);
+#endif
+#ifdef AuForceNoRefrof
+       au_opt_clr(sbinfo->si_mntflags, REFROF);
+#endif
+#ifdef AuForceHnotify
+       au_opt_set_udba(sbinfo->si_mntflags, UDBA_HNOTIFY);
+#endif
+#ifdef AuForceRd0
+       sbinfo->si_rdblk = 0;
+       sbinfo->si_rdhash = 0;
+#endif
+}
+
+/*
+ * Module-init self checks: aufs_bindex_t must be signed and the vdir
+ * string length field must be wide enough for NAME_MAX.  Always returns 0.
+ */
+int __init au_debug_init(void)
+{
+       aufs_bindex_t bindex;
+       struct au_vdir_destr destr;
+
+       /* -1 must stay negative, i.e. aufs_bindex_t must be signed */
+       bindex = -1;
+       AuDebugOn(bindex >= 0);
+
+       /* destr.len must be able to represent at least NAME_MAX */
+       destr.len = -1;
+       AuDebugOn(destr.len < NAME_MAX);
+
+#ifdef CONFIG_4KSTACKS
+       pr_warn("CONFIG_4KSTACKS is defined.\n");
+#endif
+
+#ifdef AuForceNoBrs
+       sysaufs_brs = 0;
+#endif
+
+       return 0;
+}
diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h
new file mode 100644 (file)
index 0000000..f5b8fbd
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debug print functions
+ */
+
+#ifndef __AUFS_DEBUG_H__
+#define __AUFS_DEBUG_H__
+
+#ifdef __KERNEL__
+
+#include <asm/system.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDebugOn(a)           BUG_ON(a)
+
+/* module parameter */
+extern atomic_t aufs_debug;
+/* raise the debug nesting counter (debug output enabled while > 0) */
+static inline void au_debug_on(void)
+{
+       atomic_inc(&aufs_debug);
+}
+/* lower the counter, never below zero */
+static inline void au_debug_off(void)
+{
+       atomic_dec_if_positive(&aufs_debug);
+}
+
+/* non-zero when debug output is currently enabled */
+static inline int au_debug_test(void)
+{
+       return atomic_read(&aufs_debug) > 0;
+}
+#else
+#define AuDebugOn(a)           do {} while (0)
+AuStubVoid(au_debug_on, void)
+AuStubVoid(au_debug_off, void)
+AuStubInt0(au_debug_test, void)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* type check so "atomic_t" works as a module_param type (see debug.c) */
+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t)
+
+/* ---------------------------------------------------------------------- */
+
+/* debug print */
+
+#define AuDbg(fmt, ...) do { \
+       if (au_debug_test()) \
+               pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \
+} while (0)
+#define AuLabel(l)             AuDbg(#l "\n")
+#define AuIOErr(fmt, ...)      pr_err("I/O Error, " fmt, ##__VA_ARGS__)
+#define AuWarn1(fmt, ...) do { \
+       static unsigned char _c; \
+       if (!_c++) \
+               pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuErr1(fmt, ...) do { \
+       static unsigned char _c; \
+       if (!_c++) \
+               pr_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuIOErr1(fmt, ...) do { \
+       static unsigned char _c; \
+       if (!_c++) \
+               AuIOErr(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuUnsupportMsg "This operation is not supported." \
+                       " Please report this application to aufs-users ML."
+#define AuUnsupport(fmt, ...) do { \
+       pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \
+       dump_stack(); \
+} while (0)
+
+#define AuTraceErr(e) do { \
+       if (unlikely((e) < 0)) \
+               AuDbg("err %d\n", (int)(e)); \
+} while (0)
+
+#define AuTraceErrPtr(p) do { \
+       if (IS_ERR(p)) \
+               AuDbg("err %ld\n", PTR_ERR(p)); \
+} while (0)
+
+/* dirty macros for debug print, use with "%.*s" and caution */
+#define AuLNPair(qstr)         (qstr)->len, (qstr)->name
+#define AuDLNPair(d)           AuLNPair(&(d)->d_name)
+
+/* ---------------------------------------------------------------------- */
+
+struct au_sbinfo;
+struct au_finfo;
+struct dentry;
+#ifdef CONFIG_AUFS_DEBUG
+extern char *au_plevel;
+struct au_nhash;
+void au_dpri_whlist(struct au_nhash *whlist);
+struct au_vdir;
+void au_dpri_vdir(struct au_vdir *vdir);
+struct inode;
+void au_dpri_inode(struct inode *inode);
+void au_dpri_dalias(struct inode *inode);
+void au_dpri_dentry(struct dentry *dentry);
+struct file;
+void au_dpri_file(struct file *filp);
+struct super_block;
+void au_dpri_sb(struct super_block *sb);
+
+void au_dbg_sleep_jiffy(int jiffy);
+struct iattr;
+void au_dbg_iattr(struct iattr *ia);
+
+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__)
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line);
+void au_dbg_verify_dir_parent(struct dentry *dentry, unsigned int sigen);
+void au_dbg_verify_nondir_parent(struct dentry *dentry, unsigned int sigen);
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
+void au_dbg_verify_kthread(void);
+
+int __init au_debug_init(void);
+void au_debug_sbinfo_init(struct au_sbinfo *sbinfo);
+#define AuDbgWhlist(w) do { \
+       AuDbg(#w "\n"); \
+       au_dpri_whlist(w); \
+} while (0)
+
+#define AuDbgVdir(v) do { \
+       AuDbg(#v "\n"); \
+       au_dpri_vdir(v); \
+} while (0)
+
+#define AuDbgInode(i) do { \
+       AuDbg(#i "\n"); \
+       au_dpri_inode(i); \
+} while (0)
+
+#define AuDbgDAlias(i) do { \
+       AuDbg(#i "\n"); \
+       au_dpri_dalias(i); \
+} while (0)
+
+#define AuDbgDentry(d) do { \
+       AuDbg(#d "\n"); \
+       au_dpri_dentry(d); \
+} while (0)
+
+#define AuDbgFile(f) do { \
+       AuDbg(#f "\n"); \
+       au_dpri_file(f); \
+} while (0)
+
+#define AuDbgSb(sb) do { \
+       AuDbg(#sb "\n"); \
+       au_dpri_sb(sb); \
+} while (0)
+
+#define AuDbgSleep(sec) do { \
+       AuDbg("sleep %d sec\n", sec); \
+       ssleep(sec); \
+} while (0)
+
+#define AuDbgSleepJiffy(jiffy) do { \
+       AuDbg("sleep %d jiffies\n", jiffy); \
+       au_dbg_sleep_jiffy(jiffy); \
+} while (0)
+
+#define AuDbgIAttr(ia) do { \
+       AuDbg("ia_valid 0x%x\n", (ia)->ia_valid); \
+       au_dbg_iattr(ia); \
+} while (0)
+
+#define AuDbgSym(addr) do {                            \
+       char sym[KSYM_SYMBOL_LEN];                      \
+       sprint_symbol(sym, (unsigned long)addr);        \
+       AuDbg("%s\n", sym);                             \
+} while (0)
+
+#define AuInfoSym(addr) do {                           \
+       char sym[KSYM_SYMBOL_LEN];                      \
+       sprint_symbol(sym, (unsigned long)addr);        \
+       AuInfo("%s\n", sym);                            \
+} while (0)
+#else
+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry)
+AuStubVoid(au_dbg_verify_dir_parent, struct dentry *dentry, unsigned int sigen)
+AuStubVoid(au_dbg_verify_nondir_parent, struct dentry *dentry,
+          unsigned int sigen)
+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen)
+AuStubVoid(au_dbg_verify_kthread, void)
+AuStubInt0(__init au_debug_init, void)
+AuStubVoid(au_debug_sbinfo_init, struct au_sbinfo *sbinfo)
+
+#define AuDbgWhlist(w)         do {} while (0)
+#define AuDbgVdir(v)           do {} while (0)
+#define AuDbgInode(i)          do {} while (0)
+#define AuDbgDAlias(i)         do {} while (0)
+#define AuDbgDentry(d)         do {} while (0)
+#define AuDbgFile(f)           do {} while (0)
+#define AuDbgSb(sb)            do {} while (0)
+#define AuDbgSleep(sec)                do {} while (0)
+#define AuDbgSleepJiffy(jiffy) do {} while (0)
+#define AuDbgIAttr(ia)         do {} while (0)
+#define AuDbgSym(addr)         do {} while (0)
+#define AuInfoSym(addr)                do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+int __init au_sysrq_init(void);
+void au_sysrq_fin(void);
+
+#ifdef CONFIG_HW_CONSOLE
+/* report a blocked task: warn and trigger sysrq-w (show blocked tasks) */
+#define au_dbg_blocked() do { \
+       WARN_ON(1); \
+       handle_sysrq('w'); \
+} while (0)
+#else
+AuStubVoid(au_dbg_blocked, void)
+#endif
+
+#else
+AuStubInt0(__init au_sysrq_init, void)
+AuStubVoid(au_sysrq_fin, void)
+AuStubVoid(au_dbg_blocked, void)
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DEBUG_H__ */
diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c
new file mode 100644 (file)
index 0000000..98e94d3
--- /dev/null
@@ -0,0 +1,1144 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#include <linux/namei.h>
+#include "aufs.h"
+
+/*
+ * Prepare the nameidata @h_nd handed to a lower-branch lookup, based on the
+ * caller's @nd.  When @nd is NULL, @h_nd is simply zero-filled.
+ */
+static void au_h_nd(struct nameidata *h_nd, struct nameidata *nd)
+{
+       if (nd) {
+               *h_nd = *nd;
+
+               /*
+                * gave up supporting LOOKUP_CREATE/OPEN for lower fs,
+                * due to whiteout and branch permission.
+                */
+               h_nd->flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
+                                | LOOKUP_FOLLOW | LOOKUP_EXCL);
+               /* unnecessary? */
+               h_nd->intent.open.file = NULL;
+       } else
+               memset(h_nd, 0, sizeof(*h_nd));
+}
+
+/* argument pack for running au_lkup_one() via the aufs workqueue */
+struct au_lkup_one_args {
+       struct dentry **errp;   /* out: resulting dentry or ERR_PTR */
+       struct qstr *name;
+       struct dentry *h_parent;
+       struct au_branch *br;
+       struct nameidata *nd;
+};
+
+/*
+ * Look up @name under @h_parent on the lower branch @br.
+ * Returns a (positive or negative) dentry or an ERR_PTR.
+ */
+struct dentry *au_lkup_one(struct qstr *name, struct dentry *h_parent,
+                          struct au_branch *br, struct nameidata *nd)
+{
+       struct dentry *h_dentry;
+       int err;
+       struct nameidata h_nd;
+
+       /* some filesystems take no nameidata; use the simple path for them */
+       if (au_test_fs_null_nd(h_parent->d_sb))
+               return vfsub_lookup_one_len(name->name, h_parent, name->len);
+
+       au_h_nd(&h_nd, nd);
+       h_nd.path.dentry = h_parent;
+       h_nd.path.mnt = au_br_mnt(br);
+
+       err = vfsub_name_hash(name->name, &h_nd.last, name->len);
+       h_dentry = ERR_PTR(err);
+       if (!err) {
+               /* hold the path across the hashed lookup */
+               path_get(&h_nd.path);
+               h_dentry = vfsub_lookup_hash(&h_nd);
+               path_put(&h_nd.path);
+       }
+
+       AuTraceErrPtr(h_dentry);
+       return h_dentry;
+}
+
+/* workqueue trampoline: unpack au_lkup_one_args and run au_lkup_one() */
+static void au_call_lkup_one(void *args)
+{
+       struct au_lkup_one_args *a = args;
+       *a->errp = au_lkup_one(a->name, a->h_parent, a->br, a->nd);
+}
+
+/* lookup-flag bit and its test/set/clear helpers */
+#define AuLkup_ALLOW_NEG       1
+#define au_ftest_lkup(flags, name)     ((flags) & AuLkup_##name)
+#define au_fset_lkup(flags, name) \
+       do { (flags) |= AuLkup_##name; } while (0)
+#define au_fclr_lkup(flags, name) \
+       do { (flags) &= ~AuLkup_##name; } while (0)
+
+/* per-branch lookup state shared across au_do_lookup() calls */
+struct au_do_lookup_args {
+       unsigned int            flags;  /* AuLkup_* bits */
+       mode_t                  type;   /* expected S_IFMT type, 0 = any */
+       struct nameidata        *nd;
+};
+
+/*
+ * returns positive/negative dentry, NULL or an error.
+ * NULL means whiteout-ed or not-found.
+ */
+static struct dentry*
+au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
+            aufs_bindex_t bindex, struct qstr *wh_name,
+            struct au_do_lookup_args *args)
+{
+       struct dentry *h_dentry;
+       struct inode *h_inode, *inode;
+       struct au_branch *br;
+       int wh_found, opq;
+       unsigned char wh_able;
+       const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
+
+       /* check the whiteout first, if this branch can hold one */
+       wh_found = 0;
+       br = au_sbr(dentry->d_sb, bindex);
+       wh_able = !!au_br_whable(br->br_perm);
+       if (wh_able)
+               wh_found = au_wh_test(h_parent, wh_name, br, /*try_sio*/0);
+       h_dentry = ERR_PTR(wh_found);
+       if (!wh_found)
+               goto real_lookup;
+       if (unlikely(wh_found < 0))
+               goto out;
+
+       /* We found a whiteout */
+       /* au_set_dbend(dentry, bindex); */
+       au_set_dbwh(dentry, bindex);
+       if (!allow_neg)
+               return NULL; /* success */
+
+real_lookup:
+       h_dentry = au_lkup_one(&dentry->d_name, h_parent, br, args->nd);
+       if (IS_ERR(h_dentry))
+               goto out;
+
+       h_inode = h_dentry->d_inode;
+       if (!h_inode) {
+               if (!allow_neg)
+                       goto out_neg;
+       } else if (wh_found
+                  || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
+               /* whiteout-ed or type mismatch: treat as not found */
+               goto out_neg;
+
+       /* widen the dentry's branch range to include this index */
+       if (au_dbend(dentry) <= bindex)
+               au_set_dbend(dentry, bindex);
+       if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
+               au_set_dbstart(dentry, bindex);
+       au_set_h_dptr(dentry, bindex, h_dentry);
+
+       inode = dentry->d_inode;
+       if (!h_inode || !S_ISDIR(h_inode->i_mode) || !wh_able
+           || (inode && !S_ISDIR(inode->i_mode)))
+               goto out; /* success */
+
+       /* directory on a whiteout-able branch: test the diropq mark */
+       mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+       opq = au_diropq_test(h_dentry, br);
+       mutex_unlock(&h_inode->i_mutex);
+       if (opq > 0)
+               au_set_dbdiropq(dentry, bindex);
+       else if (unlikely(opq < 0)) {
+               au_set_h_dptr(dentry, bindex, NULL);
+               h_dentry = ERR_PTR(opq);
+       }
+       goto out;
+
+out_neg:
+       dput(h_dentry);
+       h_dentry = NULL;
+out:
+       return h_dentry;
+}
+
+/*
+ * Reject names carrying the whiteout prefix unless the SHWH (show-whiteout)
+ * mount option is enabled.  Returns 0 or -EPERM.
+ */
+static int au_test_shwh(struct super_block *sb, const struct qstr *name)
+{
+       if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
+                    && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
+               return -EPERM;
+       return 0;
+}
+
+/*
+ * returns the number of lower positive dentries,
+ * otherwise an error.
+ * can be called at unlinking with @type is zero.
+ */
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type,
+                  struct nameidata *nd)
+{
+       int npositive, err;
+       aufs_bindex_t bindex, btail, bdiropq;
+       unsigned char isdir;
+       struct qstr whname;
+       struct au_do_lookup_args args = {
+               .flags  = 0,
+               .type   = type,
+               .nd     = nd
+       };
+       const struct qstr *name = &dentry->d_name;
+       struct dentry *parent;
+       struct inode *inode;
+
+       err = au_test_shwh(dentry->d_sb, name);
+       if (unlikely(err))
+               goto out;
+
+       /* whiteout name buffer, freed at out_parent/out */
+       err = au_wh_name_alloc(&whname, name);
+       if (unlikely(err))
+               goto out;
+
+       inode = dentry->d_inode;
+       isdir = !!(inode && S_ISDIR(inode->i_mode));
+       if (!type)
+               /* type unknown (e.g. unlink): a negative result is acceptable */
+               au_fset_lkup(args.flags, ALLOW_NEG);
+
+       /* walk the branches from @bstart down to the parent's tail */
+       npositive = 0;
+       parent = dget_parent(dentry);
+       btail = au_dbtaildir(parent);
+       for (bindex = bstart; bindex <= btail; bindex++) {
+               struct dentry *h_parent, *h_dentry;
+               struct inode *h_inode, *h_dir;
+
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (h_dentry) {
+                       /* already looked up on this branch */
+                       if (h_dentry->d_inode)
+                               npositive++;
+                       if (type != S_IFDIR)
+                               break;
+                       continue;
+               }
+               h_parent = au_h_dptr(parent, bindex);
+               if (!h_parent)
+                       continue;
+               h_dir = h_parent->d_inode;
+               if (!h_dir || !S_ISDIR(h_dir->i_mode))
+                       continue;
+
+               mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
+               h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname,
+                                       &args);
+               mutex_unlock(&h_dir->i_mutex);
+               err = PTR_ERR(h_dentry);
+               if (IS_ERR(h_dentry))
+                       goto out_parent;
+               /* only the first branch may yield a negative dentry */
+               au_fclr_lkup(args.flags, ALLOW_NEG);
+
+               if (au_dbwh(dentry) >= 0)
+                       break;
+               if (!h_dentry)
+                       continue;
+               h_inode = h_dentry->d_inode;
+               if (!h_inode)
+                       continue;
+               npositive++;
+               if (!args.type)
+                       /* lock the expected type to the first positive hit */
+                       args.type = h_inode->i_mode & S_IFMT;
+               if (args.type != S_IFDIR)
+                       break;
+               else if (isdir) {
+                       /* the type of lower may be different */
+                       bdiropq = au_dbdiropq(dentry);
+                       if (bdiropq >= 0 && bdiropq <= bindex)
+                               break;
+               }
+       }
+
+       if (npositive) {
+               AuLabel(positive);
+               au_update_dbstart(dentry);
+       }
+       err = npositive;
+       if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+                    && au_dbstart(dentry) < 0)) {
+               err = -EIO;
+               AuIOErr("both of real entry and whiteout found, %.*s, err %d\n",
+                       AuDLNPair(dentry), err);
+       }
+
+out_parent:
+       dput(parent);
+       kfree(whname.name);
+out:
+       return err;
+}
+
+/*
+ * Look up @name under @parent, switching to superio (workqueue) context when
+ * the current credentials lack MAY_EXEC on the lower parent.
+ * Returns a dentry or an ERR_PTR.
+ */
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
+                              struct au_branch *br)
+{
+       struct dentry *dentry;
+       int wkq_err;
+
+       if (!au_test_h_perm_sio(parent->d_inode, MAY_EXEC))
+               dentry = au_lkup_one(name, parent, br, /*nd*/NULL);
+       else {
+               struct au_lkup_one_args args = {
+                       .errp           = &dentry,
+                       .name           = name,
+                       .h_parent       = parent,
+                       .br             = br,
+                       .nd             = NULL
+               };
+
+               /* run the same lookup with workqueue (root) credentials */
+               wkq_err = au_wkq_wait(au_call_lkup_one, &args);
+               if (unlikely(wkq_err))
+                       dentry = ERR_PTR(wkq_err);
+       }
+
+       return dentry;
+}
+
+/*
+ * lookup @dentry on @bindex which should be negative.
+ */
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh)
+{
+       int err;
+       struct dentry *parent, *h_parent, *h_dentry;
+       struct au_branch *br;
+
+       parent = dget_parent(dentry);
+       h_parent = au_h_dptr(parent, bindex);
+       br = au_sbr(dentry->d_sb, bindex);
+       if (wh)
+               /* look up the whiteout-tmp name instead of the real one */
+               h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+       else
+               h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent, br);
+       err = PTR_ERR(h_dentry);
+       if (IS_ERR(h_dentry))
+               goto out;
+       if (unlikely(h_dentry->d_inode)) {
+               /* caller expected a negative dentry on this branch */
+               err = -EIO;
+               AuIOErr("%.*s should be negative on b%d.\n",
+                       AuDLNPair(h_dentry), bindex);
+               dput(h_dentry);
+               goto out;
+       }
+
+       err = 0;
+       /* record the negative dentry and widen the branch range */
+       if (bindex < au_dbstart(dentry))
+               au_set_dbstart(dentry, bindex);
+       if (au_dbend(dentry) < bindex)
+               au_set_dbend(dentry, bindex);
+       au_set_h_dptr(dentry, bindex, h_dentry);
+
+out:
+       dput(parent);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* subset of struct inode, snapshotted to detect lower-inode changes */
+struct au_iattr {
+       unsigned long           i_ino;
+       /* unsigned int         i_nlink; */
+       uid_t                   i_uid;
+       gid_t                   i_gid;
+       u64                     i_version;
+/*
+       loff_t                  i_size;
+       blkcnt_t                i_blocks;
+*/
+       umode_t                 i_mode; /* S_IFMT bits only */
+};
+
+/* snapshot the watched fields of @h_inode into @ia */
+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
+{
+       ia->i_ino = h_inode->i_ino;
+       /* ia->i_nlink = h_inode->i_nlink; */
+       ia->i_uid = h_inode->i_uid;
+       ia->i_gid = h_inode->i_gid;
+       ia->i_version = h_inode->i_version;
+/*
+       ia->i_size = h_inode->i_size;
+       ia->i_blocks = h_inode->i_blocks;
+*/
+       ia->i_mode = (h_inode->i_mode & S_IFMT);
+}
+
+/* return non-zero when @h_inode differs from the snapshot in @ia */
+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
+{
+       return ia->i_ino != h_inode->i_ino
+               /* || ia->i_nlink != h_inode->i_nlink */
+               || ia->i_uid != h_inode->i_uid
+               || ia->i_gid != h_inode->i_gid
+               || ia->i_version != h_inode->i_version
+/*
+               || ia->i_size != h_inode->i_size
+               || ia->i_blocks != h_inode->i_blocks
+*/
+               || ia->i_mode != (h_inode->i_mode & S_IFMT);
+}
+
+/*
+ * Verify that the cached lower dentry @h_dentry is still current by
+ * re-looking it up under @h_parent and comparing inode attributes.
+ * Returns 0 when unchanged, otherwise a busy/stale error.
+ */
+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
+                             struct au_branch *br)
+{
+       int err;
+       struct au_iattr ia;
+       struct inode *h_inode;
+       struct dentry *h_d;
+       struct super_block *h_sb;
+
+       err = 0;
+       memset(&ia, -1, sizeof(ia));
+       h_sb = h_dentry->d_sb;
+       h_inode = h_dentry->d_inode;
+       if (h_inode)
+               au_iattr_save(&ia, h_inode);
+       else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
+               /* nfs d_revalidate may return 0 for negative dentry */
+               /* fuse d_revalidate always return 0 for negative dentry */
+               goto out;
+
+       /* main purpose is namei.c:cached_lookup() and d_revalidate */
+       h_d = au_lkup_one(&h_dentry->d_name, h_parent, br, /*nd*/NULL);
+       err = PTR_ERR(h_d);
+       if (IS_ERR(h_d))
+               goto out;
+
+       err = 0;
+       /* stale if the lookup yields a different dentry/inode or the
+        * snapshotted attributes changed */
+       if (unlikely(h_d != h_dentry
+                    || h_d->d_inode != h_inode
+                    || (h_inode && au_iattr_test(&ia, h_inode))))
+               err = au_busy_or_stale();
+       dput(h_d);
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * UDBA (user's direct branch access) verification dispatcher.
+ * REVAL mode on a local fs only checks the parent link; other non-NONE
+ * modes do the full dentry re-lookup.  Returns 0 when still valid.
+ */
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+               struct dentry *h_parent, struct au_branch *br)
+{
+       int err;
+
+       err = 0;
+       if (udba == AuOpt_UDBA_REVAL
+           && !au_test_fs_remote(h_dentry->d_sb)) {
+               IMustLock(h_dir);
+               /* non-zero (invalid) when the parent inode changed */
+               err = (h_dentry->d_parent->d_inode != h_dir);
+       } else if (udba != AuOpt_UDBA_NONE)
+               err = au_h_verify_dentry(h_dentry, h_parent, br);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Re-map @dentry's lower-dentry array after branches were added/removed/
+ * reordered, then recompute di_bstart/di_bend/di_bwh/di_bdiropq.
+ * Returns 0 on success, -EIO when no lower dentry remains.
+ */
+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent)
+{
+       int err;
+       aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq;
+       struct au_hdentry tmp, *p, *q;
+       struct au_dinfo *dinfo;
+       struct super_block *sb;
+
+       DiMustWriteLock(dentry);
+
+       sb = dentry->d_sb;
+       dinfo = au_di(dentry);
+       bend = dinfo->di_bend;
+       bwh = dinfo->di_bwh;
+       bdiropq = dinfo->di_bdiropq;
+       p = dinfo->di_hdentry + dinfo->di_bstart;
+       for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) {
+               if (!p->hd_dentry)
+                       continue;
+
+               /* where did this branch move to? */
+               new_bindex = au_br_index(sb, p->hd_id);
+               if (new_bindex == bindex)
+                       continue;
+
+               if (dinfo->di_bwh == bindex)
+                       bwh = new_bindex;
+               if (dinfo->di_bdiropq == bindex)
+                       bdiropq = new_bindex;
+               if (new_bindex < 0) {
+                       /* branch was removed: drop the lower dentry */
+                       au_hdput(p);
+                       p->hd_dentry = NULL;
+                       continue;
+               }
+
+               /* swap two lower dentries, and loop again */
+               q = dinfo->di_hdentry + new_bindex;
+               tmp = *q;
+               *q = *p;
+               *p = tmp;
+               if (tmp.hd_dentry) {
+                       /* re-examine the entry swapped into this slot */
+                       bindex--;
+                       p--;
+               }
+       }
+
+       dinfo->di_bwh = -1;
+       if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh))
+               dinfo->di_bwh = bwh;
+
+       dinfo->di_bdiropq = -1;
+       if (bdiropq >= 0
+           && bdiropq <= au_sbend(sb)
+           && au_sbr_whable(sb, bdiropq))
+               dinfo->di_bdiropq = bdiropq;
+
+       /* recompute the [bstart, bend] range from the surviving entries */
+       err = -EIO;
+       dinfo->di_bstart = -1;
+       dinfo->di_bend = -1;
+       bend = au_dbend(parent);
+       p = dinfo->di_hdentry;
+       for (bindex = 0; bindex <= bend; bindex++, p++)
+               if (p->hd_dentry) {
+                       dinfo->di_bstart = bindex;
+                       break;
+               }
+
+       if (dinfo->di_bstart >= 0) {
+               p = dinfo->di_hdentry + bend;
+               for (bindex = bend; bindex >= 0; bindex--, p--)
+                       if (p->hd_dentry) {
+                               dinfo->di_bend = bindex;
+                               err = 0;
+                               break;
+                       }
+       }
+
+       return err;
+}
+
+/*
+ * Make a single dentry disappear from the aufs namespace: drop a link on
+ * regular files, kill directories (S_DEAD stops further lookups), then
+ * unhash the dentry.
+ */
+static void au_do_hide(struct dentry *dentry)
+{
+       struct inode *inode;
+
+       inode = dentry->d_inode;
+       if (inode) {
+               if (!S_ISDIR(inode->i_mode)) {
+                       if (inode->i_nlink && !d_unhashed(dentry))
+                               drop_nlink(inode);
+               } else {
+                       clear_nlink(inode);
+                       /* stop next lookup */
+                       inode->i_flags |= S_DEAD;
+               }
+               smp_mb(); /* necessary? */
+       }
+       d_drop(dentry);
+}
+
+/*
+ * Hide every cached descendant of @parent (excluding @parent itself),
+ * walking the collected dentry pages in reverse (deepest-first) order.
+ */
+static int au_hide_children(struct dentry *parent)
+{
+       int err, i, j, ndentry;
+       struct au_dcsub_pages dpages;
+       struct au_dpage *dpage;
+       struct dentry *dentry;
+
+       err = au_dpages_init(&dpages, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+       err = au_dcsub_pages(&dpages, parent, NULL, NULL);
+       if (unlikely(err))
+               goto out_dpages;
+
+       /* in reverse order */
+       for (i = dpages.ndpage - 1; i >= 0; i--) {
+               dpage = dpages.dpages + i;
+               ndentry = dpage->ndentry;
+               for (j = ndentry - 1; j >= 0; j--) {
+                       dentry = dpage->dentries[j];
+                       if (dentry != parent)
+                               au_do_hide(dentry);
+               }
+       }
+
+out_dpages:
+       au_dpages_free(&dpages);
+out:
+       return err;
+}
+
+/*
+ * Hide @dentry and, when it is a directory, its cached children first.
+ * Failures while hiding children are logged and ignored.
+ */
+static void au_hide(struct dentry *dentry)
+{
+       int err;
+       struct inode *inode;
+
+       AuDbgDentry(dentry);
+       inode = dentry->d_inode;
+       if (inode && S_ISDIR(inode->i_mode)) {
+               /* shrink_dcache_parent(dentry); */
+               err = au_hide_children(dentry);
+               if (unlikely(err))
+                       AuIOErr("%.*s, failed hiding children, ignored %d\n",
+                               AuDLNPair(dentry), err);
+       }
+       au_do_hide(dentry);
+}
+
+/*
+ * By adding a dirty branch, a cached dentry may be affected in various ways.
+ *
+ * a dirty branch is added
+ * - on the top of layers
+ * - in the middle of layers
+ * - to the bottom of layers
+ *
+ * on the added branch there exists
+ * - a whiteout
+ * - a diropq
+ * - a same named entry
+ *   + exist
+ *     * negative --> positive
+ *     * positive --> positive
+ *      - type is unchanged
+ *      - type is changed
+ *   + doesn't exist
+ *     * negative --> negative
+ *     * positive --> negative (rejected by au_br_del() for non-dir case)
+ * - none
+ */
+/*
+ * Reconcile @dentry's current dinfo (@dinfo) with the freshly looked-up
+ * one (@tmp) after a branch change, handling every negative/positive
+ * transition described in the comment above.  Returns 0 or a negative errno.
+ */
+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo,
+                              struct au_dinfo *tmp)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       struct {
+               struct dentry *dentry;
+               struct inode *inode;
+               mode_t mode;
+       } orig_h, tmp_h;
+       struct au_hdentry *hd;
+       struct inode *inode, *h_inode;
+       struct dentry *h_dentry;
+
+       err = 0;
+       AuDebugOn(dinfo->di_bstart < 0);
+       /* snapshot the topmost lower dentry/inode/type of both dinfos */
+       orig_h.dentry = dinfo->di_hdentry[dinfo->di_bstart].hd_dentry;
+       orig_h.inode = orig_h.dentry->d_inode;
+       orig_h.mode = 0;
+       if (orig_h.inode)
+               orig_h.mode = orig_h.inode->i_mode & S_IFMT;
+       memset(&tmp_h, 0, sizeof(tmp_h));
+       if (tmp->di_bstart >= 0) {
+               tmp_h.dentry = tmp->di_hdentry[tmp->di_bstart].hd_dentry;
+               tmp_h.inode = tmp_h.dentry->d_inode;
+               if (tmp_h.inode)
+                       tmp_h.mode = tmp_h.inode->i_mode & S_IFMT;
+       }
+
+       inode = dentry->d_inode;
+       if (!orig_h.inode) {
+               /* fixed typo: "nagative" -> "negative" */
+               AuDbg("negative originally\n");
+               if (inode) {
+                       au_hide(dentry);
+                       goto out;
+               }
+               AuDebugOn(inode);
+               AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
+               AuDebugOn(dinfo->di_bdiropq != -1);
+
+               if (!tmp_h.inode) {
+                       AuDbg("negative --> negative\n");
+                       /* should have only one negative lower */
+                       if (tmp->di_bstart >= 0
+                           && tmp->di_bstart < dinfo->di_bstart) {
+                               AuDebugOn(tmp->di_bstart != tmp->di_bend);
+                               AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
+                               au_set_h_dptr(dentry, dinfo->di_bstart, NULL);
+                               au_di_cp(dinfo, tmp);
+                               hd = tmp->di_hdentry + tmp->di_bstart;
+                               au_set_h_dptr(dentry, tmp->di_bstart,
+                                             dget(hd->hd_dentry));
+                       }
+                       au_dbg_verify_dinode(dentry);
+               } else {
+                       AuDbg("negative --> positive\n");
+                       /*
+                        * similar to the behaviour of creating with bypassing
+                        * aufs.
+                        * unhash it in order to force an error in the
+                        * succeeding create operation.
+                        * we should not set S_DEAD here.
+                        */
+                       d_drop(dentry);
+                       /* au_di_swap(tmp, dinfo); */
+                       au_dbg_verify_dinode(dentry);
+               }
+       } else {
+               AuDbg("positive originally\n");
+               /* inode may be NULL */
+               AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode);
+               if (!tmp_h.inode) {
+                       AuDbg("positive --> negative\n");
+                       /* or bypassing aufs */
+                       au_hide(dentry);
+                       if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_bstart)
+                               dinfo->di_bwh = tmp->di_bwh;
+                       if (inode)
+                               err = au_refresh_hinode_self(inode);
+                       au_dbg_verify_dinode(dentry);
+               } else if (orig_h.mode == tmp_h.mode) {
+                       AuDbg("positive --> positive, same type\n");
+                       if (!S_ISDIR(orig_h.mode)
+                           && dinfo->di_bstart > tmp->di_bstart) {
+                               /*
+                                * similar to the behaviour of removing and
+                                * creating.
+                                */
+                               au_hide(dentry);
+                               if (inode)
+                                       err = au_refresh_hinode_self(inode);
+                               au_dbg_verify_dinode(dentry);
+                       } else {
+                               /* fill empty slots */
+                               if (dinfo->di_bstart > tmp->di_bstart)
+                                       dinfo->di_bstart = tmp->di_bstart;
+                               if (dinfo->di_bend < tmp->di_bend)
+                                       dinfo->di_bend = tmp->di_bend;
+                               dinfo->di_bwh = tmp->di_bwh;
+                               dinfo->di_bdiropq = tmp->di_bdiropq;
+                               hd = tmp->di_hdentry;
+                               bend = dinfo->di_bend;
+                               for (bindex = tmp->di_bstart; bindex <= bend;
+                                    bindex++) {
+                                       if (au_h_dptr(dentry, bindex))
+                                               continue;
+                                       h_dentry = hd[bindex].hd_dentry;
+                                       if (!h_dentry)
+                                               continue;
+                                       h_inode = h_dentry->d_inode;
+                                       AuDebugOn(!h_inode);
+                                       AuDebugOn(orig_h.mode
+                                                 != (h_inode->i_mode
+                                                     & S_IFMT));
+                                       au_set_h_dptr(dentry, bindex,
+                                                     dget(h_dentry));
+                               }
+                               err = au_refresh_hinode(inode, dentry);
+                               au_dbg_verify_dinode(dentry);
+                       }
+               } else {
+                       AuDbg("positive --> positive, different type\n");
+                       /* similar to the behaviour of removing and creating */
+                       au_hide(dentry);
+                       if (inode)
+                               err = au_refresh_hinode_self(inode);
+                       au_dbg_verify_dinode(dentry);
+               }
+       }
+
+out:
+       return err;
+}
+
+/*
+ * Refresh @dentry against the current superblock generation: re-map its
+ * lower dentries, re-look it up into a temporary dinfo, and reconcile via
+ * au_refresh_by_dinfo().  Returns 0 on success or a negative errno.
+ */
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
+{
+       int err, ebrange;
+       unsigned int sigen;
+       struct au_dinfo *dinfo, *tmp;
+       struct super_block *sb;
+       struct inode *inode;
+
+       DiMustWriteLock(dentry);
+       AuDebugOn(IS_ROOT(dentry));
+       AuDebugOn(!parent->d_inode);
+
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       sigen = au_sigen(sb);
+       /* the parent must already be refreshed to this generation */
+       err = au_digen_test(parent, sigen);
+       if (unlikely(err))
+               goto out;
+
+       dinfo = au_di(dentry);
+       err = au_di_realloc(dinfo, au_sbend(sb) + 1);
+       if (unlikely(err))
+               goto out;
+       ebrange = au_dbrange_test(dentry);
+       if (!ebrange)
+               ebrange = au_do_refresh_hdentry(dentry, parent);
+
+       if (d_unhashed(dentry) || ebrange) {
+               /* nothing left to look up; only refresh the inode */
+               AuDebugOn(au_dbstart(dentry) < 0 && au_dbend(dentry) >= 0);
+               if (inode)
+                       err = au_refresh_hinode_self(inode);
+               au_dbg_verify_dinode(dentry);
+               if (!err)
+                       goto out_dgen; /* success */
+               goto out;
+       }
+
+       /* temporary dinfo */
+       AuDbgDentry(dentry);
+       err = -ENOMEM;
+       tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+       if (unlikely(!tmp))
+               goto out;
+       au_di_swap(tmp, dinfo);
+       /* returns the number of positive dentries */
+       /*
+        * if current working dir is removed, it returns an error.
+        * but the dentry is legal.
+        */
+       err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0, /*nd*/NULL);
+       AuDbgDentry(dentry);
+       au_di_swap(tmp, dinfo);
+       if (err == -ENOENT)
+               err = 0;
+       if (err >= 0) {
+               /* compare/refresh by dinfo */
+               AuDbgDentry(dentry);
+               err = au_refresh_by_dinfo(dentry, dinfo, tmp);
+               au_dbg_verify_dinode(dentry);
+               AuTraceErr(err);
+       }
+       au_rw_write_unlock(&tmp->di_rwsem);
+       au_di_free(tmp);
+       if (unlikely(err))
+               goto out;
+
+out_dgen:
+       au_update_digen(dentry);
+out:
+       if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) {
+               AuIOErr("failed refreshing %.*s, %d\n",
+                       AuDLNPair(dentry), err);
+               AuDbgDentry(dentry);
+       }
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * Run the lower dentry's own ->d_revalidate(), building a suitable
+ * nameidata when the lower fs requires one.  Returns 0 when valid,
+ * -EINVAL when invalid, or the lower fs's negative error.
+ */
+static noinline_for_stack
+int au_do_h_d_reval(struct dentry *h_dentry, struct nameidata *nd,
+                   struct dentry *dentry, aufs_bindex_t bindex)
+{
+       int err, valid;
+       int (*reval)(struct dentry *, struct nameidata *);
+
+       err = 0;
+       if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE))
+               goto out;
+       reval = h_dentry->d_op->d_revalidate;
+
+       AuDbg("b%d\n", bindex);
+       if (au_test_fs_null_nd(h_dentry->d_sb))
+               /* it may return tri-state */
+               valid = reval(h_dentry, NULL);
+       else {
+               struct nameidata h_nd;
+               int locked;
+               struct dentry *parent;
+
+               au_h_nd(&h_nd, nd);
+               /*
+                * NOTE(review): nd is dereferenced here before the NULL
+                * check in the 'locked' expression below; this branch
+                * appears to assume nd != NULL — confirm all callers.
+                */
+               parent = nd->path.dentry;
+               locked = (nd && nd->path.dentry != dentry);
+               if (locked)
+                       di_read_lock_parent(parent, AuLock_IR);
+               BUG_ON(bindex > au_dbend(parent));
+               h_nd.path.dentry = au_h_dptr(parent, bindex);
+               BUG_ON(!h_nd.path.dentry);
+               h_nd.path.mnt = au_sbr_mnt(parent->d_sb, bindex);
+               path_get(&h_nd.path);
+               valid = reval(h_dentry, &h_nd);
+               path_put(&h_nd.path);
+               if (locked)
+                       di_read_unlock(parent, AuLock_IR);
+       }
+
+       /* map the tri-state result onto 0 / -errno */
+       if (unlikely(valid < 0))
+               err = valid;
+       else if (!valid)
+               err = -EINVAL;
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/* todo: remove this */
+/*
+ * Revalidate every lower dentry of @dentry; with @do_udba also cross-check
+ * name, hash state, link count, type and cached inode against the lower
+ * layer.  Returns 0 when valid, -EINVAL or a lower-fs error otherwise.
+ */
+static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
+                         struct nameidata *nd, int do_udba)
+{
+       int err;
+       umode_t mode, h_mode;
+       aufs_bindex_t bindex, btail, bstart, ibs, ibe;
+       unsigned char plus, unhashed, is_root, h_plus;
+       struct inode *h_inode, *h_cached_inode;
+       struct dentry *h_dentry;
+       struct qstr *name, *h_name;
+
+       err = 0;
+       plus = 0;
+       mode = 0;
+       ibs = -1;
+       ibe = -1;
+       unhashed = !!d_unhashed(dentry);
+       is_root = !!IS_ROOT(dentry);
+       name = &dentry->d_name;
+
+       /*
+        * Theoretically, REVAL test should be unnecessary in case of
+        * {FS,I}NOTIFY.
+        * But {fs,i}notify doesn't fire some necessary events,
+        *      IN_ATTRIB for atime/nlink/pageio
+        *      IN_DELETE for NFS dentry
+        * Let's do REVAL test too.
+        */
+       if (do_udba && inode) {
+               /* snapshot of the aufs-side state to compare against */
+               mode = (inode->i_mode & S_IFMT);
+               plus = (inode->i_nlink > 0);
+               ibs = au_ibstart(inode);
+               ibe = au_ibend(inode);
+       }
+
+       bstart = au_dbstart(dentry);
+       btail = bstart;
+       if (inode && S_ISDIR(inode->i_mode))
+               btail = au_dbtaildir(dentry);
+       for (bindex = bstart; bindex <= btail; bindex++) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (!h_dentry)
+                       continue;
+
+               AuDbg("b%d, %.*s\n", bindex, AuDLNPair(h_dentry));
+               /* d_lock protects d_name/d_unhashed while we compare */
+               spin_lock(&h_dentry->d_lock);
+               h_name = &h_dentry->d_name;
+               if (unlikely(do_udba
+                            && !is_root
+                            && (unhashed != !!d_unhashed(h_dentry)
+                                || name->len != h_name->len
+                                || memcmp(name->name, h_name->name, name->len))
+                           )) {
+                       AuDbg("unhash 0x%x 0x%x, %.*s %.*s\n",
+                                 unhashed, d_unhashed(h_dentry),
+                                 AuDLNPair(dentry), AuDLNPair(h_dentry));
+                       spin_unlock(&h_dentry->d_lock);
+                       goto err;
+               }
+               spin_unlock(&h_dentry->d_lock);
+
+               err = au_do_h_d_reval(h_dentry, nd, dentry, bindex);
+               if (unlikely(err))
+                       /* do not goto err, to keep the errno */
+                       break;
+
+               /* todo: plink too? */
+               if (!do_udba)
+                       continue;
+
+               /* UDBA tests */
+               h_inode = h_dentry->d_inode;
+               if (unlikely(!!inode != !!h_inode))
+                       goto err;
+
+               h_plus = plus;
+               h_mode = mode;
+               h_cached_inode = h_inode;
+               if (h_inode) {
+                       h_mode = (h_inode->i_mode & S_IFMT);
+                       h_plus = (h_inode->i_nlink > 0);
+               }
+               if (inode && ibs <= bindex && bindex <= ibe)
+                       h_cached_inode = au_h_iptr(inode, bindex);
+
+               if (unlikely(plus != h_plus
+                            || mode != h_mode
+                            || h_cached_inode != h_inode))
+                       goto err;
+               continue;
+
+       err:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+/* todo: consolidate with do_refresh() and au_reval_for_attr() */
+/*
+ * Refresh a single stale dentry against its parent, which is assumed to
+ * be already up to date (asserted below).  Returns 0 when @dentry already
+ * matches @sigen or after a successful refresh, otherwise the negative
+ * errno from au_refresh_dentry().
+ */
+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+       int err;
+       struct dentry *parent;
+
+       /* fast path: generation already matches, nothing to refresh */
+       if (!au_digen_test(dentry, sigen))
+               return 0;
+
+       /* only the child is refreshed; the parent must not be stale here */
+       parent = dget_parent(dentry);
+       di_read_lock_parent(parent, AuLock_IR);
+       AuDebugOn(au_digen_test(parent, sigen));
+       au_dbg_verify_gen(parent, sigen);
+       err = au_refresh_dentry(dentry, parent);
+       di_read_unlock(parent, AuLock_IR);
+       dput(parent);
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * Re-validate the path down to @dentry so every component matches the
+ * superblock generation @sigen.  When no branch refresh has failed,
+ * delegate to the cheaper simple_reval_dpath(); otherwise repeatedly
+ * find the shallowest stale ancestor and refresh it, top-down, until
+ * @dentry itself is fresh.  Returns 0 on success or a negative errno.
+ *
+ * Fix vs. previous revision: the local 'inode' was assigned from
+ * d->d_inode but never read; the dead local is removed.
+ */
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+       int err;
+       struct dentry *d, *parent;
+
+       if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR))
+               return simple_reval_dpath(dentry, sigen);
+
+       /* slow loop, keep it simple and stupid */
+       /* cf: au_cpup_dirs() */
+       err = 0;
+       parent = NULL;
+       while (au_digen_test(dentry, sigen)) {
+               /* walk up to the shallowest stale ancestor of @dentry */
+               d = dentry;
+               while (1) {
+                       dput(parent);
+                       parent = dget_parent(d);
+                       if (!au_digen_test(parent, sigen))
+                               break;
+                       d = parent;
+               }
+
+               if (d != dentry)
+                       di_write_lock_child2(d);
+
+               /* someone might update our dentry while we were sleeping */
+               if (au_digen_test(d, sigen)) {
+                       /*
+                        * todo: consolidate with simple_reval_dpath(),
+                        * do_refresh() and au_reval_for_attr().
+                        */
+                       di_read_lock_parent(parent, AuLock_IR);
+                       err = au_refresh_dentry(d, parent);
+                       di_read_unlock(parent, AuLock_IR);
+               }
+
+               if (d != dentry)
+                       di_write_unlock(d);
+               dput(parent);
+               if (unlikely(err))
+                       break;
+       }
+
+       return err;
+}
+
+/*
+ * if valid returns 1, otherwise 0.
+ * Note: may also return a negative errno (-ECHILD for rcu-walk, or the
+ * error from aufs_read_lock()) in @valid.
+ */
+static int aufs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+       int valid, err;
+       unsigned int sigen;
+       unsigned char do_udba;
+       struct super_block *sb;
+       struct inode *inode;
+
+       /* todo: support rcu-walk? */
+       if (nd && (nd->flags & LOOKUP_RCU))
+               return -ECHILD;
+
+       valid = 0;
+       /* no dinfo yet/anymore: treat as invalid */
+       if (unlikely(!au_di(dentry)))
+               goto out;
+
+       inode = dentry->d_inode;
+       if (inode && is_bad_inode(inode))
+               goto out;
+
+       valid = 1;
+       sb = dentry->d_sb;
+       /*
+        * todo: very ugly
+        * i_mutex of parent dir may be held,
+        * but we should not return 'invalid' due to busy.
+        */
+       err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM);
+       if (unlikely(err)) {
+               valid = err;
+               AuTraceErr(err);
+               goto out;
+       }
+       if (unlikely(au_dbrange_test(dentry))) {
+               err = -EINVAL;
+               AuTraceErr(err);
+               goto out_dgrade;
+       }
+
+       /* refresh the whole path first when our generation is stale */
+       sigen = au_sigen(sb);
+       if (au_digen_test(dentry, sigen)) {
+               AuDebugOn(IS_ROOT(dentry));
+               err = au_reval_dpath(dentry, sigen);
+               if (unlikely(err)) {
+                       AuTraceErr(err);
+                       goto out_dgrade;
+               }
+       }
+       di_downgrade_lock(dentry, AuLock_IR);
+
+       err = -EINVAL;
+       if (inode && (IS_DEADDIR(inode) || !inode->i_nlink))
+               goto out_inval;
+
+       /* UDBA: also verify the top lower inode's generation */
+       do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
+       if (do_udba && inode) {
+               aufs_bindex_t bstart = au_ibstart(inode);
+               struct inode *h_inode;
+
+               if (bstart >= 0) {
+                       h_inode = au_h_iptr(inode, bstart);
+                       if (h_inode && au_test_higen(inode, h_inode))
+                               goto out_inval;
+               }
+       }
+
+       err = h_d_revalidate(dentry, inode, nd, do_udba);
+       if (unlikely(!err && do_udba && au_dbstart(dentry) < 0)) {
+               err = -EIO;
+               AuDbg("both of real entry and whiteout found, %.*s, err %d\n",
+                     AuDLNPair(dentry), err);
+       }
+       goto out_inval;
+
+out_dgrade:
+       di_downgrade_lock(dentry, AuLock_IR);
+out_inval:
+       aufs_read_unlock(dentry, AuLock_IR);
+       AuTraceErr(err);
+       valid = !err;
+out:
+       if (!valid) {
+               AuDbg("%.*s invalid, %d\n", AuDLNPair(dentry), valid);
+               d_drop(dentry);
+       }
+       return valid;
+}
+
+/* ->d_release: free the aufs private data when the dentry goes away */
+static void aufs_d_release(struct dentry *dentry)
+{
+       if (!au_di(dentry))
+               return;
+       au_di_fin(dentry);
+       au_hn_di_reinit(dentry);
+}
+
+/* dentry operations for aufs (no rcu-walk: see aufs_d_revalidate()) */
+const struct dentry_operations aufs_dop = {
+       .d_revalidate   = aufs_d_revalidate,
+       .d_release      = aufs_d_release
+};
diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h
new file mode 100644 (file)
index 0000000..c8dd79e
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#ifndef __AUFS_DENTRY_H__
+#define __AUFS_DENTRY_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include "rwsem.h"
+
+/* one lower (branch) dentry plus the id of the branch it belongs to */
+struct au_hdentry {
+       struct dentry           *hd_dentry;
+       aufs_bindex_t           hd_id;
+};
+
+/*
+ * aufs private data attached to every dentry via d_fsdata (see au_di()).
+ * di_bstart/di_bend delimit the valid range in di_hdentry[]; di_bwh and
+ * di_bdiropq are extra branch indices (negative when unset) — see
+ * au_dbtail()/au_dbtaildir() for how they are consumed.
+ */
+struct au_dinfo {
+       atomic_t                di_generation;
+
+       struct au_rwsem         di_rwsem;
+       aufs_bindex_t           di_bstart, di_bend, di_bwh, di_bdiropq;
+       struct au_hdentry       *di_hdentry;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dentry.c */
+extern const struct dentry_operations aufs_dop;
+struct au_branch;
+struct dentry *au_lkup_one(struct qstr *name, struct dentry *h_parent,
+                          struct au_branch *br, struct nameidata *nd);
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
+                              struct au_branch *br);
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+               struct dentry *h_parent, struct au_branch *br);
+
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type,
+                  struct nameidata *nd);
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh);
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent);
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
+
+/* dinfo.c */
+void au_di_init_once(void *_di);
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc);
+void au_di_free(struct au_dinfo *dinfo);
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
+int au_di_init(struct dentry *dentry);
+void au_di_fin(struct dentry *dentry);
+int au_di_realloc(struct au_dinfo *dinfo, int nbr);
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
+void di_read_unlock(struct dentry *d, int flags);
+void di_downgrade_lock(struct dentry *d, int flags);
+void di_write_lock(struct dentry *d, unsigned int lsc);
+void di_write_unlock(struct dentry *d);
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex);
+aufs_bindex_t au_dbtail(struct dentry *dentry);
+aufs_bindex_t au_dbtaildir(struct dentry *dentry);
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+                  struct dentry *h_dentry);
+int au_digen_test(struct dentry *dentry, unsigned int sigen);
+int au_dbrange_test(struct dentry *dentry);
+void au_update_digen(struct dentry *dentry);
+void au_update_dbrange(struct dentry *dentry, int do_put_zero);
+void au_update_dbstart(struct dentry *dentry);
+void au_update_dbend(struct dentry *dentry);
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
+
+/* ---------------------------------------------------------------------- */
+
+/* fetch the aufs private data attached to @dentry (may be NULL) */
+static inline struct au_dinfo *au_di(struct dentry *dentry)
+{
+       return dentry->d_fsdata;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for dinfo */
+/* these values are passed as the lockdep subclass to
+ * au_rw_*_lock_nested() — see di_read_lock()/di_write_lock() */
+enum {
+       AuLsc_DI_CHILD,         /* child first */
+       AuLsc_DI_CHILD2,        /* rename(2), link(2), and cpup at hnotify */
+       AuLsc_DI_CHILD3,        /* copyup dirs */
+       AuLsc_DI_PARENT,
+       AuLsc_DI_PARENT2,
+       AuLsc_DI_PARENT3,
+       AuLsc_DI_TMP            /* temp for replacing dinfo */
+};
+
+/*
+ * di_read_lock_child, di_write_lock_child,
+ * di_read_lock_child2, di_write_lock_child2,
+ * di_read_lock_child3, di_write_lock_child3,
+ * di_read_lock_parent, di_write_lock_parent,
+ * di_read_lock_parent2, di_write_lock_parent2,
+ * di_read_lock_parent3, di_write_lock_parent3,
+ */
+/* generates di_read_lock_<name>(d, flags) using subclass AuLsc_DI_<lsc> */
+#define AuReadLockFunc(name, lsc) \
+static inline void di_read_lock_##name(struct dentry *d, int flags) \
+{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
+
+/* generates di_write_lock_<name>(d) using subclass AuLsc_DI_<lsc> */
+#define AuWriteLockFunc(name, lsc) \
+static inline void di_write_lock_##name(struct dentry *d) \
+{ di_write_lock(d, AuLsc_DI_##lsc); }
+
+/* emit both the read and the write variant for one subclass */
+#define AuRWLockFuncs(name, lsc) \
+       AuReadLockFunc(name, lsc) \
+       AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define DiMustNoWaiters(d)     AuRwMustNoWaiters(&au_di(d)->di_rwsem)
+#define DiMustAnyLock(d)       AuRwMustAnyLock(&au_di(d)->di_rwsem)
+#define DiMustWriteLock(d)     AuRwMustWriteLock(&au_di(d)->di_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: memory barrier? */
+/* current generation of @d; compared against the sb generation (sigen) */
+static inline unsigned int au_digen(struct dentry *d)
+{
+       return atomic_read(&au_di(d)->di_generation);
+}
+
+/* clear the lower-dentry slot */
+static inline void au_h_dentry_init(struct au_hdentry *hdentry)
+{
+       hdentry->hd_dentry = NULL;
+}
+
+/* drop the reference held on the lower dentry; @hd may be NULL */
+static inline void au_hdput(struct au_hdentry *hd)
+{
+       if (hd)
+               dput(hd->hd_dentry);
+}
+
+/* locked accessors for the branch-index fields of the dinfo */
+static inline aufs_bindex_t au_dbstart(struct dentry *dentry)
+{
+       DiMustAnyLock(dentry);
+       return au_di(dentry)->di_bstart;
+}
+
+static inline aufs_bindex_t au_dbend(struct dentry *dentry)
+{
+       DiMustAnyLock(dentry);
+       return au_di(dentry)->di_bend;
+}
+
+static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
+{
+       DiMustAnyLock(dentry);
+       return au_di(dentry)->di_bwh;
+}
+
+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
+{
+       DiMustAnyLock(dentry);
+       return au_di(dentry)->di_bdiropq;
+}
+
+/* todo: hard/soft set? */
+/* write-locked setters for the same fields */
+static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       DiMustWriteLock(dentry);
+       au_di(dentry)->di_bstart = bindex;
+}
+
+static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       DiMustWriteLock(dentry);
+       au_di(dentry)->di_bend = bindex;
+}
+
+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       DiMustWriteLock(dentry);
+       /* dbwh can be outside of bstart - bend range */
+       au_di(dentry)->di_bwh = bindex;
+}
+
+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       DiMustWriteLock(dentry);
+       au_di(dentry)->di_bdiropq = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_HNOTIFY
+/* make the generation mismatch so revalidation triggers
+ * (cf. au_digen_test()) */
+static inline void au_digen_dec(struct dentry *d)
+{
+       atomic_dec(&au_di(d)->di_generation);
+}
+
+/* detach the dinfo pointer; called after au_di_fin() in aufs_d_release() */
+static inline void au_hn_di_reinit(struct dentry *dentry)
+{
+       dentry->d_fsdata = NULL;
+}
+#else
+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DENTRY_H__ */
diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c
new file mode 100644 (file)
index 0000000..2a92487
--- /dev/null
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * dentry private data
+ */
+
+#include "aufs.h"
+
+/* one-time initializer for a dinfo object coming out of the cache */
+void au_di_init_once(void *_dinfo)
+{
+       struct au_dinfo *dinfo = _dinfo;
+       static struct lock_class_key aufs_di;
+
+       au_rw_init(&dinfo->di_rwsem);
+       /* one lockdep class shared by every dinfo rwsem */
+       au_rw_class(&dinfo->di_rwsem, &aufs_di);
+}
+
+/*
+ * allocate a dinfo sized for the current number of branches of @sb and
+ * return it write-locked with subclass @lsc, or NULL on allocation
+ * failure.  All branch indices start out as -1 (unset).
+ */
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc)
+{
+       struct au_dinfo *dinfo;
+       int nbr, i;
+
+       dinfo = au_cache_alloc_dinfo();
+       if (unlikely(!dinfo))
+               return NULL;
+
+       nbr = au_sbend(sb) + 1;
+       if (nbr <= 0)
+               nbr = 1;
+       dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
+       if (unlikely(!dinfo->di_hdentry)) {
+               au_cache_free_dinfo(dinfo);
+               return NULL;
+       }
+
+       au_rw_write_lock_nested(&dinfo->di_rwsem, lsc);
+       dinfo->di_bstart = -1;
+       dinfo->di_bend = -1;
+       dinfo->di_bwh = -1;
+       dinfo->di_bdiropq = -1;
+       for (i = 0; i < nbr; i++)
+               dinfo->di_hdentry[i].hd_id = -1;
+
+       return dinfo;
+}
+
+/*
+ * release every lower-dentry reference in [di_bstart, di_bend] and free
+ * the dinfo itself.
+ */
+void au_di_free(struct au_dinfo *dinfo)
+{
+       aufs_bindex_t bi;
+
+       /* dentry may not be revalidated */
+       if (dinfo->di_bstart >= 0)
+               for (bi = dinfo->di_bstart; bi <= dinfo->di_bend; bi++)
+                       au_hdput(dinfo->di_hdentry + bi);
+       kfree(dinfo->di_hdentry);
+       au_cache_free_dinfo(dinfo);
+}
+
+/*
+ * exchange the lower-dentry array and all branch indices between two
+ * write-locked dinfo objects.
+ */
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b)
+{
+       struct au_hdentry *hdentry;
+       aufs_bindex_t bindex;
+
+       AuRwMustWriteLock(&a->di_rwsem);
+       AuRwMustWriteLock(&b->di_rwsem);
+
+       hdentry = a->di_hdentry;
+       a->di_hdentry = b->di_hdentry;
+       b->di_hdentry = hdentry;
+
+       bindex = a->di_bstart;
+       a->di_bstart = b->di_bstart;
+       b->di_bstart = bindex;
+
+       bindex = a->di_bend;
+       a->di_bend = b->di_bend;
+       b->di_bend = bindex;
+
+       bindex = a->di_bwh;
+       a->di_bwh = b->di_bwh;
+       b->di_bwh = bindex;
+
+       bindex = a->di_bdiropq;
+       a->di_bdiropq = b->di_bdiropq;
+       b->di_bdiropq = bindex;
+       /* smp_mb(); */
+}
+
+/* copy the branch-index fields from @src to @dst; di_hdentry is untouched */
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src)
+{
+       AuRwMustWriteLock(&dst->di_rwsem);
+       AuRwMustWriteLock(&src->di_rwsem);
+
+       dst->di_bstart = src->di_bstart;
+       dst->di_bend = src->di_bend;
+       dst->di_bwh = src->di_bwh;
+       dst->di_bdiropq = src->di_bdiropq;
+       /* smp_mb(); */
+}
+
+/*
+ * attach a fresh dinfo to @dentry, stamped with the current sb
+ * generation.  Returns 0 or -ENOMEM.
+ */
+int au_di_init(struct dentry *dentry)
+{
+       struct super_block *sb;
+       struct au_dinfo *dinfo;
+
+       sb = dentry->d_sb;
+       dinfo = au_di_alloc(sb, AuLsc_DI_CHILD);
+       if (unlikely(!dinfo))
+               return -ENOMEM;
+
+       atomic_set(&dinfo->di_generation, au_sigen(sb));
+       /* smp_mb(); */ /* atomic_set */
+       dentry->d_fsdata = dinfo;
+       return 0;
+}
+
+/* tear down and free the dinfo attached to @dentry */
+void au_di_fin(struct dentry *dentry)
+{
+       struct au_dinfo *dinfo;
+
+       dinfo = au_di(dentry);
+       AuRwDestroy(&dinfo->di_rwsem);
+       au_di_free(dinfo);
+}
+
+/*
+ * grow (zero-filling the new tail) or shrink the lower-dentry array to
+ * @nbr entries.  Returns 0 or -ENOMEM; the old array stays valid on
+ * failure.
+ */
+int au_di_realloc(struct au_dinfo *dinfo, int nbr)
+{
+       int cur_sz;
+       struct au_hdentry *p;
+
+       AuRwMustWriteLock(&dinfo->di_rwsem);
+
+       cur_sz = sizeof(*p) * (dinfo->di_bend + 1);
+       if (!cur_sz)
+               cur_sz = sizeof(*p);
+       p = au_kzrealloc(dinfo->di_hdentry, cur_sz, sizeof(*p) * nbr,
+                        GFP_NOFS);
+       if (unlikely(!p))
+               return -ENOMEM;
+
+       dinfo->di_hdentry = p;
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * take the iinfo write lock with the subclass matching the dinfo
+ * subclass @lsc; any other subclass (e.g. AuLsc_DI_TMP) is a bug here.
+ */
+static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
+{
+       switch (lsc) {
+       case AuLsc_DI_CHILD:
+               ii_write_lock_child(inode);
+               break;
+       case AuLsc_DI_CHILD2:
+               ii_write_lock_child2(inode);
+               break;
+       case AuLsc_DI_CHILD3:
+               ii_write_lock_child3(inode);
+               break;
+       case AuLsc_DI_PARENT:
+               ii_write_lock_parent(inode);
+               break;
+       case AuLsc_DI_PARENT2:
+               ii_write_lock_parent2(inode);
+               break;
+       case AuLsc_DI_PARENT3:
+               ii_write_lock_parent3(inode);
+               break;
+       default:
+               BUG();
+       }
+}
+
+/* read-lock counterpart of do_ii_write_lock() */
+static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
+{
+       switch (lsc) {
+       case AuLsc_DI_CHILD:
+               ii_read_lock_child(inode);
+               break;
+       case AuLsc_DI_CHILD2:
+               ii_read_lock_child2(inode);
+               break;
+       case AuLsc_DI_CHILD3:
+               ii_read_lock_child3(inode);
+               break;
+       case AuLsc_DI_PARENT:
+               ii_read_lock_parent(inode);
+               break;
+       case AuLsc_DI_PARENT2:
+               ii_read_lock_parent2(inode);
+               break;
+       case AuLsc_DI_PARENT3:
+               ii_read_lock_parent3(inode);
+               break;
+       default:
+               BUG();
+       }
+}
+
+/*
+ * read-lock the dinfo of @d with lockdep subclass @lsc; the IW/IR bits
+ * in @flags additionally take the inode's iinfo lock (write/read).
+ */
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
+{
+       au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
+       if (d->d_inode) {
+               if (au_ftest_lock(flags, IW))
+                       do_ii_write_lock(d->d_inode, lsc);
+               else if (au_ftest_lock(flags, IR))
+                       do_ii_read_lock(d->d_inode, lsc);
+       }
+}
+
+/*
+ * release in reverse order of di_read_lock(): the iinfo lock (as chosen
+ * by @flags) first, the dinfo rwsem last.
+ */
+void di_read_unlock(struct dentry *d, int flags)
+{
+       if (d->d_inode) {
+               if (au_ftest_lock(flags, IW)) {
+                       au_dbg_verify_dinode(d);
+                       ii_write_unlock(d->d_inode);
+               } else if (au_ftest_lock(flags, IR)) {
+                       au_dbg_verify_dinode(d);
+                       ii_read_unlock(d->d_inode);
+               }
+       }
+       au_rw_read_unlock(&au_di(d)->di_rwsem);
+}
+
+/* downgrade the dinfo write lock to read; with IR, the iinfo lock too */
+void di_downgrade_lock(struct dentry *d, int flags)
+{
+       if (d->d_inode && au_ftest_lock(flags, IR))
+               ii_downgrade_lock(d->d_inode);
+       au_rw_dgrade_lock(&au_di(d)->di_rwsem);
+}
+
+/* write-lock the dinfo (and the iinfo, when an inode exists) at @lsc */
+void di_write_lock(struct dentry *d, unsigned int lsc)
+{
+       au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
+       if (d->d_inode)
+               do_ii_write_lock(d->d_inode, lsc);
+}
+
+/* release the iinfo write lock (if any), then the dinfo rwsem */
+void di_write_unlock(struct dentry *d)
+{
+       au_dbg_verify_dinode(d);
+       if (d->d_inode)
+               ii_write_unlock(d->d_inode);
+       au_rw_write_unlock(&au_di(d)->di_rwsem);
+}
+
+/*
+ * write-lock two distinct dentries with the CHILD/CHILD2 subclasses;
+ * the locking order is decided by au_test_subdir() when both are dirs.
+ */
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
+{
+       AuDebugOn(d1 == d2
+                 || d1->d_inode == d2->d_inode
+                 || d1->d_sb != d2->d_sb);
+
+       if (isdir && au_test_subdir(d1, d2)) {
+               di_write_lock_child(d1);
+               di_write_lock_child2(d2);
+       } else {
+               /* there should be no races */
+               di_write_lock_child(d2);
+               di_write_lock_child2(d1);
+       }
+}
+
+/* same as di_write_lock2_child() but with the PARENT/PARENT2 subclasses */
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
+{
+       AuDebugOn(d1 == d2
+                 || d1->d_inode == d2->d_inode
+                 || d1->d_sb != d2->d_sb);
+
+       if (isdir && au_test_subdir(d1, d2)) {
+               di_write_lock_parent(d1);
+               di_write_lock_parent2(d2);
+       } else {
+               /* there should be no races */
+               di_write_lock_parent(d2);
+               di_write_lock_parent2(d1);
+       }
+}
+
+/*
+ * unlock both dentries; when they share one inode, only d2's dinfo
+ * rwsem is released so the iinfo lock is not double-unlocked.
+ */
+void di_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+       di_write_unlock(d1);
+       if (d1->d_inode == d2->d_inode)
+               au_rw_write_unlock(&au_di(d2)->di_rwsem);
+       else
+               di_write_unlock(d2);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * return the lower dentry of @dentry at branch @bindex, or NULL when
+ * @bindex lies before the valid range.
+ */
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       struct dentry *d;
+
+       DiMustAnyLock(dentry);
+
+       if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
+               return NULL;
+       AuDebugOn(bindex < 0);
+       d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry;
+       AuDebugOn(d && d->d_count <= 0);
+       return d;
+}
+
+/*
+ * extended version of au_h_dptr().
+ * returns a hashed and positive h_dentry in bindex, NULL, or error.
+ * (note: the 'success' branches imply au_d_hashed_positive() returns 0
+ * when the dentry is hashed and positive — hence the negations below)
+ */
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       struct dentry *h_dentry;
+       struct inode *inode, *h_inode;
+
+       inode = dentry->d_inode;
+       AuDebugOn(!inode);
+
+       /* 1st try: the lower dentry recorded in our own dinfo */
+       h_dentry = NULL;
+       if (au_dbstart(dentry) <= bindex
+           && bindex <= au_dbend(dentry))
+               h_dentry = au_h_dptr(dentry, bindex);
+       if (h_dentry && !au_d_hashed_positive(h_dentry)) {
+               dget(h_dentry);
+               goto out; /* success */
+       }
+
+       /* 2nd try: any alias of the lower inode */
+       AuDebugOn(bindex < au_ibstart(inode));
+       AuDebugOn(au_ibend(inode) < bindex);
+       h_inode = au_h_iptr(inode, bindex);
+       h_dentry = d_find_alias(h_inode);
+       if (h_dentry) {
+               if (!IS_ERR(h_dentry)) {
+                       if (!au_d_hashed_positive(h_dentry))
+                               goto out; /* success */
+                       dput(h_dentry);
+               } else
+                       goto out;
+       }
+
+       /* last resort: pseudo-link lookup, when the plink option is on */
+       if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) {
+               h_dentry = au_plink_lkup(inode, bindex);
+               AuDebugOn(!h_dentry);
+               if (!IS_ERR(h_dentry)) {
+                       if (!au_d_hashed_positive(h_dentry))
+                               goto out; /* success */
+                       dput(h_dentry);
+                       h_dentry = NULL;
+               }
+       }
+
+out:
+       AuDbgDentry(h_dentry);
+       return h_dentry;
+}
+
+/*
+ * last branch index to consider for @dentry, clipped by the whiteout
+ * index di_bwh: 0 means nothing visible, an inner bwh stops the range
+ * just before it.
+ */
+aufs_bindex_t au_dbtail(struct dentry *dentry)
+{
+       aufs_bindex_t bend, bwh;
+
+       bend = au_dbend(dentry);
+       if (bend < 0)
+               return bend;
+
+       bwh = au_dbwh(dentry);
+       if (bwh == 0)
+               return 0;
+       if (bwh > 0 && bwh < bend)
+               return bwh - 1;
+       return bend;
+}
+
+/* au_dbtail() for directories: additionally clipped by an inner diropq */
+aufs_bindex_t au_dbtaildir(struct dentry *dentry)
+{
+       aufs_bindex_t btail, bopq;
+
+       btail = au_dbtail(dentry);
+       if (btail < 0)
+               return btail;
+
+       bopq = au_dbdiropq(dentry);
+       if (bopq >= 0 && bopq < btail)
+               btail = bopq;
+       return btail;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * install @h_dentry as the lower dentry at @bindex, dropping any
+ * previous reference, and record the id of the branch it came from.
+ */
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+                  struct dentry *h_dentry)
+{
+       struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex;
+       struct au_branch *br;
+
+       DiMustWriteLock(dentry);
+
+       au_hdput(hd);
+       hd->hd_dentry = h_dentry;
+       if (h_dentry) {
+               br = au_sbr(dentry->d_sb, bindex);
+               hd->hd_id = br->br_id;
+       }
+}
+
+/*
+ * sanity-check the [bstart, bend] range; returns 0 when bstart is set,
+ * -EIO when it is not.
+ */
+int au_dbrange_test(struct dentry *dentry)
+{
+       int err;
+       aufs_bindex_t bstart, bend;
+
+       err = 0;
+       bstart = au_dbstart(dentry);
+       bend = au_dbend(dentry);
+       if (bstart >= 0)
+               /*
+                * NOTE(review): with bstart >= 0, "bstart > bend" is implied
+                * by "bend < 0", so this only fires when bend < 0; possibly
+                * "bend < 0 || bstart > bend" was intended — confirm.
+                */
+               AuDebugOn(bend < 0 && bstart > bend);
+       else {
+               err = -EIO;
+               AuDebugOn(bend >= 0);
+       }
+
+       return err;
+}
+
+/*
+ * return 0 when both the dentry generation and the inode generation
+ * match @sigen, otherwise -EIO.  d_inode is passed through as-is;
+ * presumably au_iigen_test() tolerates NULL — confirm.
+ */
+int au_digen_test(struct dentry *dentry, unsigned int sigen)
+{
+       int err;
+
+       err = 0;
+       if (unlikely(au_digen(dentry) != sigen
+                    || au_iigen_test(dentry->d_inode, sigen)))
+               err = -EIO;
+
+       return err;
+}
+
+/* stamp @dentry with the current sb generation (cf. au_digen_test()) */
+void au_update_digen(struct dentry *dentry)
+{
+       atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
+       /* smp_mb(); */ /* atomic_set */
+}
+
+/*
+ * shrink [di_bstart, di_bend] to the smallest range that still holds a
+ * lower dentry.  With @do_put_zero, negative (inode-less) lower dentries
+ * are dropped first.  Both ends become -1 when nothing remains.
+ */
+void au_update_dbrange(struct dentry *dentry, int do_put_zero)
+{
+       struct au_dinfo *dinfo;
+       struct dentry *h_d;
+       struct au_hdentry *hdp;
+
+       DiMustWriteLock(dentry);
+
+       dinfo = au_di(dentry);
+       if (!dinfo || dinfo->di_bstart < 0)
+               return;
+
+       hdp = dinfo->di_hdentry;
+       if (do_put_zero) {
+               aufs_bindex_t bindex, bend;
+
+               /* drop all negative lower dentries in the current range */
+               bend = dinfo->di_bend;
+               for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) {
+                       h_d = hdp[0 + bindex].hd_dentry;
+                       if (h_d && !h_d->d_inode)
+                               au_set_h_dptr(dentry, bindex, NULL);
+               }
+       }
+
+       /* advance di_bstart to the first occupied slot */
+       dinfo->di_bstart = -1;
+       while (++dinfo->di_bstart <= dinfo->di_bend)
+               if (hdp[0 + dinfo->di_bstart].hd_dentry)
+                       break;
+       if (dinfo->di_bstart > dinfo->di_bend) {
+               dinfo->di_bstart = -1;
+               dinfo->di_bend = -1;
+               return;
+       }
+
+       /* retreat di_bend to the last occupied slot */
+       dinfo->di_bend++;
+       while (0 <= --dinfo->di_bend)
+               if (hdp[0 + dinfo->di_bend].hd_dentry)
+                       break;
+       AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0);
+}
+
+/*
+ * advance di_bstart to the first branch holding a positive lower
+ * dentry, dropping negative (inode-less) ones along the way.
+ */
+void au_update_dbstart(struct dentry *dentry)
+{
+       aufs_bindex_t bindex, bend;
+       struct dentry *h_dentry;
+
+       bend = au_dbend(dentry);
+       for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (!h_dentry)
+                       continue;
+               if (h_dentry->d_inode) {
+                       au_set_dbstart(dentry, bindex);
+                       return;
+               }
+               au_set_h_dptr(dentry, bindex, NULL);
+       }
+}
+
+/* mirror of au_update_dbstart(): retreat di_bend from the tail */
+void au_update_dbend(struct dentry *dentry)
+{
+       aufs_bindex_t bindex, bstart;
+       struct dentry *h_dentry;
+
+       bstart = au_dbstart(dentry);
+       for (bindex = au_dbend(dentry); bindex >= bstart; bindex--) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (!h_dentry)
+                       continue;
+               if (h_dentry->d_inode) {
+                       au_set_dbend(dentry, bindex);
+                       return;
+               }
+               au_set_h_dptr(dentry, bindex, NULL);
+       }
+}
+
+/*
+ * return the branch index where @h_dentry is stored as the lower dentry
+ * of @dentry, or -1 when it is not found.
+ */
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
+{
+       aufs_bindex_t bi, last;
+
+       last = au_dbend(dentry);
+       for (bi = au_dbstart(dentry); bi <= last; bi++)
+               if (au_h_dptr(dentry, bi) == h_dentry)
+                       return bi;
+       return -1;
+}
diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c
new file mode 100644 (file)
index 0000000..ca3ef14
--- /dev/null
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * directory operations
+ */
+
+#include <linux/fs_stack.h>
+#include "aufs.h"
+
+void au_add_nlink(struct inode *dir, struct inode *h_dir)
+{
+       unsigned int nlink;
+
+       AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+       nlink = dir->i_nlink;
+       nlink += h_dir->i_nlink - 2;
+       if (h_dir->i_nlink < 2)
+               nlink += 2;
+       smp_mb();
+       set_nlink(dir, nlink);
+}
+
+void au_sub_nlink(struct inode *dir, struct inode *h_dir)
+{
+       unsigned int nlink;
+
+       AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+       nlink = dir->i_nlink;
+       nlink -= h_dir->i_nlink - 2;
+       if (h_dir->i_nlink < 2)
+               nlink -= 2;
+       smp_mb();
+       set_nlink(dir, nlink);
+}
+
+loff_t au_dir_size(struct file *file, struct dentry *dentry)
+{
+       loff_t sz;
+       aufs_bindex_t bindex, bend;
+       struct file *h_file;
+       struct dentry *h_dentry;
+
+       sz = 0;
+       if (file) {
+               AuDebugOn(!file->f_dentry);
+               AuDebugOn(!file->f_dentry->d_inode);
+               AuDebugOn(!S_ISDIR(file->f_dentry->d_inode->i_mode));
+
+               bend = au_fbend_dir(file);
+               for (bindex = au_fbstart(file);
+                    bindex <= bend && sz < KMALLOC_MAX_SIZE;
+                    bindex++) {
+                       h_file = au_hf_dir(file, bindex);
+                       if (h_file
+                           && h_file->f_dentry
+                           && h_file->f_dentry->d_inode)
+                               sz += i_size_read(h_file->f_dentry->d_inode);
+               }
+       } else {
+               AuDebugOn(!dentry);
+               AuDebugOn(!dentry->d_inode);
+               AuDebugOn(!S_ISDIR(dentry->d_inode->i_mode));
+
+               bend = au_dbtaildir(dentry);
+               for (bindex = au_dbstart(dentry);
+                    bindex <= bend && sz < KMALLOC_MAX_SIZE;
+                    bindex++) {
+                       h_dentry = au_h_dptr(dentry, bindex);
+                       if (h_dentry && h_dentry->d_inode)
+                               sz += i_size_read(h_dentry->d_inode);
+               }
+       }
+       if (sz < KMALLOC_MAX_SIZE)
+               sz = roundup_pow_of_two(sz);
+       if (sz > KMALLOC_MAX_SIZE)
+               sz = KMALLOC_MAX_SIZE;
+       else if (sz < NAME_MAX) {
+               BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX);
+               sz = AUFS_RDBLK_DEF;
+       }
+       return sz;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int reopen_dir(struct file *file)
+{
+       int err;
+       unsigned int flags;
+       aufs_bindex_t bindex, btail, bstart;
+       struct dentry *dentry, *h_dentry;
+       struct file *h_file;
+
+       /* open all lower dirs */
+       dentry = file->f_dentry;
+       bstart = au_dbstart(dentry);
+       for (bindex = au_fbstart(file); bindex < bstart; bindex++)
+               au_set_h_fptr(file, bindex, NULL);
+       au_set_fbstart(file, bstart);
+
+       btail = au_dbtaildir(dentry);
+       for (bindex = au_fbend_dir(file); btail < bindex; bindex--)
+               au_set_h_fptr(file, bindex, NULL);
+       au_set_fbend_dir(file, btail);
+
+       flags = vfsub_file_flags(file);
+       for (bindex = bstart; bindex <= btail; bindex++) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (!h_dentry)
+                       continue;
+               h_file = au_hf_dir(file, bindex);
+               if (h_file)
+                       continue;
+
+               h_file = au_h_open(dentry, bindex, flags, file);
+               err = PTR_ERR(h_file);
+               if (IS_ERR(h_file))
+                       goto out; /* close all? */
+               au_set_h_fptr(file, bindex, h_file);
+       }
+       au_update_figen(file);
+       /* todo: necessary? */
+       /* file->f_ra = h_file->f_ra; */
+       err = 0;
+
+out:
+       return err;
+}
+
+static int do_open_dir(struct file *file, int flags)
+{
+       int err;
+       aufs_bindex_t bindex, btail;
+       struct dentry *dentry, *h_dentry;
+       struct file *h_file;
+
+       FiMustWriteLock(file);
+
+       dentry = file->f_dentry;
+       err = au_alive_dir(dentry);
+       if (unlikely(err))
+               goto out;
+
+       file->f_version = dentry->d_inode->i_version;
+       bindex = au_dbstart(dentry);
+       au_set_fbstart(file, bindex);
+       btail = au_dbtaildir(dentry);
+       au_set_fbend_dir(file, btail);
+       for (; !err && bindex <= btail; bindex++) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (!h_dentry)
+                       continue;
+
+               h_file = au_h_open(dentry, bindex, flags, file);
+               if (IS_ERR(h_file)) {
+                       err = PTR_ERR(h_file);
+                       break;
+               }
+               au_set_h_fptr(file, bindex, h_file);
+       }
+       au_update_figen(file);
+       /* todo: necessary? */
+       /* file->f_ra = h_file->f_ra; */
+       if (!err)
+               return 0; /* success */
+
+       /* close all */
+       for (bindex = au_fbstart(file); bindex <= btail; bindex++)
+               au_set_h_fptr(file, bindex, NULL);
+       au_set_fbstart(file, -1);
+       au_set_fbend_dir(file, -1);
+
+out:
+       return err;
+}
+
+static int aufs_open_dir(struct inode *inode __maybe_unused,
+                        struct file *file)
+{
+       int err;
+       struct super_block *sb;
+       struct au_fidir *fidir;
+
+       err = -ENOMEM;
+       sb = file->f_dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       fidir = au_fidir_alloc(sb);
+       if (fidir) {
+               err = au_do_open(file, do_open_dir, fidir);
+               if (unlikely(err))
+                       kfree(fidir);
+       }
+       si_read_unlock(sb);
+       return err;
+}
+
+static int aufs_release_dir(struct inode *inode __maybe_unused,
+                           struct file *file)
+{
+       struct au_vdir *vdir_cache;
+       struct au_finfo *finfo;
+       struct au_fidir *fidir;
+       aufs_bindex_t bindex, bend;
+
+       finfo = au_fi(file);
+       fidir = finfo->fi_hdir;
+       if (fidir) {
+               /* remove me from sb->s_files */
+               file_sb_list_del(file);
+
+               vdir_cache = fidir->fd_vdir_cache; /* lock-free */
+               if (vdir_cache)
+                       au_vdir_free(vdir_cache);
+
+               bindex = finfo->fi_btop;
+               if (bindex >= 0) {
+                       /*
+                        * calls fput() instead of filp_close(),
+                        * since no dnotify or lock for the lower file.
+                        */
+                       bend = fidir->fd_bbot;
+                       for (; bindex <= bend; bindex++)
+                               au_set_h_fptr(file, bindex, NULL);
+               }
+               kfree(fidir);
+               finfo->fi_hdir = NULL;
+       }
+       au_finfo_fin(file);
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_dir(struct file *file, fl_owner_t id)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       struct file *h_file;
+
+       err = 0;
+       bend = au_fbend_dir(file);
+       for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
+               h_file = au_hf_dir(file, bindex);
+               if (h_file)
+                       err = vfsub_flush(h_file, id);
+       }
+       return err;
+}
+
+static int aufs_flush_dir(struct file *file, fl_owner_t id)
+{
+       return au_do_flush(file, id, au_do_flush_dir);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
+{
+       int err;
+       aufs_bindex_t bend, bindex;
+       struct inode *inode;
+       struct super_block *sb;
+
+       err = 0;
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       IMustLock(inode);
+       bend = au_dbend(dentry);
+       for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) {
+               struct path h_path;
+
+               if (au_test_ro(sb, bindex, inode))
+                       continue;
+               h_path.dentry = au_h_dptr(dentry, bindex);
+               if (!h_path.dentry)
+                       continue;
+
+               h_path.mnt = au_sbr_mnt(sb, bindex);
+               err = vfsub_fsync(NULL, &h_path, datasync);
+       }
+
+       return err;
+}
+
+static int au_do_fsync_dir(struct file *file, int datasync)
+{
+       int err;
+       aufs_bindex_t bend, bindex;
+       struct file *h_file;
+       struct super_block *sb;
+       struct inode *inode;
+
+       err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+
+       sb = file->f_dentry->d_sb;
+       inode = file->f_dentry->d_inode;
+       bend = au_fbend_dir(file);
+       for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
+               h_file = au_hf_dir(file, bindex);
+               if (!h_file || au_test_ro(sb, bindex, inode))
+                       continue;
+
+               err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+       }
+
+out:
+       return err;
+}
+
+/*
+ * @file may be NULL -- NOTE(review): file is dereferenced below before the
+ * NULL check (f_dentry / i_mutex setup); the NULL path looks unreachable-safe
+ */
+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end,
+                         int datasync)
+{
+       int err;
+       struct dentry *dentry;
+       struct super_block *sb;
+       struct mutex *mtx;
+
+       err = 0;
+       dentry = file->f_dentry;
+       mtx = &dentry->d_inode->i_mutex;
+       mutex_lock(mtx);
+       sb = dentry->d_sb;
+       si_noflush_read_lock(sb);
+       if (file)
+               err = au_do_fsync_dir(file, datasync);
+       else {
+               di_write_lock_child(dentry);
+               err = au_do_fsync_dir_no_file(dentry, datasync);
+       }
+       au_cpup_attr_timesizes(dentry->d_inode);
+       di_write_unlock(dentry);
+       if (file)
+               fi_write_unlock(file);
+
+       si_read_unlock(sb);
+       mutex_unlock(mtx);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+       int err;
+       struct dentry *dentry;
+       struct inode *inode, *h_inode;
+       struct super_block *sb;
+
+       dentry = file->f_dentry;
+       inode = dentry->d_inode;
+       IMustLock(inode);
+
+       sb = dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+       err = au_alive_dir(dentry);
+       if (!err)
+               err = au_vdir_init(file);
+       di_downgrade_lock(dentry, AuLock_IR);
+       if (unlikely(err))
+               goto out_unlock;
+
+       h_inode = au_h_iptr(inode, au_ibstart(inode));
+       if (!au_test_nfsd()) {
+               err = au_vdir_fill_de(file, dirent, filldir);
+               fsstack_copy_attr_atime(inode, h_inode);
+       } else {
+               /*
+                * nfsd filldir may call lookup_one_len(), vfs_getattr(),
+                * encode_fh() and others.
+                */
+               atomic_inc(&h_inode->i_count);
+               di_read_unlock(dentry, AuLock_IR);
+               si_read_unlock(sb);
+               err = au_vdir_fill_de(file, dirent, filldir);
+               fsstack_copy_attr_atime(inode, h_inode);
+               fi_write_unlock(file);
+               iput(h_inode);
+
+               AuTraceErr(err);
+               return err;
+       }
+
+out_unlock:
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+out:
+       si_read_unlock(sb);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuTestEmpty_WHONLY     1
+#define AuTestEmpty_CALLED     (1 << 1)
+#define AuTestEmpty_SHWH       (1 << 2)
+#define au_ftest_testempty(flags, name)        ((flags) & AuTestEmpty_##name)
+#define au_fset_testempty(flags, name) \
+       do { (flags) |= AuTestEmpty_##name; } while (0)
+#define au_fclr_testempty(flags, name) \
+       do { (flags) &= ~AuTestEmpty_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuTestEmpty_SHWH
+#define AuTestEmpty_SHWH       0
+#endif
+
+struct test_empty_arg {
+       struct au_nhash *whlist;
+       unsigned int flags;
+       int err;
+       aufs_bindex_t bindex;
+};
+
+static int test_empty_cb(void *__arg, const char *__name, int namelen,
+                        loff_t offset __maybe_unused, u64 ino,
+                        unsigned int d_type)
+{
+       struct test_empty_arg *arg = __arg;
+       char *name = (void *)__name;
+
+       arg->err = 0;
+       au_fset_testempty(arg->flags, CALLED);
+       /* smp_mb(); */
+       if (name[0] == '.'
+           && (namelen == 1 || (name[1] == '.' && namelen == 2)))
+               goto out; /* success */
+
+       if (namelen <= AUFS_WH_PFX_LEN
+           || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+               if (au_ftest_testempty(arg->flags, WHONLY)
+                   && !au_nhash_test_known_wh(arg->whlist, name, namelen))
+                       arg->err = -ENOTEMPTY;
+               goto out;
+       }
+
+       name += AUFS_WH_PFX_LEN;
+       namelen -= AUFS_WH_PFX_LEN;
+       if (!au_nhash_test_known_wh(arg->whlist, name, namelen))
+               arg->err = au_nhash_append_wh
+                       (arg->whlist, name, namelen, ino, d_type, arg->bindex,
+                        au_ftest_testempty(arg->flags, SHWH));
+
+out:
+       /* smp_mb(); */
+       AuTraceErr(arg->err);
+       return arg->err;
+}
+
+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+       int err;
+       struct file *h_file;
+
+       h_file = au_h_open(dentry, arg->bindex,
+                          O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
+                          /*file*/NULL);
+       err = PTR_ERR(h_file);
+       if (IS_ERR(h_file))
+               goto out;
+
+       err = 0;
+       if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+           && !h_file->f_dentry->d_inode->i_nlink)
+               goto out_put;
+
+       do {
+               arg->err = 0;
+               au_fclr_testempty(arg->flags, CALLED);
+               /* smp_mb(); */
+               err = vfsub_readdir(h_file, test_empty_cb, arg);
+               if (err >= 0)
+                       err = arg->err;
+       } while (!err && au_ftest_testempty(arg->flags, CALLED));
+
+out_put:
+       fput(h_file);
+       au_sbr_put(dentry->d_sb, arg->bindex);
+out:
+       return err;
+}
+
+struct do_test_empty_args {
+       int *errp;
+       struct dentry *dentry;
+       struct test_empty_arg *arg;
+};
+
+static void call_do_test_empty(void *args)
+{
+       struct do_test_empty_args *a = args;
+       *a->errp = do_test_empty(a->dentry, a->arg);
+}
+
+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+       int err, wkq_err;
+       struct dentry *h_dentry;
+       struct inode *h_inode;
+
+       h_dentry = au_h_dptr(dentry, arg->bindex);
+       h_inode = h_dentry->d_inode;
+       /* todo: i_mode changes anytime? */
+       mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+       err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
+       mutex_unlock(&h_inode->i_mutex);
+       if (!err)
+               err = do_test_empty(dentry, arg);
+       else {
+               struct do_test_empty_args args = {
+                       .errp   = &err,
+                       .dentry = dentry,
+                       .arg    = arg
+               };
+               unsigned int flags = arg->flags;
+
+               wkq_err = au_wkq_wait(call_do_test_empty, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+               arg->flags = flags;
+       }
+
+       return err;
+}
+
+int au_test_empty_lower(struct dentry *dentry)
+{
+       int err;
+       unsigned int rdhash;
+       aufs_bindex_t bindex, bstart, btail;
+       struct au_nhash whlist;
+       struct test_empty_arg arg;
+
+       SiMustAnyLock(dentry->d_sb);
+
+       rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+       if (!rdhash)
+               rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry));
+       err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+
+       arg.flags = 0;
+       arg.whlist = &whlist;
+       bstart = au_dbstart(dentry);
+       if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+               au_fset_testempty(arg.flags, SHWH);
+       arg.bindex = bstart;
+       err = do_test_empty(dentry, &arg);
+       if (unlikely(err))
+               goto out_whlist;
+
+       au_fset_testempty(arg.flags, WHONLY);
+       btail = au_dbtaildir(dentry);
+       for (bindex = bstart + 1; !err && bindex <= btail; bindex++) {
+               struct dentry *h_dentry;
+
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (h_dentry && h_dentry->d_inode) {
+                       arg.bindex = bindex;
+                       err = do_test_empty(dentry, &arg);
+               }
+       }
+
+out_whlist:
+       au_nhash_wh_free(&whlist);
+out:
+       return err;
+}
+
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
+{
+       int err;
+       struct test_empty_arg arg;
+       aufs_bindex_t bindex, btail;
+
+       err = 0;
+       arg.whlist = whlist;
+       arg.flags = AuTestEmpty_WHONLY;
+       if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+               au_fset_testempty(arg.flags, SHWH);
+       btail = au_dbtaildir(dentry);
+       for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) {
+               struct dentry *h_dentry;
+
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (h_dentry && h_dentry->d_inode) {
+                       arg.bindex = bindex;
+                       err = sio_test_empty(dentry, &arg);
+               }
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_dir_fop = {
+       .owner          = THIS_MODULE,
+       .llseek         = default_llseek,
+       .read           = generic_read_dir,
+       .readdir        = aufs_readdir,
+       .unlocked_ioctl = aufs_ioctl_dir,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = aufs_compat_ioctl_dir,
+#endif
+       .open           = aufs_open_dir,
+       .release        = aufs_release_dir,
+       .flush          = aufs_flush_dir,
+       .fsync          = aufs_fsync_dir
+};
diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h
new file mode 100644 (file)
index 0000000..fb237ba
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * directory operations
+ */
+
+#ifndef __AUFS_DIR_H__
+#define __AUFS_DIR_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+
+/* ---------------------------------------------------------------------- */
+
+/* need to be faster and smaller */
+
+struct au_nhash {
+       unsigned int            nh_num;
+       struct hlist_head       *nh_head;
+};
+
+struct au_vdir_destr {
+       unsigned char   len;
+       unsigned char   name[0];
+} __packed;
+
+struct au_vdir_dehstr {
+       struct hlist_node       hash;
+       struct au_vdir_destr    *str;
+} ____cacheline_aligned_in_smp;
+
+struct au_vdir_de {
+       ino_t                   de_ino;
+       unsigned char           de_type;
+       /* caution: packed */
+       struct au_vdir_destr    de_str;
+} __packed;
+
+struct au_vdir_wh {
+       struct hlist_node       wh_hash;
+#ifdef CONFIG_AUFS_SHWH
+       ino_t                   wh_ino;
+       aufs_bindex_t           wh_bindex;
+       unsigned char           wh_type;
+#else
+       aufs_bindex_t           wh_bindex;
+#endif
+       /* caution: packed */
+       struct au_vdir_destr    wh_str;
+} __packed;
+
+union au_vdir_deblk_p {
+       unsigned char           *deblk;
+       struct au_vdir_de       *de;
+};
+
+struct au_vdir {
+       unsigned char   **vd_deblk;
+       unsigned long   vd_nblk;
+       struct {
+               unsigned long           ul;
+               union au_vdir_deblk_p   p;
+       } vd_last;
+
+       unsigned long   vd_version;
+       unsigned int    vd_deblk_sz;
+       unsigned long   vd_jiffy;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dir.c */
+extern const struct file_operations aufs_dir_fop;
+void au_add_nlink(struct inode *dir, struct inode *h_dir);
+void au_sub_nlink(struct inode *dir, struct inode *h_dir);
+loff_t au_dir_size(struct file *file, struct dentry *dentry);
+int au_test_empty_lower(struct dentry *dentry);
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
+
+/* vdir.c */
+unsigned int au_rdhash_est(loff_t sz);
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
+void au_nhash_wh_free(struct au_nhash *whlist);
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+                           int limit);
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+                      unsigned int d_type, aufs_bindex_t bindex,
+                      unsigned char shwh);
+void au_vdir_free(struct au_vdir *vdir);
+int au_vdir_init(struct file *file);
+int au_vdir_fill_de(struct file *file, void *dirent, filldir_t filldir);
+
+/* ioctl.c */
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_AUFS_RDU
+/* rdu.c */
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+                        unsigned long arg);
+#endif
+#else
+static inline long au_rdu_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       return -EINVAL;
+}
+#ifdef CONFIG_COMPAT
+static inline long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+                                      unsigned long arg)
+{
+       return -EINVAL;
+}
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIR_H__ */
diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c
new file mode 100644 (file)
index 0000000..fe323b4
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * dynamically customizable operations for regular files
+ */
+
+#include "aufs.h"
+
+#define DyPrSym(key)   AuDbgSym(key->dk_op.dy_hop)
+
+/*
+ * How large will these lists be?
+ * Usually just a few elements, 20-30 at most for each, I guess.
+ */
+static struct au_splhead dynop[AuDyLast];
+
+static struct au_dykey *dy_gfind_get(struct au_splhead *spl, const void *h_op)
+{
+       struct au_dykey *key, *tmp;
+       struct list_head *head;
+
+       key = NULL;
+       head = &spl->head;
+       rcu_read_lock();
+       list_for_each_entry_rcu(tmp, head, dk_list)
+               if (tmp->dk_op.dy_hop == h_op) {
+                       key = tmp;
+                       kref_get(&key->dk_kref);
+                       break;
+               }
+       rcu_read_unlock();
+
+       return key;
+}
+
+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key)
+{
+       struct au_dykey **k, *found;
+       const void *h_op = key->dk_op.dy_hop;
+       int i;
+
+       found = NULL;
+       k = br->br_dykey;
+       for (i = 0; i < AuBrDynOp; i++)
+               if (k[i]) {
+                       if (k[i]->dk_op.dy_hop == h_op) {
+                               found = k[i];
+                               break;
+                       }
+               } else
+                       break;
+       if (!found) {
+               spin_lock(&br->br_dykey_lock);
+               for (; i < AuBrDynOp; i++)
+                       if (k[i]) {
+                               if (k[i]->dk_op.dy_hop == h_op) {
+                                       found = k[i];
+                                       break;
+                               }
+                       } else {
+                               k[i] = key;
+                               break;
+                       }
+               spin_unlock(&br->br_dykey_lock);
+               BUG_ON(i == AuBrDynOp); /* expand the array */
+       }
+
+       return found;
+}
+
+/* kref_get() if @key is already added */
+static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key)
+{
+       struct au_dykey *tmp, *found;
+       struct list_head *head;
+       const void *h_op = key->dk_op.dy_hop;
+
+       found = NULL;
+       head = &spl->head;
+       spin_lock(&spl->spin);
+       list_for_each_entry(tmp, head, dk_list)
+               if (tmp->dk_op.dy_hop == h_op) {
+                       kref_get(&tmp->dk_kref);
+                       found = tmp;
+                       break;
+               }
+       if (!found)
+               list_add_rcu(&key->dk_list, head);
+       spin_unlock(&spl->spin);
+
+       if (!found)
+               DyPrSym(key);
+       return found;
+}
+
+static void dy_free_rcu(struct rcu_head *rcu)
+{
+       struct au_dykey *key;
+
+       key = container_of(rcu, struct au_dykey, dk_rcu);
+       DyPrSym(key);
+       kfree(key);
+}
+
+static void dy_free(struct kref *kref)
+{
+       struct au_dykey *key;
+       struct au_splhead *spl;
+
+       key = container_of(kref, struct au_dykey, dk_kref);
+       spl = dynop + key->dk_op.dy_type;
+       au_spl_del_rcu(&key->dk_list, spl);
+       call_rcu(&key->dk_rcu, dy_free_rcu);
+}
+
+void au_dy_put(struct au_dykey *key)
+{
+       kref_put(&key->dk_kref, dy_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define DyDbgSize(cnt, op)     AuDebugOn(cnt != sizeof(op)/sizeof(void *))
+
+#ifdef CONFIG_AUFS_DEBUG
+#define DyDbgDeclare(cnt)      unsigned int cnt = 0
+#define DyDbgInc(cnt)          do { cnt++; } while (0)
+#else
+#define DyDbgDeclare(cnt)      do {} while (0)
+#define DyDbgInc(cnt)          do {} while (0)
+#endif
+
+#define DySet(func, dst, src, h_op, h_sb) do {                         \
+       DyDbgInc(cnt);                                                  \
+       if (h_op->func) {                                               \
+               if (src.func)                                           \
+                       dst.func = src.func;                            \
+               else                                                    \
+                       AuDbg("%s %s\n", au_sbtype(h_sb), #func);       \
+       }                                                               \
+} while (0)
+
+#define DySetForce(func, dst, src) do {                \
+       AuDebugOn(!src.func);                   \
+       DyDbgInc(cnt);                          \
+       dst.func = src.func;                    \
+} while (0)
+
+#define DySetAop(func) \
+       DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb)
+#define DySetAopForce(func) \
+       DySetForce(func, dyaop->da_op, aufs_aop)
+
+static void dy_aop(struct au_dykey *key, const void *h_op,
+                  struct super_block *h_sb __maybe_unused)
+{
+       struct au_dyaop *dyaop = (void *)key;
+       const struct address_space_operations *h_aop = h_op;
+       DyDbgDeclare(cnt);
+
+       AuDbg("%s\n", au_sbtype(h_sb));
+
+       DySetAop(writepage);
+       DySetAopForce(readpage);        /* force */
+       DySetAop(writepages);
+       DySetAop(set_page_dirty);
+       DySetAop(readpages);
+       DySetAop(write_begin);
+       DySetAop(write_end);
+       DySetAop(bmap);
+       DySetAop(invalidatepage);
+       DySetAop(releasepage);
+       DySetAop(freepage);
+       /* these two will be changed according to an aufs mount option */
+       DySetAop(direct_IO);
+       DySetAop(get_xip_mem);
+       DySetAop(migratepage);
+       DySetAop(launder_page);
+       DySetAop(is_partially_uptodate);
+       DySetAop(error_remove_page);
+
+       DyDbgSize(cnt, *h_aop);
+       dyaop->da_get_xip_mem = h_aop->get_xip_mem;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void dy_bug(struct kref *kref)
+{
+       BUG();
+}
+
+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br)
+{
+       struct au_dykey *key, *old;
+       struct au_splhead *spl;
+       struct op {
+               unsigned int sz;
+               void (*set)(struct au_dykey *key, const void *h_op,
+                           struct super_block *h_sb __maybe_unused);
+       };
+       static const struct op a[] = {
+               [AuDy_AOP] = {
+                       .sz     = sizeof(struct au_dyaop),
+                       .set    = dy_aop
+               }
+       };
+       const struct op *p;
+
+       spl = dynop + op->dy_type;
+       key = dy_gfind_get(spl, op->dy_hop);
+       if (key)
+               goto out_add; /* success */
+
+       p = a + op->dy_type;
+       key = kzalloc(p->sz, GFP_NOFS);
+       if (unlikely(!key)) {
+               key = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       key->dk_op.dy_hop = op->dy_hop;
+       kref_init(&key->dk_kref);
+       p->set(key, op->dy_hop, au_br_sb(br));
+       old = dy_gadd(spl, key);
+       if (old) {
+               kfree(key);
+               key = old;
+       }
+
+out_add:
+       old = dy_bradd(br, key);
+       if (old)
+               /* its ref-count should never be zero here */
+               kref_put(&key->dk_kref, dy_bug);
+out:
+       return key;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs prohibits O_DIRECT by default even if the branch supports it.
+ * This behaviour is necessary to return an error from open(O_DIRECT) instead
+ * of the succeeding I/O. The dio mount option enables O_DIRECT and makes
+ * open(O_DIRECT) always succeed, but the succeeding I/O may return an error.
+ * See the aufs manual for details.
+ *
+ * To keep this behaviour, aufs has to set NULL to ->get_xip_mem too, and the
+ * performance of fadvise() and madvise() may be affected.
+ */
+static void dy_adx(struct au_dyaop *dyaop, int do_dx)
+{
+       if (!do_dx) {
+               dyaop->da_op.direct_IO = NULL;
+               dyaop->da_op.get_xip_mem = NULL;
+       } else {
+               dyaop->da_op.direct_IO = aufs_aop.direct_IO;
+               dyaop->da_op.get_xip_mem = aufs_aop.get_xip_mem;
+               if (!dyaop->da_get_xip_mem)
+                       dyaop->da_op.get_xip_mem = NULL;
+       }
+}
+
+static struct au_dyaop *dy_aget(struct au_branch *br,
+                               const struct address_space_operations *h_aop,
+                               int do_dx)
+{
+       struct au_dyaop *dyaop;
+       struct au_dynop op;
+
+       op.dy_type = AuDy_AOP;
+       op.dy_haop = h_aop;
+       dyaop = (void *)dy_get(&op, br);
+       if (IS_ERR(dyaop))
+               goto out;
+       dy_adx(dyaop, do_dx);
+
+out:
+       return dyaop;
+}
+
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+               struct inode *h_inode)
+{
+       int err, do_dx;
+       struct super_block *sb;
+       struct au_branch *br;
+       struct au_dyaop *dyaop;
+
+       AuDebugOn(!S_ISREG(h_inode->i_mode));
+       IiMustWriteLock(inode);
+
+       sb = inode->i_sb;
+       br = au_sbr(sb, bindex);
+       do_dx = !!au_opt_test(au_mntflags(sb), DIO);
+       dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx);
+       err = PTR_ERR(dyaop);
+       if (IS_ERR(dyaop))
+               /* unnecessary to call dy_fput() */
+               goto out;
+
+       err = 0;
+       inode->i_mapping->a_ops = &dyaop->da_op;
+
+out:
+       return err;
+}
+
+/*
+ * Is it safe to replace a_ops while the inode/file is in operation?
+ * Yes, I hope so.
+ */
+int au_dy_irefresh(struct inode *inode)
+{
+       int err;
+       aufs_bindex_t bstart;
+       struct inode *h_inode;
+
+       err = 0;
+       if (S_ISREG(inode->i_mode)) {
+               bstart = au_ibstart(inode);
+               h_inode = au_h_iptr(inode, bstart);
+               err = au_dy_iaop(inode, bstart, h_inode);
+       }
+       return err;
+}
+
+void au_dy_arefresh(int do_dx)
+{
+       struct au_splhead *spl;
+       struct list_head *head;
+       struct au_dykey *key;
+
+       spl = dynop + AuDy_AOP;
+       head = &spl->head;
+       spin_lock(&spl->spin);
+       list_for_each_entry(key, head, dk_list)
+               dy_adx((void *)key, do_dx);
+       spin_unlock(&spl->spin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __init au_dy_init(void)
+{
+       int i;
+
+       /* make sure that 'struct au_dykey *' can be any type */
+       BUILD_BUG_ON(offsetof(struct au_dyaop, da_key));
+
+       for (i = 0; i < AuDyLast; i++)
+               au_spl_init(dynop + i);
+}
+
+void au_dy_fin(void)
+{
+       int i;
+
+       for (i = 0; i < AuDyLast; i++)
+               WARN_ON(!list_empty(&dynop[i].head));
+}
diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h
new file mode 100644 (file)
index 0000000..3f2c9aa
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * dynamically customizable operations (for regular files only)
+ */
+
+#ifndef __AUFS_DYNOP_H__
+#define __AUFS_DYNOP_H__
+
+#ifdef __KERNEL__
+
+#include "inode.h"
+
+/* the kinds of operation sets which can be customized (aop only, so far) */
+enum {AuDy_AOP, AuDyLast};
+
+/* identifies one original (branch-side) operation set */
+struct au_dynop {
+       int                                             dy_type;
+       union {
+               const void                              *dy_hop;
+               const struct address_space_operations   *dy_haop;
+       };
+};
+
+/* common head of every cached dynamic operation set */
+struct au_dykey {
+       union {
+               struct list_head        dk_list;
+               struct rcu_head         dk_rcu; /* for deferred free */
+       };
+       struct au_dynop         dk_op;
+
+       /*
+        * while this key stays in the branch local array, kref is held.
+        * when the branch is removed, kref is put.
+        */
+       struct kref             dk_kref;
+};
+
+/* stop unioning since their sizes are very different from each other */
+struct au_dyaop {
+       struct au_dykey                 da_key; /* must be first, see au_dy_init() */
+       struct address_space_operations da_op; /* not const */
+       int (*da_get_xip_mem)(struct address_space *, pgoff_t, int,
+                             void **, unsigned long *);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dynop.c */
+struct au_branch;
+void au_dy_put(struct au_dykey *key);
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+               struct inode *h_inode);
+int au_dy_irefresh(struct inode *inode);
+void au_dy_arefresh(int do_dio);
+
+void __init au_dy_init(void);
+void au_dy_fin(void);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DYNOP_H__ */
diff --git a/fs/aufs/export.c b/fs/aufs/export.c
new file mode 100644 (file)
index 0000000..505062c
--- /dev/null
@@ -0,0 +1,820 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * export via nfs
+ */
+
+#include <linux/exportfs.h>
+#include <linux/mnt_namespace.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/random.h>
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/*
+ * converter between an ino_t and the one or two __u32 words stored in
+ * the NFS file handle (two words when CONFIG_AUFS_INO_T_64).
+ */
+union conv {
+#ifdef CONFIG_AUFS_INO_T_64
+       __u32 a[2];
+#else
+       __u32 a[1];
+#endif
+       ino_t ino;
+};
+
+static ino_t decode_ino(__u32 *a)
+{
+       union conv u;
+
+       /* the __u32 array must exactly cover ino_t */
+       BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
+       u.a[0] = a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+       u.a[1] = a[1];
+#endif
+       return u.ino;
+}
+
+static void encode_ino(__u32 *a, ino_t ino)
+{
+       union conv u;
+
+       u.ino = ino;
+       a[0] = u.a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+       a[1] = u.a[1];
+#endif
+}
+
+/* NFS file handle */
+/* word layout of the aufs-private part of an NFS file handle */
+enum {
+       Fh_br_id,
+       Fh_sigen,
+#ifdef CONFIG_AUFS_INO_T_64
+       /* support 64bit inode number */
+       Fh_ino1,
+       Fh_ino2,
+       Fh_dir_ino1,
+       Fh_dir_ino2,
+#else
+       Fh_ino1,
+       Fh_dir_ino1,
+#endif
+       Fh_igen,
+       Fh_h_type,
+       Fh_tail, /* number of __u32 words aufs consumes before the lower fh */
+
+       /* aliases for the first word of each inode number */
+       Fh_ino = Fh_ino1,
+       Fh_dir_ino = Fh_dir_ino1
+};
+
+/* is @dentry a disconnected (NFS "anonymous") alias? returns 0 or 1 */
+static int au_test_anon(struct dentry *dentry)
+{
+       /* note: read d_flags without d_lock */
+       return !!(dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+/*
+ * is the current task an nfsd kernel thread? decided by matching the
+ * task comm string, which is a heuristic — userspace cannot fake
+ * PF_KTHREAD, but another kthread named "nfsd" would also match.
+ */
+int au_test_nfsd(void)
+{
+       int ret;
+       struct task_struct *tsk = current;
+       char comm[sizeof(tsk->comm)];
+
+       ret = 0;
+       if (tsk->flags & PF_KTHREAD) {
+               get_task_comm(comm, tsk);
+               ret = !strcmp(comm, "nfsd");
+       }
+
+       return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+/* inode generation external table */
+
+/*
+ * bump @inode's generation in the external xigen table (indexed by inode
+ * number). a short write is only logged, not propagated to the caller.
+ */
+void au_xigen_inc(struct inode *inode)
+{
+       loff_t pos;
+       ssize_t sz;
+       __u32 igen;
+       struct super_block *sb;
+       struct au_sbinfo *sbinfo;
+
+       sb = inode->i_sb;
+       AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+
+       sbinfo = au_sbi(sb);
+       /* each inode owns a fixed sizeof(igen) slot at ino * slotsize */
+       pos = inode->i_ino;
+       pos *= sizeof(igen);
+       igen = inode->i_generation + 1;
+       sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
+                        sizeof(igen), &pos);
+       if (sz == sizeof(igen))
+               return; /* success */
+
+       /* sz < 0 means xino_fwrite already reported the error */
+       if (unlikely(sz >= 0))
+               AuIOErr("xigen error (%zd)\n", sz);
+}
+
+/*
+ * assign/load @inode's generation from the external xigen table.
+ * a slot beyond the current table size gets a fresh number from
+ * si_xigen_next and is written back; an existing slot is read in.
+ * returns 0 on success or when xigen does not apply, negative on error.
+ */
+int au_xigen_new(struct inode *inode)
+{
+       int err;
+       loff_t pos;
+       ssize_t sz;
+       struct super_block *sb;
+       struct au_sbinfo *sbinfo;
+       struct file *file;
+
+       err = 0;
+       /* todo: dirty, at mount time */
+       if (inode->i_ino == AUFS_ROOT_INO)
+               goto out;
+       sb = inode->i_sb;
+       SiMustAnyLock(sb);
+       if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+               goto out;
+
+       /* reject inode numbers whose slot offset would overflow the file */
+       err = -EFBIG;
+       pos = inode->i_ino;
+       if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
+               AuIOErr1("too large i%lld\n", pos);
+               goto out;
+       }
+       pos *= sizeof(inode->i_generation);
+
+       err = 0;
+       sbinfo = au_sbi(sb);
+       file = sbinfo->si_xigen;
+       BUG_ON(!file);
+
+       if (i_size_read(file->f_dentry->d_inode)
+           < pos + sizeof(inode->i_generation)) {
+               /* slot not yet present: allocate a new generation number */
+               inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
+               sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
+                                sizeof(inode->i_generation), &pos);
+       } else
+               sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
+                               sizeof(inode->i_generation), &pos);
+       if (sz == sizeof(inode->i_generation))
+               goto out; /* success */
+
+       err = sz;
+       if (unlikely(sz >= 0)) {
+               /* short read/write: map to -EIO */
+               err = -EIO;
+               AuIOErr("xigen error (%zd)\n", sz);
+       }
+
+out:
+       return err;
+}
+
+/*
+ * (re)create the xigen table file from @base and install it into the
+ * sbinfo, dropping any previous table. needs the si write lock.
+ */
+int au_xigen_set(struct super_block *sb, struct file *base)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+       struct file *file;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       file = au_xino_create2(base, sbinfo->si_xigen);
+       err = PTR_ERR(file);
+       if (IS_ERR(file))
+               goto out;
+       err = 0;
+       /* replace the old table, if any */
+       if (sbinfo->si_xigen)
+               fput(sbinfo->si_xigen);
+       sbinfo->si_xigen = file;
+
+out:
+       return err;
+}
+
+/* drop the xigen table file, if installed. needs the si write lock. */
+void au_xigen_clr(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       if (sbinfo->si_xigen) {
+               fput(sbinfo->si_xigen);
+               sbinfo->si_xigen = NULL;
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * find a cached aufs dentry for @ino. returns NULL when nothing usable is
+ * cached, ERR_PTR(-ESTALE) when the cached inode is dead or stale.
+ * when @dir_ino is non-zero, only an alias whose parent has that inode
+ * number is accepted.
+ */
+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
+                                   ino_t dir_ino)
+{
+       struct dentry *dentry, *d;
+       struct inode *inode;
+       unsigned int sigen;
+
+       dentry = NULL;
+       inode = ilookup(sb, ino);
+       if (!inode)
+               goto out;
+
+       dentry = ERR_PTR(-ESTALE);
+       sigen = au_sigen(sb);
+       if (unlikely(is_bad_inode(inode)
+                    || IS_DEADDIR(inode)
+                    || sigen != au_iigen(inode, NULL)))
+               goto out_iput;
+
+       dentry = NULL;
+       if (!dir_ino || S_ISDIR(inode->i_mode))
+               dentry = d_find_alias(inode);
+       else {
+               /* walk the alias list looking for one under dir_ino */
+               spin_lock(&inode->i_lock);
+               list_for_each_entry(d, &inode->i_dentry, d_alias) {
+                       spin_lock(&d->d_lock);
+                       if (!au_test_anon(d)
+                           && d->d_parent->d_inode->i_ino == dir_ino) {
+                               dentry = dget_dlock(d);
+                               spin_unlock(&d->d_lock);
+                               break;
+                       }
+                       spin_unlock(&d->d_lock);
+               }
+               spin_unlock(&inode->i_lock);
+       }
+       if (unlikely(dentry && au_digen_test(dentry, sigen))) {
+               /* dentry generation is stale, caller needs to refresh */
+               dput(dentry);
+               dentry = NULL;
+       }
+
+out_iput:
+       iput(inode);
+out:
+       AuTraceErrPtr(dentry);
+       return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: dirty? */
+/* if exportfs_decode_fh() passed vfsmount*, we could be happy */
+
+/* iterate_mounts() callback context for au_mnt_get() */
+struct au_compare_mnt_args {
+       /* input */
+       struct super_block *sb;
+
+       /* output: referenced mount whose sb matches, or untouched */
+       struct vfsmount *mnt;
+};
+
+/* returns 1 (stop iterating) when @mnt belongs to the wanted sb */
+static int au_compare_mnt(struct vfsmount *mnt, void *arg)
+{
+       struct au_compare_mnt_args *a = arg;
+
+       if (mnt->mnt_sb != a->sb)
+               return 0;
+       a->mnt = mntget(mnt);
+       return 1;
+}
+
+/*
+ * find a vfsmount of @sb in the current mount namespace and return it
+ * with a reference held. the caller must mntput() it. a match is assumed
+ * to exist (AuDebugOn otherwise) since @sb is a mounted aufs.
+ */
+static struct vfsmount *au_mnt_get(struct super_block *sb)
+{
+       int err;
+       struct au_compare_mnt_args args = {
+               .sb = sb
+       };
+       struct mnt_namespace *ns;
+
+       br_read_lock(vfsmount_lock);
+       /* no get/put ?? */
+       AuDebugOn(!current->nsproxy);
+       ns = current->nsproxy->mnt_ns;
+       AuDebugOn(!ns);
+       err = iterate_mounts(au_compare_mnt, &args, ns->root);
+       br_read_unlock(vfsmount_lock);
+       AuDebugOn(!err);
+       AuDebugOn(!args.mnt);
+       return args.mnt;
+}
+
+/* state for taking/retaking the si read lock across nfsd decoding */
+struct au_nfsd_si_lock {
+       unsigned int sigen;     /* generation recorded in the fh */
+       aufs_bindex_t bindex, br_id;
+       unsigned char force_lock; /* keep si locked even on failure */
+};
+
+/*
+ * take the si read lock and validate that the branch id and generation
+ * from the file handle are still live. on failure returns -ESTALE and,
+ * unless force_lock, drops the lock again. nsi_lock->bindex is set to
+ * the resolved branch index (-1 on failure).
+ */
+static int si_nfsd_read_lock(struct super_block *sb,
+                            struct au_nfsd_si_lock *nsi_lock)
+{
+       int err;
+       aufs_bindex_t bindex;
+
+       si_read_lock(sb, AuLock_FLUSH);
+
+       /* branch id may be wrapped around */
+       err = 0;
+       bindex = au_br_index(sb, nsi_lock->br_id);
+       if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
+               goto out; /* success */
+
+       err = -ESTALE;
+       bindex = -1;
+       if (!nsi_lock->force_lock)
+               si_read_unlock(sb);
+
+out:
+       nsi_lock->bindex = bindex;
+       return err;
+}
+
+/* vfsub_readdir() callback context: search a dir for an inode number */
+struct find_name_by_ino {
+       int called, found;
+       ino_t ino;      /* target inode number */
+       char *name;     /* out: entry name (not NUL-terminated) */
+       int namelen;
+};
+
+/* filldir callback: record the name of the entry matching a->ino */
+static int
+find_name_by_ino(void *arg, const char *name, int namelen, loff_t offset,
+                u64 ino, unsigned int d_type)
+{
+       struct find_name_by_ino *a = arg;
+
+       a->called++;
+       if (a->ino != ino)
+               return 0;
+
+       /* returning 1 stops the readdir iteration */
+       memcpy(a->name, name, namelen);
+       a->namelen = namelen;
+       a->found = 1;
+       return 1;
+}
+
+/*
+ * find the entry with inode number @ino by scanning the directory at
+ * @path with readdir, then look the found name up. when @nsi_lock is
+ * non-NULL the si lock is dropped around the (possibly slow) scan and
+ * retaken before returning; a failed relock turns success into -ESTALE.
+ * returns a referenced dentry or an ERR_PTR.
+ */
+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
+                                    struct au_nfsd_si_lock *nsi_lock)
+{
+       struct dentry *dentry, *parent;
+       struct file *file;
+       struct inode *dir;
+       struct find_name_by_ino arg;
+       int err;
+
+       parent = path->dentry;
+       if (nsi_lock)
+               si_read_unlock(parent->d_sb);
+       file = vfsub_dentry_open(path, au_dir_roflags);
+       dentry = (void *)file;
+       if (IS_ERR(file))
+               goto out;
+
+       dentry = ERR_PTR(-ENOMEM);
+       arg.name = __getname_gfp(GFP_NOFS);
+       if (unlikely(!arg.name))
+               goto out_file;
+       arg.ino = ino;
+       arg.found = 0;
+       /* keep reading until found, error, or the dir is exhausted */
+       do {
+               arg.called = 0;
+               /* smp_mb(); */
+               err = vfsub_readdir(file, find_name_by_ino, &arg);
+       } while (!err && !arg.found && arg.called);
+       dentry = ERR_PTR(err);
+       if (unlikely(err))
+               goto out_name;
+       /* instead of ENOENT */
+       dentry = ERR_PTR(-ESTALE);
+       if (!arg.found)
+               goto out_name;
+
+       /* do not call au_lkup_one() */
+       dir = parent->d_inode;
+       mutex_lock(&dir->i_mutex);
+       dentry = vfsub_lookup_one_len(arg.name, parent, arg.namelen);
+       mutex_unlock(&dir->i_mutex);
+       AuTraceErrPtr(dentry);
+       if (IS_ERR(dentry))
+               goto out_name;
+       AuDebugOn(au_test_anon(dentry));
+       if (unlikely(!dentry->d_inode)) {
+               /* negative dentry: the entry vanished meanwhile */
+               dput(dentry);
+               dentry = ERR_PTR(-ENOENT);
+       }
+
+out_name:
+       __putname(arg.name);
+out_file:
+       fput(file);
+out:
+       if (unlikely(nsi_lock
+                    && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
+               if (!IS_ERR(dentry)) {
+                       dput(dentry);
+                       dentry = ERR_PTR(-ESTALE);
+               }
+       AuTraceErrPtr(dentry);
+       return dentry;
+}
+
+/*
+ * decode @ino by first resolving its parent dir from the cache
+ * (decode_by_ino), then scanning that dir for the child. returns NULL
+ * when the parent is not cached, a referenced dentry, or an ERR_PTR.
+ */
+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
+                                       ino_t dir_ino,
+                                       struct au_nfsd_si_lock *nsi_lock)
+{
+       struct dentry *dentry;
+       struct path path;
+
+       if (dir_ino != AUFS_ROOT_INO) {
+               path.dentry = decode_by_ino(sb, dir_ino, 0);
+               dentry = path.dentry;
+               if (!path.dentry || IS_ERR(path.dentry))
+                       goto out;
+               AuDebugOn(au_test_anon(path.dentry));
+       } else
+               path.dentry = dget(sb->s_root);
+
+       /* path_put() below releases both the dentry and mnt refs */
+       path.mnt = au_mnt_get(sb);
+       dentry = au_lkup_by_ino(&path, ino, nsi_lock);
+       path_put(&path);
+
+out:
+       AuTraceErrPtr(dentry);
+       return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* exportfs_decode_fh() acceptability callback: accept any dentry */
+static int h_acceptable(void *expv, struct dentry *dentry)
+{
+       return 1;
+}
+
+/*
+ * build in @buf the aufs-side path of @h_parent: the aufs mount point
+ * followed by h_parent's path relative to the branch root. the pieces
+ * are spliced in-place by offsetting d_path() results within @buf.
+ * returns a pointer into @buf, or an ERR_PTR from d_path().
+ */
+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
+                          char *buf, int len, struct super_block *sb)
+{
+       char *p;
+       int n;
+       struct path path;
+
+       /* n = length of the branch root path prefix to strip */
+       p = d_path(h_rootpath, buf, len);
+       if (IS_ERR(p))
+               goto out;
+       n = strlen(p);
+
+       path.mnt = h_rootpath->mnt;
+       path.dentry = h_parent;
+       p = d_path(&path, buf, len);
+       if (IS_ERR(p))
+               goto out;
+       /* n == 1 means the branch root is "/", nothing to strip */
+       if (n != 1)
+               p += n;
+
+       path.mnt = au_mnt_get(sb);
+       path.dentry = sb->s_root;
+       p = d_path(&path, buf, len - strlen(p));
+       mntput(path.mnt);
+       if (IS_ERR(p))
+               goto out;
+       if (n != 1)
+               /* rejoin the two components over the NUL terminator */
+               p[strlen(p)] = '/';
+
+out:
+       AuTraceErrPtr(p);
+       return p;
+}
+
+/*
+ * last-resort decode: let the branch fs decode the lower file handle,
+ * rebuild the corresponding aufs pathname and walk it, then locate @ino
+ * inside the resulting directory. the si lock is dropped around the
+ * path walk and retaken via nsi_lock; a failed relock yields -ESTALE.
+ */
+static
+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh,
+                             int fh_len, struct au_nfsd_si_lock *nsi_lock)
+{
+       struct dentry *dentry, *h_parent, *root;
+       struct super_block *h_sb;
+       char *pathname, *p;
+       struct vfsmount *h_mnt;
+       struct au_branch *br;
+       int err;
+       struct path path;
+
+       br = au_sbr(sb, nsi_lock->bindex);
+       h_mnt = au_br_mnt(br);
+       h_sb = h_mnt->mnt_sb;
+       /* todo: call lower fh_to_dentry()? fh_to_parent()? */
+       h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
+                                     fh_len - Fh_tail, fh[Fh_h_type],
+                                     h_acceptable, /*context*/NULL);
+       dentry = h_parent;
+       if (unlikely(!h_parent || IS_ERR(h_parent))) {
+               AuWarn1("%s decode_fh failed, %ld\n",
+                       au_sbtype(h_sb), PTR_ERR(h_parent));
+               goto out;
+       }
+       dentry = NULL;
+       if (unlikely(au_test_anon(h_parent))) {
+               AuWarn1("%s decode_fh returned a disconnected dentry\n",
+                       au_sbtype(h_sb));
+               goto out_h_parent;
+       }
+
+       dentry = ERR_PTR(-ENOMEM);
+       pathname = (void *)__get_free_page(GFP_NOFS);
+       if (unlikely(!pathname))
+               goto out_h_parent;
+
+       root = sb->s_root;
+       path.mnt = h_mnt;
+       di_read_lock_parent(root, !AuLock_IR);
+       path.dentry = au_h_dptr(root, nsi_lock->bindex);
+       di_read_unlock(root, !AuLock_IR);
+       p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
+       dentry = (void *)p;
+       if (IS_ERR(p))
+               goto out_pathname;
+
+       /* drop si around the path walk, it may re-enter aufs lookup */
+       si_read_unlock(sb);
+       err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+       dentry = ERR_PTR(err);
+       if (unlikely(err))
+               goto out_relock;
+
+       dentry = ERR_PTR(-ENOENT);
+       AuDebugOn(au_test_anon(path.dentry));
+       if (unlikely(!path.dentry->d_inode))
+               goto out_path;
+
+       if (ino != path.dentry->d_inode->i_ino)
+               dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
+       else
+               dentry = dget(path.dentry);
+
+out_path:
+       path_put(&path);
+out_relock:
+       if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
+               if (!IS_ERR(dentry)) {
+                       dput(dentry);
+                       dentry = ERR_PTR(-ESTALE);
+               }
+out_pathname:
+       free_page((unsigned long)pathname);
+out_h_parent:
+       dput(h_parent);
+out:
+       AuTraceErrPtr(dentry);
+       return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * export_operations.fh_to_dentry for aufs. tries three strategies in
+ * order of cost: the inode cache, a readdir scan of the cached parent
+ * dir, and a full lower-fs decode + path walk. the decoded dentry is
+ * finally checked against the generations stored in the handle.
+ */
+static struct dentry *
+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
+                 int fh_type)
+{
+       struct dentry *dentry;
+       __u32 *fh = fid->raw;
+       struct au_branch *br;
+       ino_t ino, dir_ino;
+       struct au_nfsd_si_lock nsi_lock = {
+               .force_lock     = 0
+       };
+
+       dentry = ERR_PTR(-ESTALE);
+       /* it should never happen, but the file handle is unreliable */
+       if (unlikely(fh_len < Fh_tail))
+               goto out;
+       nsi_lock.sigen = fh[Fh_sigen];
+       nsi_lock.br_id = fh[Fh_br_id];
+
+       /* branch id may be wrapped around */
+       br = NULL;
+       if (unlikely(si_nfsd_read_lock(sb, &nsi_lock)))
+               goto out;
+       /* keep si held across the remaining attempts */
+       nsi_lock.force_lock = 1;
+
+       /* is this inode still cached? */
+       ino = decode_ino(fh + Fh_ino);
+       /* it should never happen */
+       if (unlikely(ino == AUFS_ROOT_INO))
+               goto out;
+
+       dir_ino = decode_ino(fh + Fh_dir_ino);
+       dentry = decode_by_ino(sb, ino, dir_ino);
+       if (IS_ERR(dentry))
+               goto out_unlock;
+       if (dentry)
+               goto accept;
+
+       /* is the parent dir cached? */
+       br = au_sbr(sb, nsi_lock.bindex);
+       /* pin the branch while si may be dropped inside the helpers */
+       atomic_inc(&br->br_count);
+       dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
+       if (IS_ERR(dentry))
+               goto out_unlock;
+       if (dentry)
+               goto accept;
+
+       /* lookup path */
+       dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock);
+       if (IS_ERR(dentry))
+               goto out_unlock;
+       if (unlikely(!dentry))
+               /* todo?: make it ESTALE */
+               goto out_unlock;
+
+accept:
+       if (!au_digen_test(dentry, au_sigen(sb))
+           && dentry->d_inode->i_generation == fh[Fh_igen])
+               goto out_unlock; /* success */
+
+       dput(dentry);
+       dentry = ERR_PTR(-ESTALE);
+out_unlock:
+       if (br)
+               atomic_dec(&br->br_count);
+       si_read_unlock(sb);
+out:
+       AuTraceErrPtr(dentry);
+       return dentry;
+}
+
+#if 0 /* reserved for future use */
+/* support subtreecheck option */
+/*
+ * NOTE(review): dead code kept for reference; the decode_by_path() call
+ * below does not match the current signature — revise before enabling.
+ */
+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+                                       int fh_len, int fh_type)
+{
+       struct dentry *parent;
+       __u32 *fh = fid->raw;
+       ino_t dir_ino;
+
+       dir_ino = decode_ino(fh + Fh_dir_ino);
+       parent = decode_by_ino(sb, dir_ino, 0);
+       if (IS_ERR(parent))
+               goto out;
+       if (!parent)
+               parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
+                                       dir_ino, fh, fh_len);
+
+out:
+       AuTraceErrPtr(parent);
+       return parent;
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * export_operations.encode_fh for aufs. stores branch id, sb/inode
+ * generations and the (dir) inode numbers, then appends the lower fs
+ * handle of the first existing parent branch. returns the aufs fh type
+ * (99), FILEID_ROOT for the root dentry, or 255 on failure.
+ */
+static int aufs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
+                         int connectable)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       struct super_block *sb, *h_sb;
+       struct inode *inode;
+       struct dentry *parent, *h_parent;
+       struct au_branch *br;
+
+       AuDebugOn(au_test_anon(dentry));
+
+       parent = NULL;
+       err = -ENOSPC;
+       if (unlikely(*max_len <= Fh_tail)) {
+               AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
+               goto out;
+       }
+
+       err = FILEID_ROOT;
+       if (IS_ROOT(dentry)) {
+               AuDebugOn(dentry->d_inode->i_ino != AUFS_ROOT_INO);
+               goto out;
+       }
+
+       h_parent = NULL;
+       err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_IR | AuLock_GEN);
+       if (unlikely(err))
+               goto out;
+
+       inode = dentry->d_inode;
+       AuDebugOn(!inode);
+       sb = dentry->d_sb;
+#ifdef CONFIG_AUFS_DEBUG
+       if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+               AuWarn1("NFS-exporting requires xino\n");
+#endif
+       err = -EIO;
+       parent = dget_parent(dentry);
+       di_read_lock_parent(parent, !AuLock_IR);
+       /* pick the topmost branch where the parent dir exists */
+       bend = au_dbtaildir(parent);
+       for (bindex = au_dbstart(parent); bindex <= bend; bindex++) {
+               h_parent = au_h_dptr(parent, bindex);
+               if (h_parent) {
+                       dget(h_parent);
+                       break;
+               }
+       }
+       if (unlikely(!h_parent))
+               goto out_unlock;
+
+       err = -EPERM;
+       br = au_sbr(sb, bindex);
+       h_sb = au_br_sb(br);
+       if (unlikely(!h_sb->s_export_op)) {
+               AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
+               goto out_dput;
+       }
+
+       fh[Fh_br_id] = br->br_id;
+       fh[Fh_sigen] = au_sigen(sb);
+       encode_ino(fh + Fh_ino, inode->i_ino);
+       encode_ino(fh + Fh_dir_ino, parent->d_inode->i_ino);
+       fh[Fh_igen] = inode->i_generation;
+
+       /* let the branch encode into the remaining words */
+       *max_len -= Fh_tail;
+       fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
+                                          max_len,
+                                          /*connectable or subtreecheck*/0);
+       err = fh[Fh_h_type];
+       *max_len += Fh_tail;
+       /* todo: macros? 255 is the exportfs failure code, 99 the aufs type */
+       if (err != 255)
+               err = 99;
+       else
+               AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
+
+out_dput:
+       dput(h_parent);
+out_unlock:
+       di_read_unlock(parent, !AuLock_IR);
+       dput(parent);
+       aufs_read_unlock(dentry, AuLock_IR);
+out:
+       if (unlikely(err < 0))
+               err = 255;
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * export_operations.commit_metadata: delegate to the top branch's
+ * commit_metadata when it has one, otherwise force a synchronous
+ * metadata-only writeback of the branch inode.
+ */
+static int aufs_commit_metadata(struct inode *inode)
+{
+       int err;
+       aufs_bindex_t bindex;
+       struct super_block *sb;
+       struct inode *h_inode;
+       int (*f)(struct inode *inode);
+
+       sb = inode->i_sb;
+       si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+       ii_write_lock_child(inode);
+       bindex = au_ibstart(inode);
+       AuDebugOn(bindex < 0);
+       h_inode = au_h_iptr(inode, bindex);
+
+       f = h_inode->i_sb->s_export_op->commit_metadata;
+       if (f)
+               err = f(h_inode);
+       else {
+               struct writeback_control wbc = {
+                       .sync_mode      = WB_SYNC_ALL,
+                       .nr_to_write    = 0 /* metadata only */
+               };
+
+               err = sync_inode(h_inode, &wbc);
+       }
+
+       /* reflect any timestamp/size changes back into the aufs inode */
+       au_cpup_attr_timesizes(inode);
+       ii_write_unlock(inode);
+       si_read_unlock(sb);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* the export_operations installed on every aufs super block */
+static struct export_operations aufs_export_op = {
+       .fh_to_dentry           = aufs_fh_to_dentry,
+       /* .fh_to_parent        = aufs_fh_to_parent, */
+       .encode_fh              = aufs_encode_fh,
+       .commit_metadata        = aufs_commit_metadata
+};
+
+/*
+ * mount-time setup: install the export ops and seed the xigen counter
+ * with a random value so generations differ across mounts.
+ */
+void au_export_init(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+       __u32 u;
+
+       sb->s_export_op = &aufs_export_op;
+       sbinfo = au_sbi(sb);
+       sbinfo->si_xigen = NULL;
+       get_random_bytes(&u, sizeof(u));
+       /* si_xigen_next is an atomic_t, so u must be int-sized */
+       BUILD_BUG_ON(sizeof(u) != sizeof(int));
+       atomic_set(&sbinfo->si_xigen_next, u);
+}
diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c
new file mode 100644 (file)
index 0000000..d6318f0
--- /dev/null
@@ -0,0 +1,729 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file and vm operations
+ */
+
+#include <linux/fs_stack.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/*
+ * open the branch-side file for a non-directory aufs file and install
+ * it into the finfo. called with the fi write lock held (the generic
+ * au_do_open() driver passes us here).
+ */
+int au_do_open_nondir(struct file *file, int flags)
+{
+       int err;
+       aufs_bindex_t bindex;
+       struct file *h_file;
+       struct dentry *dentry;
+       struct au_finfo *finfo;
+
+       FiMustWriteLock(file);
+
+       dentry = file->f_dentry;
+       err = au_d_alive(dentry);
+       if (unlikely(err))
+               goto out;
+
+       finfo = au_fi(file);
+       memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop));
+       atomic_set(&finfo->fi_mmapped, 0);
+       /* open on the topmost branch where the dentry exists */
+       bindex = au_dbstart(dentry);
+       h_file = au_h_open(dentry, bindex, flags, file);
+       if (IS_ERR(h_file))
+               err = PTR_ERR(h_file);
+       else {
+               au_set_fbstart(file, bindex);
+               au_set_h_fptr(file, bindex, h_file);
+               au_update_figen(file);
+               /* todo: necessary? */
+               /* file->f_ra = h_file->f_ra; */
+       }
+
+out:
+       return err;
+}
+
+/* file_operations.open for non-directories: run the common open driver */
+static int aufs_open_nondir(struct inode *inode __maybe_unused,
+                           struct file *file)
+{
+       int err;
+       struct super_block *sb;
+
+       AuDbg("%.*s, f_flags 0x%x, f_mode 0x%x\n",
+             AuDLNPair(file->f_dentry), vfsub_file_flags(file),
+             file->f_mode);
+
+       sb = file->f_dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       err = au_do_open(file, au_do_open_nondir, /*fidir*/NULL);
+       si_read_unlock(sb);
+       return err;
+}
+
+/*
+ * file_operations.release for non-directories: drop the branch-side
+ * file (if any was opened) and tear down the finfo. always returns 0.
+ */
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file)
+{
+       struct au_finfo *finfo;
+       aufs_bindex_t bindex;
+
+       finfo = au_fi(file);
+       bindex = finfo->fi_btop;
+       if (bindex >= 0) {
+               /* remove me from sb->s_files */
+               file_sb_list_del(file);
+               au_set_h_fptr(file, bindex, NULL);
+       }
+
+       au_finfo_fin(file);
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* flush the top branch file, if one is open; no-op (0) otherwise */
+static int au_do_flush_nondir(struct file *file, fl_owner_t id)
+{
+       int err;
+       struct file *h_file;
+
+       err = 0;
+       h_file = au_hf_top(file);
+       if (h_file)
+               err = vfsub_flush(h_file, id);
+       return err;
+}
+
+/* file_operations.flush for non-directories */
+static int aufs_flush_nondir(struct file *file, fl_owner_t id)
+{
+       return au_do_flush(file, id, au_do_flush_nondir);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * read and write functions acquire [fdi]_rwsem once, but release them before
+ * mmap_sem, in order to stop a race condition with mmap(2).
+ * Releasing these aufs-rwsem should be safe, no branch-management (by keeping
+ * si_rwsem), no harmful copy-up should happen. Actually copy-up may happen in
+ * read functions after [fdi]_rwsem are released, but it should be harmless.
+ */
+
+/*
+ * file_operations.read: revalidate the file, grab a reference to the
+ * branch file, drop the d/f locks (see the comment block above), and
+ * read through the branch file.
+ */
+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
+                        loff_t *ppos)
+{
+       ssize_t err;
+       struct dentry *dentry;
+       struct file *h_file;
+       struct super_block *sb;
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+       if (unlikely(err))
+               goto out;
+
+       /* get_file() keeps h_file alive after the locks are dropped */
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       di_read_unlock(dentry, AuLock_IR);
+       fi_read_unlock(file);
+
+       /* filedata may be obsoleted by concurrent copyup, but no problem */
+       err = vfsub_read_u(h_file, buf, count, ppos);
+       /* todo: necessary? */
+       /* file->f_ra = h_file->f_ra; */
+       /* update without lock, I don't think it a problem */
+       fsstack_copy_attr_atime(dentry->d_inode, h_file->f_dentry->d_inode);
+       fput(h_file);
+
+out:
+       si_read_unlock(sb);
+       return err;
+}
+
+/*
+ * todo: very ugly
+ * it locks both of i_mutex and si_rwsem for read in safe.
+ * if the plink maintenance mode continues forever (that is the problem),
+ * may loop forever.
+ */
+/*
+ * todo: very ugly
+ * it locks both of i_mutex and si_rwsem for read in safe.
+ * if the plink maintenance mode continues forever (that is the problem),
+ * may loop forever.
+ */
+static void au_mtx_and_read_lock(struct inode *inode)
+{
+       int err;
+       struct super_block *sb = inode->i_sb;
+
+       while (1) {
+               mutex_lock(&inode->i_mutex);
+               /* NOPLM fails instead of blocking while plink maint runs */
+               err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+               if (!err)
+                       break;
+               /* back off: wait for plink maintenance to end, then retry */
+               mutex_unlock(&inode->i_mutex);
+               si_read_lock(sb, AuLock_NOPLMW);
+               si_read_unlock(sb);
+       }
+}
+
+/*
+ * file_operations.write: revalidate, make sure the file is writable
+ * (possibly triggering copy-up via au_ready_to_write), drop the d/f
+ * locks, then write through the branch file and copy attributes back.
+ */
+static ssize_t aufs_write(struct file *file, const char __user *ubuf,
+                         size_t count, loff_t *ppos)
+{
+       ssize_t err;
+       struct au_pin pin;
+       struct dentry *dentry;
+       struct super_block *sb;
+       struct inode *inode;
+       struct file *h_file;
+       char __user *buf = (char __user *)ubuf;
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       au_mtx_and_read_lock(inode);
+
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+
+       err = au_ready_to_write(file, -1, &pin);
+       di_downgrade_lock(dentry, AuLock_IR);
+       if (unlikely(err)) {
+               di_read_unlock(dentry, AuLock_IR);
+               fi_write_unlock(file);
+               goto out;
+       }
+
+       /* get_file() keeps h_file alive after the locks are dropped */
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       au_unpin(&pin);
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+
+       err = vfsub_write_u(h_file, buf, count, ppos);
+       /* propagate times/size/mode changes back to the aufs inode */
+       ii_write_lock_child(inode);
+       au_cpup_attr_timesizes(inode);
+       inode->i_mode = h_file->f_dentry->d_inode->i_mode;
+       ii_write_unlock(inode);
+       fput(h_file);
+
+out:
+       si_read_unlock(sb);
+       mutex_unlock(&inode->i_mutex);
+       return err;
+}
+
+/*
+ * Forward an AIO request to the branch file's ->aio_read or ->aio_write,
+ * selected by @rw (MAY_READ/MAY_WRITE).  kio->ki_filp is temporarily
+ * swapped to @h_file for the duration of the call and then restored.
+ * lockdep is disabled across the call because the branch fs may take
+ * locks in an order aufs already validated itself.
+ */
+static ssize_t au_do_aio(struct file *h_file, int rw, struct kiocb *kio,
+                        const struct iovec *iov, unsigned long nv, loff_t pos)
+{
+       ssize_t err;
+       struct file *file;
+       ssize_t (*func)(struct kiocb *, const struct iovec *, unsigned long,
+                       loff_t);
+
+       err = security_file_permission(h_file, rw);
+       if (unlikely(err))
+               goto out;
+
+       err = -ENOSYS;
+       func = NULL;
+       if (rw == MAY_READ)
+               func = h_file->f_op->aio_read;
+       else if (rw == MAY_WRITE)
+               func = h_file->f_op->aio_write;
+       if (func) {
+               file = kio->ki_filp;
+               kio->ki_filp = h_file;
+               lockdep_off();
+               err = func(kio, iov, nv, pos);
+               lockdep_on();
+               kio->ki_filp = file;
+       } else
+               /* currently there is no such fs */
+               WARN_ON_ONCE(1);
+
+out:
+       return err;
+}
+
+/*
+ * aio_read for a regular file on aufs: revalidate, grab a reference to
+ * the top branch file, drop the aufs locks and delegate via au_do_aio().
+ */
+static ssize_t aufs_aio_read(struct kiocb *kio, const struct iovec *iov,
+                            unsigned long nv, loff_t pos)
+{
+       ssize_t err;
+       struct file *file, *h_file;
+       struct dentry *dentry;
+       struct super_block *sb;
+
+       file = kio->ki_filp;
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+       if (unlikely(err))
+               goto out;
+
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       di_read_unlock(dentry, AuLock_IR);
+       fi_read_unlock(file);
+
+       err = au_do_aio(h_file, MAY_READ, kio, iov, nv, pos);
+       /* todo: necessary? */
+       /* file->f_ra = h_file->f_ra; */
+       /* update without lock, I don't think it a problem */
+       fsstack_copy_attr_atime(dentry->d_inode, h_file->f_dentry->d_inode);
+       fput(h_file);
+
+out:
+       si_read_unlock(sb);
+       return err;
+}
+
+/*
+ * aio_write for a regular file on aufs: same lock/copy-up dance as
+ * aufs_write(), but the I/O itself is delegated via au_do_aio().
+ */
+static ssize_t aufs_aio_write(struct kiocb *kio, const struct iovec *iov,
+                             unsigned long nv, loff_t pos)
+{
+       ssize_t err;
+       struct au_pin pin;
+       struct dentry *dentry;
+       struct inode *inode;
+       struct file *file, *h_file;
+       struct super_block *sb;
+
+       file = kio->ki_filp;
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       au_mtx_and_read_lock(inode);
+
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+
+       err = au_ready_to_write(file, -1, &pin);
+       di_downgrade_lock(dentry, AuLock_IR);
+       if (unlikely(err)) {
+               di_read_unlock(dentry, AuLock_IR);
+               fi_write_unlock(file);
+               goto out;
+       }
+
+       /* hold our own reference so the branch file survives the unlocks */
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       au_unpin(&pin);
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+
+       err = au_do_aio(h_file, MAY_WRITE, kio, iov, nv, pos);
+       /* reflect the branch file's timestamps/size/mode on the aufs inode */
+       ii_write_lock_child(inode);
+       au_cpup_attr_timesizes(inode);
+       inode->i_mode = h_file->f_dentry->d_inode->i_mode;
+       ii_write_unlock(inode);
+       fput(h_file);
+
+out:
+       si_read_unlock(sb);
+       mutex_unlock(&inode->i_mutex);
+       return err;
+}
+
+/*
+ * splice_read for a regular file on aufs.  When called from the
+ * loopback driver's kthread, file->f_mapping is redirected to the
+ * branch file's mapping so loop I/O sees the real pages.
+ */
+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
+                               struct pipe_inode_info *pipe, size_t len,
+                               unsigned int flags)
+{
+       ssize_t err;
+       struct file *h_file;
+       struct dentry *dentry;
+       struct super_block *sb;
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+       if (unlikely(err))
+               goto out;
+
+       err = -EINVAL;
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       if (au_test_loopback_kthread()) {
+               au_warn_loopback(h_file->f_dentry->d_sb);
+               if (file->f_mapping != h_file->f_mapping) {
+                       file->f_mapping = h_file->f_mapping;
+                       smp_mb(); /* unnecessary? */
+               }
+       }
+       di_read_unlock(dentry, AuLock_IR);
+       fi_read_unlock(file);
+
+       err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
+       /* todo: necessary? */
+       /* file->f_ra = h_file->f_ra; */
+       /* update without lock, I don't think it a problem */
+       fsstack_copy_attr_atime(dentry->d_inode, h_file->f_dentry->d_inode);
+       fput(h_file);
+
+out:
+       si_read_unlock(sb);
+       return err;
+}
+
+/*
+ * splice_write for a regular file on aufs: same lock/copy-up dance as
+ * aufs_write(), delegating the actual splice to the branch file.
+ */
+static ssize_t
+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
+                 size_t len, unsigned int flags)
+{
+       ssize_t err;
+       struct au_pin pin;
+       struct dentry *dentry;
+       struct inode *inode;
+       struct file *h_file;
+       struct super_block *sb;
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       au_mtx_and_read_lock(inode);
+
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+
+       err = au_ready_to_write(file, -1, &pin);
+       di_downgrade_lock(dentry, AuLock_IR);
+       if (unlikely(err)) {
+               di_read_unlock(dentry, AuLock_IR);
+               fi_write_unlock(file);
+               goto out;
+       }
+
+       /* hold our own reference so the branch file survives the unlocks */
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       au_unpin(&pin);
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+
+       err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
+       /* reflect the branch file's timestamps/size/mode on the aufs inode */
+       ii_write_lock_child(inode);
+       au_cpup_attr_timesizes(inode);
+       inode->i_mode = h_file->f_dentry->d_inode->i_mode;
+       ii_write_unlock(inode);
+       fput(h_file);
+
+out:
+       si_read_unlock(sb);
+       mutex_unlock(&inode->i_mutex);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * The locking order around current->mmap_sem.
+ * - in most and regular cases
+ *   file I/O syscall -- aufs_read() or something
+ *     -- si_rwsem for read -- mmap_sem
+ *     (Note that [fdi]i_rwsem are released before mmap_sem).
+ * - in mmap case
+ *   mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem
+ * This AB-BA order is definitely bad, but is not a problem since "si_rwsem for
+ * read" allows multiple processes to acquire it and [fdi]i_rwsem are not held
+ * in file I/O. Aufs needs to stop lockdep in aufs_mmap() though.
+ * It means that when aufs acquires si_rwsem for write, the process should
+ * never acquire mmap_sem.
+ *
+ * Actually aufs_readdir() holds [fdi]i_rwsem before mmap_sem, but this is not
+ * a problem either, since a directory cannot be mmap-ed.
+ * The same reasoning applies to aufs_readlink() too.
+ */
+
+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */
+#define AuConv_VM_PROT(f, b)   _calc_vm_trans(f, VM_##b, PROT_##b)
+
+/*
+ * Convert architecture-specific VM_* flags back to their PROT_* bits.
+ * Only ppc64 defines such a bit (VM_SAO) at this kernel version; every
+ * other arch has none, which the AuDebugOn asserts.
+ */
+static unsigned long au_arch_prot_conv(unsigned long flags)
+{
+       /* currently ppc64 only */
+#ifdef CONFIG_PPC64
+       /* cf. linux/arch/powerpc/include/asm/mman.h */
+       AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO);
+       return AuConv_VM_PROT(flags, SAO);
+#else
+       AuDebugOn(arch_calc_vm_prot_bits(-1));
+       return 0;
+#endif
+}
+
+/* convert vma->vm_flags back to the PROT_* bits for security_file_mmap() */
+static unsigned long au_prot_conv(unsigned long flags)
+{
+       return AuConv_VM_PROT(flags, READ)
+               | AuConv_VM_PROT(flags, WRITE)
+               | AuConv_VM_PROT(flags, EXEC)
+               | au_arch_prot_conv(flags);
+}
+
+/* cf. linux/include/linux/mman.h: calc_vm_flag_bits() */
+#define AuConv_VM_MAP(f, b)    _calc_vm_trans(f, VM_##b, MAP_##b)
+
+/* convert vma->vm_flags back to the MAP_* bits for security_file_mmap() */
+static unsigned long au_flag_conv(unsigned long flags)
+{
+       return AuConv_VM_MAP(flags, GROWSDOWN)
+               | AuConv_VM_MAP(flags, DENYWRITE)
+               | AuConv_VM_MAP(flags, EXECUTABLE)
+               | AuConv_VM_MAP(flags, LOCKED);
+}
+
+/*
+ * mmap(2) for a regular file on aufs: make the top branch ready
+ * (copy-up first for a shared writable mapping), then hand the vma to
+ * the branch file's ->mmap.  lockdep is switched off around the aufs
+ * locks; see the mmap_sem ordering comment above.
+ */
+static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int err;
+       unsigned long prot;
+       aufs_bindex_t bstart;
+       /* a shared writable mapping must hit the writable branch */
+       const unsigned char wlock
+               = (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
+       struct dentry *dentry;
+       struct super_block *sb;
+       struct file *h_file;
+       struct au_branch *br;
+       struct au_pin pin;
+
+       AuDbgVmRegion(file, vma);
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       lockdep_off();
+       si_read_lock(sb, AuLock_NOPLMW);
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+
+       if (wlock) {
+               err = au_ready_to_write(file, -1, &pin);
+               di_write_unlock(dentry);
+               if (unlikely(err)) {
+                       fi_write_unlock(file);
+                       goto out;
+               }
+               au_unpin(&pin);
+       } else
+               di_write_unlock(dentry);
+
+       bstart = au_fbstart(file);
+       br = au_sbr(sb, bstart);
+       h_file = au_hf_top(file);
+       get_file(h_file);
+       au_set_mmapped(file);
+       fi_write_unlock(file);
+       lockdep_on();
+
+       /* point the vma at the branch file before calling its ->mmap */
+       au_vm_file_reset(vma, h_file);
+       prot = au_prot_conv(vma->vm_flags);
+       err = security_file_mmap(h_file, /*reqprot*/prot, prot,
+                                au_flag_conv(vma->vm_flags), vma->vm_start, 0);
+       if (!err)
+               err = h_file->f_op->mmap(h_file, vma);
+       if (unlikely(err))
+               goto out_reset;
+
+       au_vm_prfile_set(vma, file);
+       /* update without lock, I don't think it a problem */
+       fsstack_copy_attr_atime(file->f_dentry->d_inode,
+                               h_file->f_dentry->d_inode);
+       goto out_fput; /* success */
+
+out_reset:
+       /* restore the vma to the aufs file on failure */
+       au_unset_mmapped(file);
+       au_vm_file_reset(vma, file);
+out_fput:
+       fput(h_file);
+       lockdep_off();
+out:
+       si_read_unlock(sb);
+       lockdep_on();
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * fsync(2)/fdatasync(2) for a regular file on aufs.
+ * A read-only fd is silently accepted (err = 0); otherwise the file is
+ * made ready for writing (possible copy-up) and the branch file is
+ * fsync-ed.  @start/@end are currently unused; the whole file is synced.
+ *
+ * Note: the original code had a dead "err = -EINVAL;" store that was
+ * unconditionally overwritten by the vfsub_fsync() result; removed.
+ */
+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end,
+                            int datasync)
+{
+       int err;
+       struct au_pin pin;
+       struct dentry *dentry;
+       struct inode *inode;
+       struct file *h_file;
+       struct super_block *sb;
+
+       dentry = file->f_dentry;
+       inode = dentry->d_inode;
+       sb = dentry->d_sb;
+       mutex_lock(&inode->i_mutex);
+       err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+       if (unlikely(err))
+               goto out;
+
+       err = 0; /* -EBADF; */ /* posix? */
+       if (unlikely(!(file->f_mode & FMODE_WRITE)))
+               goto out_si;
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+       if (unlikely(err))
+               goto out_si;
+
+       err = au_ready_to_write(file, -1, &pin);
+       di_downgrade_lock(dentry, AuLock_IR);
+       if (unlikely(err))
+               goto out_unlock;
+       au_unpin(&pin);
+
+       h_file = au_hf_top(file);
+       err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+       au_cpup_attr_timesizes(inode);
+
+out_unlock:
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+out_si:
+       si_read_unlock(sb);
+out:
+       mutex_unlock(&inode->i_mutex);
+       return err;
+}
+
+/* no one supports this operation, currently */
+#if 0
+/*
+ * aio_fsync for a regular file on aufs (disabled: no branch fs
+ * implements ->aio_fsync at this kernel version).
+ * Fix vs. original: si_read_unlock() was called on "inode->sb", which
+ * does not exist (struct inode's member is i_sb); never caught because
+ * this block is compiled out.
+ */
+static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync)
+{
+       int err;
+       struct au_pin pin;
+       struct dentry *dentry;
+       struct inode *inode;
+       struct file *file, *h_file;
+
+       file = kio->ki_filp;
+       dentry = file->f_dentry;
+       inode = dentry->d_inode;
+       au_mtx_and_read_lock(inode);
+
+       err = 0; /* -EBADF; */ /* posix? */
+       if (unlikely(!(file->f_mode & FMODE_WRITE)))
+               goto out;
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+       if (unlikely(err))
+               goto out;
+
+       err = au_ready_to_write(file, -1, &pin);
+       di_downgrade_lock(dentry, AuLock_IR);
+       if (unlikely(err))
+               goto out_unlock;
+       au_unpin(&pin);
+
+       err = -ENOSYS;
+       h_file = au_hf_top(file);
+       if (h_file->f_op && h_file->f_op->aio_fsync) {
+               struct dentry *h_d;
+               struct mutex *h_mtx;
+
+               h_d = h_file->f_dentry;
+               h_mtx = &h_d->d_inode->i_mutex;
+               if (!is_sync_kiocb(kio)) {
+                       /* NOTE(review): takes h_file but puts the aufs file;
+                        * looks asymmetric -- confirm before enabling */
+                       get_file(h_file);
+                       fput(file);
+               }
+               kio->ki_filp = h_file;
+               err = h_file->f_op->aio_fsync(kio, datasync);
+               mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+               if (!err)
+                       vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
+               /*ignore*/
+               au_cpup_attr_timesizes(inode);
+               mutex_unlock(h_mtx);
+       }
+
+out_unlock:
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+out:
+       si_read_unlock(inode->i_sb);
+       mutex_unlock(&inode->i_mutex);
+       return err;
+}
+#endif
+
+/*
+ * fasync(2) for a regular file on aufs: forward to the branch file's
+ * ->fasync if it has one; otherwise err keeps the (zero) result of
+ * au_reval_and_lock_fdi().
+ */
+static int aufs_fasync(int fd, struct file *file, int flag)
+{
+       int err;
+       struct file *h_file;
+       struct dentry *dentry;
+       struct super_block *sb;
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+       if (unlikely(err))
+               goto out;
+
+       h_file = au_hf_top(file);
+       if (h_file->f_op && h_file->f_op->fasync)
+               err = h_file->f_op->fasync(fd, h_file, flag);
+
+       di_read_unlock(dentry, AuLock_IR);
+       fi_read_unlock(file);
+
+out:
+       si_read_unlock(sb);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no one supports this operation, currently */
+#if 0
+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
+                            size_t len, loff_t *pos , int more)
+{
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* file operations for non-directory (regular) files on aufs */
+const struct file_operations aufs_file_fop = {
+       .owner          = THIS_MODULE,
+
+       .llseek         = default_llseek,
+
+       .read           = aufs_read,
+       .write          = aufs_write,
+       .aio_read       = aufs_aio_read,
+       .aio_write      = aufs_aio_write,
+#ifdef CONFIG_AUFS_POLL
+       .poll           = aufs_poll,
+#endif
+       .unlocked_ioctl = aufs_ioctl_nondir,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = aufs_ioctl_nondir, /* same */
+#endif
+       .mmap           = aufs_mmap,
+       .open           = aufs_open_nondir,
+       .flush          = aufs_flush_nondir,
+       .release        = aufs_release_nondir,
+       .fsync          = aufs_fsync_nondir,
+       /* .aio_fsync   = aufs_aio_fsync_nondir, */
+       .fasync         = aufs_fasync,
+       /* .sendpage    = aufs_sendpage, */
+       .splice_write   = aufs_splice_write,
+       .splice_read    = aufs_splice_read,
+#if 0
+       .aio_splice_write = aufs_aio_splice_write,
+       .aio_splice_read  = aufs_aio_splice_read
+#endif
+};
diff --git a/fs/aufs/f_op_sp.c b/fs/aufs/f_op_sp.c
new file mode 100644 (file)
index 0000000..9eeedd6
--- /dev/null
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file operations for special files.
+ * while they exist in aufs virtually,
+ * their file I/O is handled out of aufs.
+ */
+
+#include "aufs.h"
+
+/*
+ * aio_read for a special file (FIFO): look up the top branch file under
+ * the aufs locks, drop them, and read through the branch file's
+ * ->aio_read directly.  atime is only touched when the branch is
+ * writable (wbr).
+ */
+static ssize_t aufs_aio_read_sp(struct kiocb *kio, const struct iovec *iov,
+                               unsigned long nv, loff_t pos)
+{
+       ssize_t err;
+       aufs_bindex_t bstart;
+       unsigned char wbr;
+       struct file *file, *h_file;
+       struct super_block *sb;
+
+       file = kio->ki_filp;
+       sb = file->f_dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       fi_read_lock(file);
+       bstart = au_fbstart(file);
+       h_file = au_hf_top(file);
+       fi_read_unlock(file);
+       wbr = !!au_br_writable(au_sbr(sb, bstart)->br_perm);
+       si_read_unlock(sb);
+
+       /* do not change the file in kio */
+       AuDebugOn(!h_file->f_op || !h_file->f_op->aio_read);
+       err = h_file->f_op->aio_read(kio, iov, nv, pos);
+       if (err > 0 && wbr)
+               file_accessed(h_file);
+
+       return err;
+}
+
+/*
+ * aio_write for a special file (FIFO): mirror of aufs_aio_read_sp(),
+ * updating the branch file's times only when the branch is writable.
+ */
+static ssize_t aufs_aio_write_sp(struct kiocb *kio, const struct iovec *iov,
+                                unsigned long nv, loff_t pos)
+{
+       ssize_t err;
+       aufs_bindex_t bstart;
+       unsigned char wbr;
+       struct super_block *sb;
+       struct file *file, *h_file;
+
+       file = kio->ki_filp;
+       sb = file->f_dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       fi_read_lock(file);
+       bstart = au_fbstart(file);
+       h_file = au_hf_top(file);
+       fi_read_unlock(file);
+       wbr = !!au_br_writable(au_sbr(sb, bstart)->br_perm);
+       si_read_unlock(sb);
+
+       /* do not change the file in kio */
+       AuDebugOn(!h_file->f_op || !h_file->f_op->aio_write);
+       err = h_file->f_op->aio_write(kio, iov, nv, pos);
+       if (err > 0 && wbr)
+               file_update_time(h_file);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * release for a special file: close the branch FIFO through its own
+ * ->release, then let aufs tear down its per-file state.
+ */
+static int aufs_release_sp(struct inode *inode, struct file *file)
+{
+       int err;
+       struct file *h_file;
+
+       fi_read_lock(file);
+       h_file = au_hf_top(file);
+       fi_read_unlock(file);
+       /* close this fifo in aufs */
+       err = h_file->f_op->release(inode, file); /* ignore */
+       aufs_release_nondir(inode, file); /* ignore */
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* currently, support only FIFO */
+enum {
+       AuSp_FIFO, AuSp_FIFO_R, AuSp_FIFO_W, AuSp_FIFO_RW,
+       /* AuSp_SOCK, AuSp_CHR, AuSp_BLK, */
+       AuSp_Last
+};
+static int aufs_open_sp(struct inode *inode, struct file *file);
+/*
+ * Per special-file-kind file_operations, cloned lazily from the branch
+ * file's fops in au_init_fop_sp(); 'done' marks an initialized slot and
+ * 'spin' guards its one-time initialization.
+ */
+static struct au_sp_fop {
+       int                     done;
+       struct file_operations  fop;    /* not 'const' */
+       spinlock_t              spin;
+} au_sp_fop[AuSp_Last] = {
+       [AuSp_FIFO] = {
+               .fop    = {
+                       .owner  = THIS_MODULE,
+                       .open   = aufs_open_sp
+               }
+       }
+};
+
+/*
+ * Lazily build the file_operations for this FIFO open mode and install
+ * it on @file.  Two one-time initializations, both done with a
+ * check / lock / re-check sequence: the spinlocks of the whole table,
+ * then the fop clone for the specific R/W/RW slot, copied from the
+ * branch file's fops with the aio and release hooks redirected to the
+ * aufs wrappers above.
+ */
+static void au_init_fop_sp(struct file *file)
+{
+       struct au_sp_fop *p;
+       int i;
+       struct file *h_file;
+
+       p = au_sp_fop;
+       if (unlikely(!p->done)) {
+               /* initialize first time only */
+               static DEFINE_SPINLOCK(spin);
+
+               spin_lock(&spin);
+               if (!p->done) {
+                       BUILD_BUG_ON(sizeof(au_sp_fop)/sizeof(*au_sp_fop)
+                                    != AuSp_Last);
+                       for (i = 0; i < AuSp_Last; i++)
+                               spin_lock_init(&p[i].spin);
+                       p->done = 1;
+               }
+               spin_unlock(&spin);
+       }
+
+       /* pick the slot matching the open mode */
+       switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
+       case FMODE_READ:
+               i = AuSp_FIFO_R;
+               break;
+       case FMODE_WRITE:
+               i = AuSp_FIFO_W;
+               break;
+       case FMODE_READ | FMODE_WRITE:
+               i = AuSp_FIFO_RW;
+               break;
+       default:
+               BUG();
+       }
+
+       p += i;
+       if (unlikely(!p->done)) {
+               /* initialize first time only */
+               h_file = au_hf_top(file);
+               spin_lock(&p->spin);
+               if (!p->done) {
+                       p->fop = *h_file->f_op;
+                       p->fop.owner = THIS_MODULE;
+                       if (p->fop.aio_read)
+                               p->fop.aio_read = aufs_aio_read_sp;
+                       if (p->fop.aio_write)
+                               p->fop.aio_write = aufs_aio_write_sp;
+                       p->fop.release = aufs_release_sp;
+                       p->done = 1;
+               }
+               spin_unlock(&p->spin);
+       }
+       file->f_op = &p->fop;
+}
+
+/*
+ * Copy-up a special file to the branch chosen by the write-branch
+ * policy, if it is not already on that branch.  Enters with the dentry
+ * read-locked (IR), temporarily upgrades to a write lock, and leaves
+ * with the lock downgraded back to IR.
+ */
+static int au_cpup_sp(struct dentry *dentry)
+{
+       int err;
+       struct au_pin pin;
+       struct au_wr_dir_args wr_dir_args = {
+               .force_btgt     = -1,
+               .flags          = 0
+       };
+       struct au_cp_generic cpg = {
+               .dentry = dentry,
+               .bdst   = -1,
+               .bsrc   = -1,
+               .len    = -1,
+               .pin    = &pin,
+               .flags  = AuCpup_DTIME
+       };
+
+       AuDbg("%.*s\n", AuDLNPair(dentry));
+
+       di_read_unlock(dentry, AuLock_IR);
+       di_write_lock_child(dentry);
+       err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+       if (unlikely(err < 0))
+               goto out;
+       cpg.bdst = err;
+       err = 0;
+       if (cpg.bdst == au_dbstart(dentry))
+               goto out; /* success */
+
+       err = au_pin(&pin, dentry, cpg.bdst, au_opt_udba(dentry->d_sb),
+                    AuPin_MNT_WRITE);
+       if (!err) {
+               err = au_sio_cpup_simple(&cpg);
+               au_unpin(&pin);
+       }
+
+out:
+       di_downgrade_lock(dentry, AuLock_IR);
+       return err;
+}
+
+/*
+ * Open body for a special file: try copying it up (failure is
+ * tolerated; operating on the ro branch is not an error), open the
+ * branch file, then -- with all aufs locks dropped, since a FIFO open
+ * may block -- open the FIFO on the aufs inode itself and install the
+ * special fops.
+ */
+static int au_do_open_sp(struct file *file, int flags)
+{
+       int err;
+       struct dentry *dentry;
+       struct super_block *sb;
+       struct file *h_file;
+       struct inode *h_inode;
+
+       dentry = file->f_dentry;
+       AuDbg("%.*s\n", AuDLNPair(dentry));
+
+       /*
+        * try copying-up.
+        * operating on the ro branch is not an error.
+        */
+       au_cpup_sp(dentry); /* ignore */
+
+       /* prepare h_file */
+       err = au_do_open_nondir(file, vfsub_file_flags(file));
+       if (unlikely(err))
+               goto out;
+
+       sb = dentry->d_sb;
+       h_file = au_hf_top(file);
+       h_inode = h_file->f_dentry->d_inode;
+       di_read_unlock(dentry, AuLock_IR);
+       fi_write_unlock(file);
+       si_read_unlock(sb);
+       /* open this fifo in aufs */
+       err = h_inode->i_fop->open(file->f_dentry->d_inode, file);
+       si_noflush_read_lock(sb);
+       fi_write_lock(file);
+       di_read_lock_child(dentry, AuLock_IR);
+       if (!err)
+               au_init_fop_sp(file);
+
+out:
+       return err;
+}
+
+/* ->open entry for special files: the common au_do_open() wrapper
+ * around au_do_open_sp(), under si_rwsem */
+static int aufs_open_sp(struct inode *inode, struct file *file)
+{
+       int err;
+       struct super_block *sb;
+
+       sb = file->f_dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       err = au_do_open(file, au_do_open_sp, /*fidir*/NULL);
+       si_read_unlock(sb);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Initialize an aufs inode for a special file.  init_special_inode()
+ * sets the default fops; for a FIFO they are then overridden with the
+ * aufs ones.  chr/blk/sock keep the defaults (see au_special_file()).
+ */
+void au_init_special_fop(struct inode *inode, umode_t mode, dev_t rdev)
+{
+       init_special_inode(inode, mode, rdev);
+
+       switch (mode & S_IFMT) {
+       case S_IFIFO:
+               inode->i_fop = &au_sp_fop[AuSp_FIFO].fop;
+               /*FALLTHROUGH*/
+       case S_IFCHR:
+       case S_IFBLK:
+       case S_IFSOCK:
+               break;
+       default:
+               AuDebugOn(1);
+       }
+}
+
+/*
+ * Return 1 if @mode is a special-file type that aufs handles through
+ * the special fops.  Currently only FIFOs; chr/blk/sock are compiled
+ * out.
+ */
+int au_special_file(umode_t mode)
+{
+       int ret;
+
+       ret = 0;
+       switch (mode & S_IFMT) {
+       case S_IFIFO:
+#if 0
+       case S_IFCHR:
+       case S_IFBLK:
+       case S_IFSOCK:
+#endif
+               ret = 1;
+       }
+
+       return ret;
+}
diff --git a/fs/aufs/file.c b/fs/aufs/file.c
new file mode 100644 (file)
index 0000000..599d73f
--- /dev/null
@@ -0,0 +1,705 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * handling file/dir, and address_space operation
+ */
+
+#ifdef CONFIG_AUFS_DEBUG
+#include <linux/migrate.h>
+#endif
+#include <linux/pagemap.h>
+#include "aufs.h"
+
+/*
+ * Convert open flags for use on a read-only branch: strip every
+ * write-related flag and force O_RDONLY | O_NOATIME.
+ */
+unsigned int au_file_roflags(unsigned int flags)
+{
+       flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
+       flags |= O_RDONLY | O_NOATIME;
+       return flags;
+}
+
+/* common functions to regular file and dir */
+/*
+ * Open the branch (hidden) file behind @dentry on branch @bindex.
+ * Returns the opened struct file or an ERR_PTR.  Handles the races
+ * with unlink/rmdir, the noexec mount check for __FMODE_EXEC, flag
+ * demotion on a read-only branch, and -- for special files -- dropping
+ * the aufs locks around the (possibly blocking) open.  On success the
+ * branch's br_count reference is kept; it is dropped on failure.
+ */
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+                      struct file *file)
+{
+       struct file *h_file;
+       struct dentry *h_dentry;
+       struct inode *h_inode;
+       struct super_block *sb;
+       struct au_branch *br;
+       struct path h_path;
+       int err, exec_flag;
+
+       /* a race condition can happen between open and unlink/rmdir */
+       h_file = ERR_PTR(-ENOENT);
+       h_dentry = au_h_dptr(dentry, bindex);
+       if (au_test_nfsd() && !h_dentry)
+               goto out;
+       h_inode = h_dentry->d_inode;
+       if (au_test_nfsd() && !h_inode)
+               goto out;
+       spin_lock(&h_dentry->d_lock);
+       err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
+               || !h_inode
+               /* || !dentry->d_inode->i_nlink */
+               ;
+       spin_unlock(&h_dentry->d_lock);
+       if (unlikely(err))
+               goto out;
+
+       sb = dentry->d_sb;
+       br = au_sbr(sb, bindex);
+       h_file = ERR_PTR(-EACCES);
+       exec_flag = flags & __FMODE_EXEC;
+       if (exec_flag && (au_br_mnt(br)->mnt_flags & MNT_NOEXEC))
+               goto out;
+
+       /* drop flags for writing */
+       if (au_test_ro(sb, bindex, dentry->d_inode))
+               flags = au_file_roflags(flags);
+       flags &= ~O_CREAT;
+       atomic_inc(&br->br_count);
+       h_path.dentry = h_dentry;
+       h_path.mnt = au_br_mnt(br);
+       if (!au_special_file(h_inode->i_mode))
+               h_file = vfsub_dentry_open(&h_path, flags);
+       else {
+               /* this block depends upon the configuration */
+               di_read_unlock(dentry, AuLock_IR);
+               fi_write_unlock(file);
+               si_read_unlock(sb);
+               h_file = vfsub_dentry_open(&h_path, flags);
+               si_noflush_read_lock(sb);
+               fi_write_lock(file);
+               di_read_lock_child(dentry, AuLock_IR);
+       }
+       if (IS_ERR(h_file))
+               goto out_br;
+
+       if (exec_flag) {
+               /* emulate what the VFS does for plain exec opens */
+               err = deny_write_access(h_file);
+               if (unlikely(err)) {
+                       fput(h_file);
+                       h_file = ERR_PTR(err);
+                       goto out_br;
+               }
+       }
+       fsnotify_open(h_file);
+       goto out; /* success */
+
+out_br:
+       atomic_dec(&br->br_count);
+out:
+       return h_file;
+}
+
+/*
+ * Common open path for files and dirs: initialize the per-file finfo
+ * (@fidir is non-NULL for dirs), run the caller's @open body under the
+ * dentry IR lock, and tear the finfo down again on failure.
+ */
+int au_do_open(struct file *file, int (*open)(struct file *file, int flags),
+              struct au_fidir *fidir)
+{
+       int err;
+       struct dentry *dentry;
+
+       err = au_finfo_init(file, fidir);
+       if (unlikely(err))
+               goto out;
+
+       dentry = file->f_dentry;
+       di_read_lock_child(dentry, AuLock_IR);
+       err = open(file, vfsub_file_flags(file));
+       di_read_unlock(dentry, AuLock_IR);
+
+       fi_write_unlock(file);
+       if (unlikely(err)) {
+               au_fi(file)->fi_hdir = NULL;
+               au_finfo_fin(file);
+       }
+
+out:
+       return err;
+}
+
+/*
+ * Re-open a non-directory file on the current top branch of its dentry
+ * (e.g. after a copy-up changed dbstart).  If the file is already open
+ * on that branch with the same mode, nothing to do.  On failure the
+ * previous branch file is restored.
+ */
+int au_reopen_nondir(struct file *file)
+{
+       int err;
+       aufs_bindex_t bstart;
+       struct dentry *dentry;
+       struct file *h_file, *h_file_tmp;
+
+       dentry = file->f_dentry;
+       AuDebugOn(au_special_file(dentry->d_inode->i_mode));
+       bstart = au_dbstart(dentry);
+       h_file_tmp = NULL;
+       if (au_fbstart(file) == bstart) {
+               h_file = au_hf_top(file);
+               if (file->f_mode == h_file->f_mode)
+                       return 0; /* success */
+               /* keep the old branch file around in case the reopen fails */
+               h_file_tmp = h_file;
+               get_file(h_file_tmp);
+               au_set_h_fptr(file, bstart, NULL);
+       }
+       AuDebugOn(au_fi(file)->fi_hdir);
+       /*
+        * it can happen
+        * file exists on both of rw and ro
+        * open --> dbstart and fbstart are both 0
+        * prepend a branch as rw, "rw" become ro
+        * remove rw/file
+        * delete the top branch, "rw" becomes rw again
+        *      --> dbstart is 1, fbstart is still 0
+        * write --> fbstart is 0 but dbstart is 1
+        */
+       /* AuDebugOn(au_fbstart(file) < bstart); */
+
+       h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC,
+                          file);
+       err = PTR_ERR(h_file);
+       if (IS_ERR(h_file)) {
+               if (h_file_tmp) {
+                       /* put the old file back; re-take the br_count that
+                        * au_set_h_fptr(..., NULL) dropped */
+                       atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count);
+                       au_set_h_fptr(file, bstart, h_file_tmp);
+                       h_file_tmp = NULL;
+               }
+               goto out; /* todo: close all? */
+       }
+
+       err = 0;
+       au_set_fbstart(file, bstart);
+       au_set_h_fptr(file, bstart, h_file);
+       au_update_figen(file);
+       /* todo: necessary? */
+       /* file->f_ra = h_file->f_ra; */
+
+out:
+       if (h_file_tmp)
+               fput(h_file_tmp);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Re-open the file on top of the whiteout-hidden copy @hi_wh at branch
+ * @btgt.  Temporarily rewires dinfo (di_bstart and the hdentry slot) so
+ * au_reopen_nondir() targets @hi_wh, then restores both fields.
+ * Requires the dinfo write lock, which the AuRwMustWriteLock asserts.
+ */
+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
+                       struct dentry *hi_wh)
+{
+       int err;
+       aufs_bindex_t bstart;
+       struct au_dinfo *dinfo;
+       struct dentry *h_dentry;
+       struct au_hdentry *hdp;
+
+       dinfo = au_di(file->f_dentry);
+       AuRwMustWriteLock(&dinfo->di_rwsem);
+
+       bstart = dinfo->di_bstart;
+       dinfo->di_bstart = btgt;
+       hdp = dinfo->di_hdentry;
+       h_dentry = hdp[0 + btgt].hd_dentry;
+       hdp[0 + btgt].hd_dentry = hi_wh;
+       err = au_reopen_nondir(file);
+       hdp[0 + btgt].hd_dentry = h_dentry;
+       dinfo->di_bstart = bstart;
+
+       return err;
+}
+
+static int au_ready_to_write_wh(struct file *file, loff_t len,
+                               aufs_bindex_t bcpup, struct au_pin *pin)
+{
+       int err;
+       struct inode *inode, *h_inode;
+       struct dentry *h_dentry, *hi_wh;
+       struct au_cp_generic cpg = {
+               .dentry = file->f_dentry,
+               .bdst   = bcpup,
+               .bsrc   = -1,
+               .len    = len,
+               .pin    = pin
+       };
+
+       au_update_dbstart(cpg.dentry);
+       inode = cpg.dentry->d_inode;
+       h_inode = NULL;
+       if (au_dbstart(cpg.dentry) <= bcpup
+           && au_dbend(cpg.dentry) >= bcpup) {
+               h_dentry = au_h_dptr(cpg.dentry, bcpup);
+               if (h_dentry)
+                       h_inode = h_dentry->d_inode;
+       }
+       hi_wh = au_hi_wh(inode, bcpup);
+       if (!hi_wh && !h_inode)
+               err = au_sio_cpup_wh(&cpg, file); /* no copy exists on @bcpup yet */
+       else
+               /* already copied-up after unlink */
+               err = au_reopen_wh(file, bcpup, hi_wh);
+
+       if (!err
+           && inode->i_nlink > 1
+           && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
+               au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup)); /* hard-linked inode: record a pseudo-link */
+
+       return err;
+}
+
+/*
+ * prepare the @file for writing.
+ */
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
+{
+       int err;
+       aufs_bindex_t dbstart;
+       struct dentry *parent, *h_dentry;
+       struct inode *inode;
+       struct super_block *sb;
+       struct file *h_file;
+       struct au_cp_generic cpg = {
+               .dentry = file->f_dentry,
+               .bdst   = -1,
+               .bsrc   = -1,
+               .len    = len,
+               .pin    = pin,
+               .flags  = AuCpup_DTIME
+       };
+
+       sb = cpg.dentry->d_sb;
+       inode = cpg.dentry->d_inode;
+       AuDebugOn(au_special_file(inode->i_mode));
+       cpg.bsrc = au_fbstart(file); /* the file's current top branch */
+       err = au_test_ro(sb, cpg.bsrc, inode);
+       if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) { /* already writable as-is */
+               err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
+                            /*flags*/0);
+               goto out;
+       }
+
+       /* need to cpup or reopen */
+       parent = dget_parent(cpg.dentry);
+       di_write_lock_parent(parent);
+       err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+       cpg.bdst = err; /* the write-branch policy returns the copyup target index */
+       if (unlikely(err < 0))
+               goto out_dgrade;
+       err = 0;
+
+       if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
+               err = au_cpup_dirs(cpg.dentry, cpg.bdst); /* parent dirs are missing on the target branch */
+               if (unlikely(err))
+                       goto out_dgrade;
+       }
+
+       err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+                    AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+       if (unlikely(err))
+               goto out_dgrade;
+
+       h_dentry = au_hf_top(file)->f_dentry;
+       dbstart = au_dbstart(cpg.dentry);
+       if (dbstart <= cpg.bdst) { /* the file already exists on the target branch */
+               h_dentry = au_h_dptr(cpg.dentry, cpg.bdst);
+               AuDebugOn(!h_dentry);
+               cpg.bsrc = cpg.bdst;
+       }
+
+       if (dbstart <= cpg.bdst         /* just reopen */
+           || !d_unhashed(cpg.dentry)  /* copyup and reopen */
+               ) {
+               h_file = au_h_open_pre(cpg.dentry, cpg.bsrc);
+               if (IS_ERR(h_file))
+                       err = PTR_ERR(h_file);
+               else {
+                       di_downgrade_lock(parent, AuLock_IR);
+                       if (dbstart > cpg.bdst)
+                               err = au_sio_cpup_simple(&cpg);
+                       if (!err)
+                               err = au_reopen_nondir(file);
+                       au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
+               }
+       } else {                        /* copyup as wh and reopen */
+               /*
+                * since writable hfsplus branch is not supported,
+                * h_open_pre/post() are unnecessary.
+                */
+               err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
+               di_downgrade_lock(parent, AuLock_IR);
+       }
+
+       if (!err) {
+               au_pin_set_parent_lflag(pin, /*lflag*/0);
+               goto out_dput; /* success */
+       }
+       au_unpin(pin);
+       goto out_unlock;
+
+out_dgrade:
+       di_downgrade_lock(parent, AuLock_IR);
+out_unlock:
+       di_read_unlock(parent, AuLock_IR);
+out_dput:
+       dput(parent);
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_do_flush(struct file *file, fl_owner_t id,
+               int (*flush)(struct file *file, fl_owner_t id))
+{
+       int err;
+       struct dentry *dentry;
+       struct super_block *sb;
+       struct inode *inode;
+
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       si_noflush_read_lock(sb);
+       fi_read_lock(file);
+       ii_read_lock_child(inode);
+
+       err = flush(file, id); /* the caller-supplied op does the real flush */
+       au_cpup_attr_timesizes(inode); /* refresh time/size attributes on the aufs inode */
+
+       ii_read_unlock(inode);
+       fi_read_unlock(file);
+       si_read_unlock(sb);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
+{
+       int err;
+       struct au_pin pin;
+       struct au_finfo *finfo;
+       struct dentry *parent, *hi_wh;
+       struct inode *inode;
+       struct super_block *sb;
+       struct au_cp_generic cpg = {
+               .dentry = file->f_dentry,
+               .bdst   = -1,
+               .bsrc   = -1,
+               .len    = -1,
+               .pin    = &pin,
+               .flags  = AuCpup_DTIME
+       };
+
+       FiMustWriteLock(file);
+
+       err = 0;
+       finfo = au_fi(file);
+       sb = cpg.dentry->d_sb;
+       inode = cpg.dentry->d_inode;
+       cpg.bdst = au_ibstart(inode); /* the inode's current top branch */
+       if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
+               goto out; /* already in sync */
+
+       parent = dget_parent(cpg.dentry);
+       if (au_test_ro(sb, cpg.bdst, inode)) {
+               di_read_lock_parent(parent, !AuLock_IR);
+               err = AuWbrCopyup(au_sbi(sb), cpg.dentry); /* pick a writable branch instead */
+               cpg.bdst = err;
+               di_read_unlock(parent, !AuLock_IR);
+               if (unlikely(err < 0))
+                       goto out_parent;
+               err = 0;
+       }
+
+       di_read_lock_parent(parent, AuLock_IR);
+       hi_wh = au_hi_wh(inode, cpg.bdst);
+       if (!S_ISDIR(inode->i_mode)
+           && au_opt_test(au_mntflags(sb), PLINK)
+           && au_plink_test(inode)
+           && !d_unhashed(cpg.dentry)
+           && cpg.bdst < au_dbstart(cpg.dentry)) {
+               err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
+               if (unlikely(err))
+                       goto out_unlock;
+
+               /* always superio. */
+               err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+                            AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+               if (!err) {
+                       err = au_sio_cpup_simple(&cpg);
+                       au_unpin(&pin);
+               }
+       } else if (hi_wh) {
+               /* already copied-up after unlink */
+               err = au_reopen_wh(file, cpg.bdst, hi_wh);
+               *need_reopen = 0; /* au_reopen_wh() already handled the reopen */
+       }
+
+out_unlock:
+       di_read_unlock(parent, AuLock_IR);
+out_parent:
+       dput(parent);
+out:
+       return err;
+}
+
+static void au_do_refresh_dir(struct file *file)
+{
+       aufs_bindex_t bindex, bend, new_bindex, brid;
+       struct au_hfile *p, tmp, *q;
+       struct au_finfo *finfo;
+       struct super_block *sb;
+       struct au_fidir *fidir;
+
+       FiMustWriteLock(file);
+
+       sb = file->f_dentry->d_sb;
+       finfo = au_fi(file);
+       fidir = finfo->fi_hdir;
+       AuDebugOn(!fidir);
+       p = fidir->fd_hfile + finfo->fi_btop;
+       brid = p->hf_br->br_id; /* branch id of the current top; used again below */
+       bend = fidir->fd_bbot;
+       for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) {
+               if (!p->hf_file)
+                       continue;
+
+               new_bindex = au_br_index(sb, p->hf_br->br_id);
+               if (new_bindex == bindex)
+                       continue;
+               if (new_bindex < 0) { /* the branch was removed */
+                       au_set_h_fptr(file, bindex, NULL);
+                       continue;
+               }
+
+               /* swap two lower files, and loop again */
+               q = fidir->fd_hfile + new_bindex;
+               tmp = *q;
+               *q = *p;
+               *p = tmp;
+               if (tmp.hf_file) { /* re-examine the entry swapped into this slot */
+                       bindex--;
+                       p--;
+               }
+       }
+
+       p = fidir->fd_hfile;
+       if (!au_test_mmapped(file) && !d_unlinked(file->f_dentry)) {
+               bend = au_sbend(sb);
+               for (finfo->fi_btop = 0; finfo->fi_btop <= bend;
+                    finfo->fi_btop++, p++)
+                       if (p->hf_file) {
+                               if (p->hf_file->f_dentry
+                                   && p->hf_file->f_dentry->d_inode)
+                                       break;
+                               else
+                                       au_hfput(p, file);
+                       }
+       } else { /* mmapped or unlinked: keep the original top branch */
+               bend = au_br_index(sb, brid);
+               for (finfo->fi_btop = 0; finfo->fi_btop < bend;
+                    finfo->fi_btop++, p++)
+                       if (p->hf_file)
+                               au_hfput(p, file);
+               bend = au_sbend(sb);
+       }
+
+       p = fidir->fd_hfile + bend;
+       for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop;
+            fidir->fd_bbot--, p--)
+               if (p->hf_file) {
+                       if (p->hf_file->f_dentry
+                           && p->hf_file->f_dentry->d_inode)
+                               break;
+                       else
+                               au_hfput(p, file);
+               }
+       AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
+}
+
+/*
+ * after branch manipulating, refresh the file.
+ */
+static int refresh_file(struct file *file, int (*reopen)(struct file *file))
+{
+       int err, need_reopen;
+       aufs_bindex_t bend, bindex;
+       struct dentry *dentry;
+       struct au_finfo *finfo;
+       struct au_hfile *hfile;
+
+       dentry = file->f_dentry;
+       finfo = au_fi(file);
+       if (!finfo->fi_hdir) { /* non-dir: a single lower file */
+               hfile = &finfo->fi_htop;
+               AuDebugOn(!hfile->hf_file);
+               bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id);
+               AuDebugOn(bindex < 0);
+               if (bindex != finfo->fi_btop)
+                       au_set_fbstart(file, bindex); /* the branch moved; follow it */
+       } else {
+               err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1); /* room for every current branch */
+               if (unlikely(err))
+                       goto out;
+               au_do_refresh_dir(file);
+       }
+
+       err = 0;
+       need_reopen = 1;
+       if (!au_test_mmapped(file))
+               err = au_file_refresh_by_inode(file, &need_reopen);
+       if (!err && need_reopen && !d_unlinked(dentry))
+               err = reopen(file);
+       if (!err) {
+               au_update_figen(file);
+               goto out; /* success */
+       }
+
+       /* error, close all lower files */
+       if (finfo->fi_hdir) {
+               bend = au_fbend_dir(file);
+               for (bindex = au_fbstart(file); bindex <= bend; bindex++)
+                       au_set_h_fptr(file, bindex, NULL);
+       }
+
+out:
+       return err;
+}
+
+/* common function to regular file and dir */
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+                         int wlock)
+{
+       int err;
+       unsigned int sigen, figen;
+       aufs_bindex_t bstart;
+       unsigned char pseudo_link;
+       struct dentry *dentry;
+       struct inode *inode;
+
+       err = 0;
+       dentry = file->f_dentry;
+       inode = dentry->d_inode;
+       AuDebugOn(au_special_file(inode->i_mode));
+       sigen = au_sigen(dentry->d_sb);
+       fi_write_lock(file);
+       figen = au_figen(file);
+       di_write_lock_child(dentry);
+       bstart = au_dbstart(dentry);
+       pseudo_link = (bstart != au_ibstart(inode));
+       if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) { /* everything is up to date */
+               if (!wlock) {
+                       di_downgrade_lock(dentry, AuLock_IR);
+                       fi_downgrade_lock(file);
+               }
+               goto out; /* success */
+       }
+
+       AuDbg("sigen %d, figen %d\n", sigen, figen);
+       if (au_digen_test(dentry, sigen)) {
+               err = au_reval_dpath(dentry, sigen); /* revalidate the whole path first */
+               AuDebugOn(!err && au_digen_test(dentry, sigen));
+       }
+
+       if (!err)
+               err = refresh_file(file, reopen);
+       if (!err) { /* on success, di/fi locks stay held (write, or read if !wlock) */
+               if (!wlock) {
+                       di_downgrade_lock(dentry, AuLock_IR);
+                       fi_downgrade_lock(file);
+               }
+       } else {
+               di_write_unlock(dentry);
+               fi_write_unlock(file);
+       }
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* cf. aufs_nopage() */
+/* for madvise(2) */
+static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
+{
+       unlock_page(page);
+       return 0;
+}
+
+/* it will never be called, but necessary to support O_DIRECT */
+static ssize_t aufs_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{ BUG(); return 0; }
+
+/*
+ * it will never be called, but madvise and fadvise behave differently
+ * when get_xip_mem is defined
+ */
+static int aufs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
+                           int create, void **kmem, unsigned long *pfn)
+{ BUG(); return 0; }
+
+/* they will never be called. */
+#ifdef CONFIG_AUFS_DEBUG
+static int aufs_write_begin(struct file *file, struct address_space *mapping,
+                           loff_t pos, unsigned len, unsigned flags,
+                           struct page **pagep, void **fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_write_end(struct file *file, struct address_space *mapping,
+                         loff_t pos, unsigned len, unsigned copied,
+                         struct page *page, void *fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_writepage(struct page *page, struct writeback_control *wbc)
+{ AuUnsupport(); return 0; }
+
+static int aufs_set_page_dirty(struct page *page)
+{ AuUnsupport(); return 0; }
+static void aufs_invalidatepage(struct page *page, unsigned long offset)
+{ AuUnsupport(); }
+static int aufs_releasepage(struct page *page, gfp_t gfp)
+{ AuUnsupport(); return 0; }
+static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
+                           struct page *page, enum migrate_mode mode)
+{ AuUnsupport(); return 0; }
+static int aufs_launder_page(struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_is_partially_uptodate(struct page *page,
+                                     read_descriptor_t *desc,
+                                     unsigned long from)
+{ AuUnsupport(); return 0; }
+static int aufs_error_remove_page(struct address_space *mapping,
+                                 struct page *page)
+{ AuUnsupport(); return 0; }
+#endif /* CONFIG_AUFS_DEBUG */
+
+const struct address_space_operations aufs_aop = {
+       .readpage               = aufs_readpage,
+       .direct_IO              = aufs_direct_IO,
+       .get_xip_mem            = aufs_get_xip_mem,
+#ifdef CONFIG_AUFS_DEBUG
+       .writepage              = aufs_writepage,
+       /* no writepages, because of writepage */
+       .set_page_dirty         = aufs_set_page_dirty,
+       /* no readpages, because of readpage */
+       .write_begin            = aufs_write_begin,
+       .write_end              = aufs_write_end,
+       /* no bmap, no block device */
+       .invalidatepage         = aufs_invalidatepage,
+       .releasepage            = aufs_releasepage,
+       .migratepage            = aufs_migratepage,
+       .launder_page           = aufs_launder_page,
+       .is_partially_uptodate  = aufs_is_partially_uptodate,
+       .error_remove_page      = aufs_error_remove_page
+#endif /* CONFIG_AUFS_DEBUG */
+};
diff --git a/fs/aufs/file.h b/fs/aufs/file.h
new file mode 100644 (file)
index 0000000..afd27de
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file operations
+ */
+
+#ifndef __AUFS_FILE_H__
+#define __AUFS_FILE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include "rwsem.h"
+
+struct au_branch;
+struct au_hfile {
+       struct file             *hf_file;
+       struct au_branch        *hf_br;
+};
+
+struct au_vdir;
+struct au_fidir {
+       aufs_bindex_t           fd_bbot;
+       aufs_bindex_t           fd_nent;
+       struct au_vdir          *fd_vdir_cache;
+       struct au_hfile         fd_hfile[];
+};
+
+static inline int au_fidir_sz(int nent)
+{
+       AuDebugOn(nent < 0);
+       return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent;
+}
+
+struct au_finfo {
+       atomic_t                fi_generation;
+
+       struct au_rwsem         fi_rwsem;
+       aufs_bindex_t           fi_btop;
+
+       /* do not union them */
+       struct {                                /* for non-dir */
+               struct au_hfile                 fi_htop;
+               atomic_t                        fi_mmapped;
+       };
+       struct au_fidir         *fi_hdir;       /* for dir only */
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* file.c */
+extern const struct address_space_operations aufs_aop;
+unsigned int au_file_roflags(unsigned int flags);
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+                      struct file *file);
+int au_do_open(struct file *file, int (*open)(struct file *file, int flags),
+              struct au_fidir *fidir);
+int au_reopen_nondir(struct file *file);
+struct au_pin;
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+                         int wlock);
+int au_do_flush(struct file *file, fl_owner_t id,
+               int (*flush)(struct file *file, fl_owner_t id));
+
+/* poll.c */
+#ifdef CONFIG_AUFS_POLL
+unsigned int aufs_poll(struct file *file, poll_table *wait);
+#endif
+
+#ifdef CONFIG_AUFS_BR_HFSPLUS
+/* hfsplus.c */
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex);
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+                   struct file *h_file);
+#else
+static inline
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       return NULL;
+}
+
+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex,
+          struct file *h_file);
+#endif
+
+/* f_op.c */
+extern const struct file_operations aufs_file_fop;
+int au_do_open_nondir(struct file *file, int flags);
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file);
+
+#ifdef CONFIG_AUFS_SP_IATTR
+/* f_op_sp.c */
+int au_special_file(umode_t mode);
+void au_init_special_fop(struct inode *inode, umode_t mode, dev_t rdev);
+#else
+AuStubInt0(au_special_file, umode_t mode)
+static inline void au_init_special_fop(struct inode *inode, umode_t mode,
+                                      dev_t rdev)
+{
+       init_special_inode(inode, mode, rdev);
+}
+#endif
+
+/* finfo.c */
+void au_hfput(struct au_hfile *hf, struct file *file);
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
+                  struct file *h_file);
+
+void au_update_figen(struct file *file);
+struct au_fidir *au_fidir_alloc(struct super_block *sb);
+int au_fidir_realloc(struct au_finfo *finfo, int nbr);
+
+void au_fi_init_once(void *_fi);
+void au_finfo_fin(struct file *file);
+int au_finfo_init(struct file *file, struct au_fidir *fidir);
+
+/* ioctl.c */
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+                          unsigned long arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_finfo *au_fi(struct file *file)
+{
+       return file->private_data;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * fi_read_lock, fi_write_lock,
+ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock
+ */
+AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem);
+
+#define FiMustNoWaiters(f)     AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
+#define FiMustAnyLock(f)       AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
+#define FiMustWriteLock(f)     AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: hard/soft set? */
+static inline aufs_bindex_t au_fbstart(struct file *file)
+{
+       FiMustAnyLock(file);
+       return au_fi(file)->fi_btop;
+}
+
+static inline aufs_bindex_t au_fbend_dir(struct file *file)
+{
+       FiMustAnyLock(file);
+       AuDebugOn(!au_fi(file)->fi_hdir);
+       return au_fi(file)->fi_hdir->fd_bbot;
+}
+
+static inline struct au_vdir *au_fvdir_cache(struct file *file)
+{
+       FiMustAnyLock(file);
+       AuDebugOn(!au_fi(file)->fi_hdir);
+       return au_fi(file)->fi_hdir->fd_vdir_cache;
+}
+
+static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex)
+{
+       FiMustWriteLock(file);
+       au_fi(file)->fi_btop = bindex;
+}
+
+static inline void au_set_fbend_dir(struct file *file, aufs_bindex_t bindex)
+{
+       FiMustWriteLock(file);
+       AuDebugOn(!au_fi(file)->fi_hdir);
+       au_fi(file)->fi_hdir->fd_bbot = bindex;
+}
+
+static inline void au_set_fvdir_cache(struct file *file,
+                                     struct au_vdir *vdir_cache)
+{
+       FiMustWriteLock(file);
+       AuDebugOn(!au_fi(file)->fi_hdir);
+       au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache;
+}
+
+static inline struct file *au_hf_top(struct file *file)
+{
+       FiMustAnyLock(file);
+       AuDebugOn(au_fi(file)->fi_hdir);
+       return au_fi(file)->fi_htop.hf_file;
+}
+
+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex)
+{
+       FiMustAnyLock(file);
+       AuDebugOn(!au_fi(file)->fi_hdir);
+       return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file;
+}
+
+/* todo: memory barrier? */
+static inline unsigned int au_figen(struct file *f)
+{
+       return atomic_read(&au_fi(f)->fi_generation);
+}
+
+static inline void au_set_mmapped(struct file *f)
+{
+       if (atomic_inc_return(&au_fi(f)->fi_mmapped))
+               return; /* common case: the counter did not land on zero */
+       pr_warn("fi_mmapped wrapped around\n");
+       while (!atomic_inc_return(&au_fi(f)->fi_mmapped))
+               ; /* skip the zero value so au_test_mmapped() stays true */
+}
+
+static inline void au_unset_mmapped(struct file *f)
+{
+       atomic_dec(&au_fi(f)->fi_mmapped);
+}
+
+static inline int au_test_mmapped(struct file *f)
+{
+       return atomic_read(&au_fi(f)->fi_mmapped);
+}
+
+/* customize vma->vm_file */
+
+static inline void au_do_vm_file_reset(struct vm_area_struct *vma,
+                                      struct file *file)
+{
+       struct file *f;
+
+       f = vma->vm_file;
+       get_file(file);
+       vma->vm_file = file;
+       fput(f);
+}
+
+#ifdef CONFIG_MMU
+#define AuDbgVmRegion(file, vma) do {} while (0)
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+                                   struct file *file)
+{
+       au_do_vm_file_reset(vma, file);
+}
+#else
+#define AuDbgVmRegion(file, vma) \
+       AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file))
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+                                   struct file *file)
+{
+       struct file *f;
+
+       au_do_vm_file_reset(vma, file);
+       f = vma->vm_region->vm_file;
+       get_file(file);
+       vma->vm_region->vm_file = file;
+       fput(f);
+}
+#endif /* CONFIG_MMU */
+
+/* handle vma->vm_prfile */
+static inline void au_vm_prfile_set(struct vm_area_struct *vma,
+                                   struct file *file)
+{
+#ifdef CONFIG_AUFS_PROC_MAP
+       get_file(file);
+       vma->vm_prfile = file;
+#ifndef CONFIG_MMU
+       get_file(file);
+       vma->vm_region->vm_prfile = file;
+#endif
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FILE_H__ */
diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c
new file mode 100644 (file)
index 0000000..2111355
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file private data
+ */
+
+#include "aufs.h"
+
+void au_hfput(struct au_hfile *hf, struct file *file)
+{
+       /* todo: direct access f_flags */
+       if (vfsub_file_flags(file) & __FMODE_EXEC)
+               allow_write_access(hf->hf_file); /* presumably pairs with a deny_write_access() at open time -- verify */
+       fput(hf->hf_file);
+       hf->hf_file = NULL;
+       atomic_dec(&hf->hf_br->br_count);
+       hf->hf_br = NULL;
+}
+
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
+{
+       struct au_finfo *finfo = au_fi(file);
+       struct au_hfile *hf;
+       struct au_fidir *fidir;
+
+       fidir = finfo->fi_hdir;
+       if (!fidir) {
+               AuDebugOn(finfo->fi_btop != bindex); /* non-dir has a single slot, at fi_btop */
+               hf = &finfo->fi_htop;
+       } else
+               hf = fidir->fd_hfile + bindex;
+
+       if (hf && hf->hf_file)
+               au_hfput(hf, file); /* drop the previously set lower file first */
+       if (val) {
+               FiMustWriteLock(file);
+               hf->hf_file = val;
+               hf->hf_br = au_sbr(file->f_dentry->d_sb, bindex);
+       }
+}
+
+void au_update_figen(struct file *file)
+{
+       atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_dentry));
+       /* smp_mb(); */ /* atomic_set */
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_fidir *au_fidir_alloc(struct super_block *sb)
+{
+       struct au_fidir *fidir;
+       int nbr;
+
+       nbr = au_sbend(sb) + 1;
+       if (nbr < 2)
+               nbr = 2; /* initially allocate for 2 branches */
+       fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS);
+       if (fidir) {
+               fidir->fd_bbot = -1; /* no bottom branch opened yet */
+               fidir->fd_nent = nbr;
+               fidir->fd_vdir_cache = NULL; /* redundant after kzalloc(); kept for clarity */
+       }
+
+       return fidir;
+}
+
+int au_fidir_realloc(struct au_finfo *finfo, int nbr)
+{
+       int err;
+       struct au_fidir *fidir, *p;
+
+       AuRwMustWriteLock(&finfo->fi_rwsem);
+       fidir = finfo->fi_hdir;
+       AuDebugOn(!fidir);
+
+       err = -ENOMEM; /* NOTE(review): assumes au_kzrealloc() keeps the old buffer valid on failure */
+       p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
+                        GFP_NOFS);
+       if (p) {
+               p->fd_nent = nbr;
+               finfo->fi_hdir = p;
+               err = 0;
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_finfo_fin(struct file *file)
+{
+       struct au_finfo *finfo;
+
+       au_nfiles_dec(file->f_dentry->d_sb);
+
+       finfo = au_fi(file);
+       AuDebugOn(finfo->fi_hdir);
+       AuRwDestroy(&finfo->fi_rwsem);
+       au_cache_free_finfo(finfo);
+}
+
+void au_fi_init_once(void *_finfo)
+{
+       struct au_finfo *finfo = _finfo;
+       static struct lock_class_key aufs_fi;
+
+       au_rw_init(&finfo->fi_rwsem);
+       au_rw_class(&finfo->fi_rwsem, &aufs_fi);
+}
+
+int au_finfo_init(struct file *file, struct au_fidir *fidir)
+{
+       int err;
+       struct au_finfo *finfo;
+       struct dentry *dentry;
+
+       err = -ENOMEM;
+       dentry = file->f_dentry;
+       finfo = au_cache_alloc_finfo();
+       if (unlikely(!finfo))
+               goto out;
+
+       err = 0;
+       au_nfiles_inc(dentry->d_sb);
+       /* verbose coding for lock class name */
+       if (!fidir)
+               au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcNonDir_FIINFO);
+       else
+               au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcDir_FIINFO);
+       au_rw_write_lock(&finfo->fi_rwsem); /* returned locked; presumably the caller unlocks -- verify */
+       finfo->fi_btop = -1; /* no lower file set yet */
+       finfo->fi_hdir = fidir;
+       atomic_set(&finfo->fi_generation, au_digen(dentry));
+       /* smp_mb(); */ /* atomic_set */
+
+       file->private_data = finfo;
+
+out:
+       return err;
+}
diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h
new file mode 100644 (file)
index 0000000..90d8e30
--- /dev/null
@@ -0,0 +1,486 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * judging filesystem type
+ */
+
+#ifndef __AUFS_FSTYPE_H__
+#define __AUFS_FSTYPE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/romfs_fs.h>
+
+/* test whether @sb is an aufs mount itself (nested aufs is rejected) */
+static inline int au_test_aufs(struct super_block *sb)
+{
+       return sb->s_magic == AUFS_SUPER_MAGIC;
+}
+
+/* name of the filesystem type of @sb, eg. "ext4", "nfs" */
+static inline const char *au_sbtype(struct super_block *sb)
+{
+       return sb->s_type->name;
+}
+
+/*
+ * true when @sb is an iso9660 filesystem.
+ * bugfix: the body was swapped with au_test_romfs() -- it tested
+ * ROMFS_MAGIC under CONFIG_ROMFS_FS instead of the iso9660 magic.
+ */
+static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE)
+       return sb->s_magic == ISOFS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/*
+ * true when @sb is a romfs filesystem.
+ * bugfix: the body was swapped with au_test_iso9660() -- it tested
+ * ISOFS_SUPER_MAGIC under CONFIG_ISO9660_FS instead of the romfs magic.
+ */
+static inline int au_test_romfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE)
+       return sb->s_magic == ROMFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/* true when @sb is cramfs; constant 0 when cramfs is not configured */
+static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE)
+       return sb->s_magic == CRAMFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/*
+ * per-filesystem predicates: each returns non-zero iff @sb belongs to the
+ * named filesystem, and compiles to a constant 0 when that filesystem is
+ * not enabled in the kernel configuration.
+ */
+static inline int au_test_nfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE)
+       return sb->s_magic == NFS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_fuse(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE)
+       return sb->s_magic == FUSE_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_xfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE)
+       return sb->s_magic == XFS_SB_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_TMPFS
+       return sb->s_magic == TMPFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/* ecryptfs has no magic number of its own; match by type name */
+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE)
+       return !strcmp(au_sbtype(sb), "ecryptfs");
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_ocfs2(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_OCFS2_FS) || defined(CONFIG_OCFS2_FS_MODULE)
+       return sb->s_magic == OCFS2_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_ocfs2_dlmfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_OCFS2_FS_O2CB) || defined(CONFIG_OCFS2_FS_O2CB_MODULE)
+       return sb->s_magic == DLMFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_coda(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CODA_FS) || defined(CONFIG_CODA_FS_MODULE)
+       return sb->s_magic == CODA_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_v9fs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_9P_FS) || defined(CONFIG_9P_FS_MODULE)
+       return sb->s_magic == V9FS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/* more filesystem-type predicates; magic values come from <linux/magic.h>
+ * and the individual filesystems' headers */
+static inline int au_test_ext4(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_EXT4_FS) || defined(CONFIG_EXT4_FS_MODULE)
+       return sb->s_magic == EXT4_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/* sysv has no unique magic exposed here; match by type name */
+static inline int au_test_sysv(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_SYSV_FS) || defined(CONFIG_SYSV_FS_MODULE)
+       return !strcmp(au_sbtype(sb), "sysv");
+#else
+       return 0;
+#endif
+}
+
+/* ramfs is always built in, hence no CONFIG test */
+static inline int au_test_ramfs(struct super_block *sb)
+{
+       return sb->s_magic == RAMFS_MAGIC;
+}
+
+static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE)
+       return sb->s_magic == UBIFS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_procfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_PROC_FS
+       return sb->s_magic == PROC_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SYSFS
+       return sb->s_magic == SYSFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_configfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE)
+       return sb->s_magic == CONFIGFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/* minix has several on-disk format revisions, each with its own magic */
+static inline int au_test_minix(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE)
+       return sb->s_magic == MINIX3_SUPER_MAGIC
+               || sb->s_magic == MINIX2_SUPER_MAGIC
+               || sb->s_magic == MINIX2_SUPER_MAGIC2
+               || sb->s_magic == MINIX_SUPER_MAGIC
+               || sb->s_magic == MINIX_SUPER_MAGIC2;
+#else
+       return 0;
+#endif
+}
+
+/*
+ * true when @sb is cifs.
+ * bugfix: "CONFIGCIFS_FS_MODULE" was missing an underscore, so the
+ * predicate always returned 0 when CIFS was built as a module.
+ */
+static inline int au_test_cifs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CIFS_FS) || defined(CONFIG_CIFS_FS_MODULE)
+       return sb->s_magic == CIFS_MAGIC_NUMBER;
+#else
+       return 0;
+#endif
+}
+
+/* msdos and vfat share fat's superblock magic; see the wrappers below */
+static inline int au_test_fat(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE)
+       return sb->s_magic == MSDOS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_msdos(struct super_block *sb)
+{
+       return au_test_fat(sb);
+}
+
+static inline int au_test_vfat(struct super_block *sb)
+{
+       return au_test_fat(sb);
+}
+
+static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SECURITYFS
+       return sb->s_magic == SECURITYFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE)
+       return sb->s_magic == SQUASHFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+       return sb->s_magic == BTRFS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE)
+       return sb->s_magic == XENFS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_DEBUG_FS
+       return sb->s_magic == DEBUGFS_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_nilfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_NILFS) || defined(CONFIG_NILFS_MODULE)
+       return sb->s_magic == NILFS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_HFSPLUS_FS) || defined(CONFIG_HFSPLUS_FS_MODULE)
+       return sb->s_magic == HFSPLUS_SUPER_MAGIC;
+#else
+       return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * they can't be an aufs branch.
+ * NOTE(review): the function name misspells "unsupported"; it is kept
+ * as-is because callers elsewhere reference it by this exact name.
+ */
+static inline int au_test_fs_unsuppoted(struct super_block *sb)
+{
+       return
+#ifndef CONFIG_AUFS_BR_RAMFS
+               au_test_ramfs(sb) ||
+#endif
+               au_test_procfs(sb)
+               || au_test_sysfs(sb)
+               || au_test_configfs(sb)
+               || au_test_debugfs(sb)
+               || au_test_securityfs(sb)
+               || au_test_xenfs(sb)
+               || au_test_ecryptfs(sb)
+               /* || !strcmp(au_sbtype(sb), "unionfs") */
+               || au_test_aufs(sb); /* will be supported in next version */
+}
+
+/*
+ * If the filesystem supports NFS-export, then it has to support NULL as
+ * a nameidata parameter for ->create(), ->lookup() and ->d_revalidate().
+ * We can apply this principle when we handle a lower filesystem.
+ */
+static inline int au_test_fs_null_nd(struct super_block *sb)
+{
+       return !!sb->s_export_op;
+}
+
+/*
+ * heuristic for "remote" filesystems: anything that is neither tmpfs,
+ * (optionally) ramfs, nor backed by a block device.
+ */
+static inline int au_test_fs_remote(struct super_block *sb)
+{
+       return !au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+               && !au_test_ramfs(sb)
+#endif
+               && !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Note: these functions (below) are created after reading ->getattr() in all
+ * filesystems under linux/fs. it means we have to do so in every update...
+ */
+
+/*
+ * some filesystems require getattr to refresh the inode attributes before
+ * referencing.
+ * in most cases, we can rely on the inode attribute in NFS (or every remote fs)
+ * and leave the work for d_revalidate()
+ */
+static inline int au_test_fs_refresh_iattr(struct super_block *sb)
+{
+       return au_test_nfs(sb)
+               || au_test_fuse(sb)
+               /* || au_test_ocfs2(sb) */      /* untested */
+               /* || au_test_btrfs(sb) */      /* untested */
+               /* || au_test_coda(sb) */       /* untested */
+               /* || au_test_v9fs(sb) */       /* untested */
+               ;
+}
+
+/*
+ * filesystems which don't maintain i_size or i_blocks.
+ */
+static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
+{
+       return au_test_xfs(sb)
+               || au_test_btrfs(sb)
+               || au_test_ubifs(sb)
+               || au_test_hfsplus(sb)  /* maintained, but incorrect */
+               /* || au_test_ext4(sb) */       /* untested */
+               /* || au_test_ocfs2(sb) */      /* untested */
+               /* || au_test_ocfs2_dlmfs(sb) */ /* untested */
+               /* || au_test_sysv(sb) */       /* untested */
+               /* || au_test_minix(sb) */      /* untested */
+               ;
+}
+
+/*
+ * filesystems which don't store the correct value in some of their inode
+ * attributes.
+ */
+static inline int au_test_fs_bad_iattr(struct super_block *sb)
+{
+       return au_test_fs_bad_iattr_size(sb)
+               /* || au_test_cifs(sb) */       /* untested */
+               || au_test_fat(sb)
+               || au_test_msdos(sb)
+               || au_test_vfat(sb);
+}
+
+/* they don't check i_nlink in link(2) */
+static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
+{
+       return au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+               || au_test_ramfs(sb)
+#endif
+               || au_test_ubifs(sb)
+               || au_test_btrfs(sb)
+               || au_test_hfsplus(sb);
+}
+
+/*
+ * filesystems which sets S_NOATIME and S_NOCMTIME.
+ */
+static inline int au_test_fs_notime(struct super_block *sb)
+{
+       return au_test_nfs(sb)
+               || au_test_fuse(sb)
+               || au_test_ubifs(sb)
+               /* || au_test_cifs(sb) */       /* untested */
+               ;
+}
+
+/*
+ * filesystems which requires replacing i_mapping.
+ */
+static inline int au_test_fs_bad_mapping(struct super_block *sb)
+{
+       return au_test_fuse(sb)
+               || au_test_ubifs(sb);
+}
+
+/* temporary support for i#1 in cramfs */
+static inline int au_test_fs_unique_ino(struct inode *inode)
+{
+       /* cramfs may reuse ino 1; every other case is assumed unique */
+       if (au_test_cramfs(inode->i_sb))
+               return inode->i_ino != 1;
+       return 1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * the filesystem where the xino files placed must support i/o after unlink and
+ * maintain i_size and i_blocks.
+ */
+static inline int au_test_fs_bad_xino(struct super_block *sb)
+{
+       return au_test_fs_remote(sb)
+               || au_test_fs_bad_iattr_size(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+               || !(au_test_ramfs(sb) || au_test_fs_null_nd(sb))
+#else
+               || !au_test_fs_null_nd(sb) /* to keep xino code simple */
+#endif
+               /* don't want unnecessary work for xino */
+               || au_test_aufs(sb)
+               || au_test_ecryptfs(sb)
+               || au_test_nilfs(sb);
+}
+
+/* filesystems whose xino files should be truncated periodically */
+static inline int au_test_fs_trunc_xino(struct super_block *sb)
+{
+       return au_test_tmpfs(sb)
+               || au_test_ramfs(sb);
+}
+
+/*
+ * test if the @sb is real-readonly.
+ */
+static inline int au_test_fs_rr(struct super_block *sb)
+{
+       return au_test_squashfs(sb)
+               || au_test_iso9660(sb)
+               || au_test_cramfs(sb)
+               || au_test_romfs(sb);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FSTYPE_H__ */
diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c
new file mode 100644 (file)
index 0000000..db8a602
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * fsnotify for the lower directories
+ */
+
+#include "aufs.h"
+
+/* FS_IN_IGNORED is unnecessary */
+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE
+                                | FS_CREATE | FS_EVENT_ON_CHILD);
+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq);
+/* count of marks whose destruction is pending; au_hfsn_fin() waits on
+ * au_hfsn_wq until this drops back to zero */
+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0);
+
+/* fsnotify callback: last reference to the mark is gone, free our hnotify */
+static void au_hfsn_free_mark(struct fsnotify_mark *mark)
+{
+       struct au_hnotify *hn = container_of(mark, struct au_hnotify,
+                                            hn_mark);
+       AuDbg("here\n");
+       au_cache_free_hnotify(hn);
+       smp_mb__before_atomic_dec();
+       atomic64_dec(&au_hfsn_ifree);
+       wake_up(&au_hfsn_wq);
+}
+
+/*
+ * attach an fsnotify mark for @hinode's lower inode to the branch's
+ * fsnotify group.  returns what fsnotify_add_mark() returns.
+ */
+static int au_hfsn_alloc(struct au_hinode *hinode)
+{
+       struct au_hnotify *hn;
+       struct super_block *sb;
+       struct au_branch *br;
+       struct fsnotify_mark *mark;
+       aufs_bindex_t bindex;
+
+       hn = hinode->hi_notify;
+       sb = hn->hn_aufs_inode->i_sb;
+       bindex = au_br_index(sb, hinode->hi_id);
+       br = au_sbr(sb, bindex);
+       mark = &hn->hn_mark;
+       fsnotify_init_mark(mark, au_hfsn_free_mark);
+       mark->mask = AuHfsnMask;
+       /*
+        * by udba rename or rmdir, aufs assign a new inode to the known
+        * h_inode, so specify 1 to allow dups.
+        */
+       return fsnotify_add_mark(mark, br->br_hfsn_group, hinode->hi_inode,
+                                /*mnt*/NULL, /*allow_dups*/1);
+}
+
+/*
+ * detach the mark; the hnotify itself is freed later by
+ * au_hfsn_free_mark() when the mark's last reference drops, so this
+ * returns 0 ("do not free hn here").
+ */
+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn)
+{
+       struct fsnotify_mark *mark;
+       unsigned long long ull;
+
+       /* account for the deferred free before destroying the mark */
+       ull = atomic64_inc_return(&au_hfsn_ifree);
+       BUG_ON(!ull);
+
+       mark = &hn->hn_mark;
+       fsnotify_destroy_mark(mark);
+       fsnotify_put_mark(mark);
+
+       /* free hn by myself */
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * enable (@do_set non-zero) or disable event delivery for @hinode's mark
+ * by toggling AuHfsnMask in the mark's mask under the mark lock.
+ */
+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set)
+{
+       struct fsnotify_mark *mark;
+
+       mark = &hinode->hi_notify->hn_mark;
+       spin_lock(&mark->lock);
+       if (do_set) {
+               AuDebugOn(mark->mask & AuHfsnMask);
+               mark->mask |= AuHfsnMask;
+       } else {
+               AuDebugOn(!(mark->mask & AuHfsnMask));
+               mark->mask &= ~AuHfsnMask;
+       }
+       spin_unlock(&mark->lock);
+       /* fsnotify_recalc_inode_mask(hinode->hi_inode); */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* #define AuDbgHnotify */
+#ifdef AuDbgHnotify
+/* debugging helper: name of the first flag set in @mask, or "" */
+static char *au_hfsn_name(u32 mask)
+{
+#ifdef CONFIG_AUFS_DEBUG
+#define test_ret(flag)                         \
+       do {                                    \
+               if (mask & flag)                \
+                       return #flag;           \
+       } while (0)
+       test_ret(FS_ACCESS);
+       test_ret(FS_MODIFY);
+       test_ret(FS_ATTRIB);
+       test_ret(FS_CLOSE_WRITE);
+       test_ret(FS_CLOSE_NOWRITE);
+       test_ret(FS_OPEN);
+       test_ret(FS_MOVED_FROM);
+       test_ret(FS_MOVED_TO);
+       test_ret(FS_CREATE);
+       test_ret(FS_DELETE);
+       test_ret(FS_DELETE_SELF);
+       test_ret(FS_MOVE_SELF);
+       test_ret(FS_UNMOUNT);
+       test_ret(FS_Q_OVERFLOW);
+       test_ret(FS_IN_IGNORED);
+       test_ret(FS_IN_ISDIR);
+       test_ret(FS_IN_ONESHOT);
+       test_ret(FS_EVENT_ON_CHILD);
+       return "";
+#undef test_ret
+#else
+       return "??";
+#endif
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * fsnotify handler: translate a lower-directory event into an aufs
+ * hnotify job via au_hnotify().  only inode-backed events are expected.
+ */
+static int au_hfsn_handle_event(struct fsnotify_group *group,
+                               struct fsnotify_mark *inode_mark,
+                               struct fsnotify_mark *vfsmount_mark,
+                               struct fsnotify_event *event)
+{
+       int err;
+       struct au_hnotify *hnotify;
+       struct inode *h_dir, *h_inode;
+       __u32 mask;
+       struct qstr h_child_qstr = {
+               .name   = event->file_name,
+               .len    = event->name_len
+       };
+
+       AuDebugOn(event->data_type != FSNOTIFY_EVENT_INODE);
+
+       err = 0;
+       /* if FS_UNMOUNT happens, there must be another bug */
+       mask = event->mask;
+       AuDebugOn(mask & FS_UNMOUNT);
+       if (mask & (FS_IN_IGNORED | FS_UNMOUNT))
+               goto out;
+
+       h_dir = event->to_tell;
+       h_inode = event->inode;
+#ifdef AuDbgHnotify
+       au_debug_on();
+       if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1
+           || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) {
+               AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n",
+                     h_dir->i_ino, mask, au_hfsn_name(mask),
+                     AuLNPair(&h_child_qstr), h_inode ? h_inode->i_ino : 0);
+               /* WARN_ON(1); */
+       }
+       au_debug_off();
+#endif
+
+       AuDebugOn(!inode_mark);
+       hnotify = container_of(inode_mark, struct au_hnotify, hn_mark);
+       err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode);
+
+out:
+       return err;
+}
+
+/* isn't it waste to ask every registered 'group'? */
+/* copied from linux/fs/notify/inotify/inotify_fsnotify.c */
+/* it should be exported to modules */
+/* deliver the event only when the mark is interested in it
+ * (FS_EVENT_ON_CHILD is informational and excluded from the match) */
+static bool au_hfsn_should_send_event(struct fsnotify_group *group,
+                                     struct inode *h_inode,
+                                     struct fsnotify_mark *inode_mark,
+                                     struct fsnotify_mark *vfsmount_mark,
+                                     __u32 mask, void *data, int data_type)
+{
+       mask = (mask & ~FS_EVENT_ON_CHILD);
+       return inode_mark->mask & mask;
+}
+
+static struct fsnotify_ops au_hfsn_ops = {
+       .should_send_event      = au_hfsn_should_send_event,
+       .handle_event           = au_hfsn_handle_event
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* drop the branch's fsnotify group reference, if any */
+static void au_hfsn_fin_br(struct au_branch *br)
+{
+       if (br->br_hfsn_group)
+               fsnotify_put_group(br->br_hfsn_group);
+}
+
+/* per-branch init: the group is allocated lazily by au_hfsn_reset_br() */
+static int au_hfsn_init_br(struct au_branch *br, int perm)
+{
+       br->br_hfsn_group = NULL;
+       br->br_hfsn_ops = au_hfsn_ops;
+       return 0;
+}
+
+/*
+ * (re)configure the branch for hnotify: allocate its fsnotify group when
+ * udba=hnotify applies and the branch permission allows it, otherwise
+ * release any existing group.
+ */
+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+       int err;
+
+       err = 0;
+       if (udba != AuOpt_UDBA_HNOTIFY
+           || !au_br_hnotifyable(perm)) {
+               au_hfsn_fin_br(br);
+               br->br_hfsn_group = NULL;
+               goto out;
+       }
+
+       if (br->br_hfsn_group)
+               goto out;
+
+       br->br_hfsn_group = fsnotify_alloc_group(&br->br_hfsn_ops);
+       if (IS_ERR(br->br_hfsn_group)) {
+               err = PTR_ERR(br->br_hfsn_group);
+               pr_err("fsnotify_alloc_group() failed, %d\n", err);
+               br->br_hfsn_group = NULL;
+       }
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module teardown: wait until every deferred mark free has completed */
+static void au_hfsn_fin(void)
+{
+       AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree));
+       wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree));
+}
+
+/* fsnotify-backed implementation of the hnotify operations */
+const struct au_hnotify_op au_hnotify_op = {
+       .ctl            = au_hfsn_ctl,
+       .alloc          = au_hfsn_alloc,
+       .free           = au_hfsn_free,
+
+       .fin            = au_hfsn_fin,
+
+       .reset_br       = au_hfsn_reset_br,
+       .fin_br         = au_hfsn_fin_br,
+       .init_br        = au_hfsn_init_br
+};
diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c
new file mode 100644 (file)
index 0000000..c77a2a3
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * special support for filesystems which acquire an inode mutex
+ * when finally closing a file, eg, hfsplus.
+ *
+ * This trick is very simple and stupid, just to open the file before the
+ * really necessary open, to tell hfsplus that this is not the final closing.
+ * The caller should call au_h_open_pre() after acquiring the inode mutex,
+ * and au_h_open_post() after releasing it.
+ */
+
+#include "aufs.h"
+
+/*
+ * open an extra read-only handle on the lower regular file when the lower
+ * branch is hfsplus, so that a later close is not "the final" one.
+ * returns NULL when no extra open is needed; otherwise whatever
+ * au_h_open() returned (to be passed to au_h_open_post()).
+ */
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       struct file *h_file;
+       struct dentry *h_dentry;
+
+       h_dentry = au_h_dptr(dentry, bindex);
+       AuDebugOn(!h_dentry);
+       AuDebugOn(!h_dentry->d_inode);
+
+       h_file = NULL;
+       if (au_test_hfsplus(h_dentry->d_sb)
+           && S_ISREG(h_dentry->d_inode->i_mode))
+               h_file = au_h_open(dentry, bindex,
+                                  O_RDONLY | O_NOATIME | O_LARGEFILE,
+                                  /*file*/NULL);
+       return h_file;
+}
+
+/*
+ * counterpart of au_h_open_pre(): drop the extra handle (if one was
+ * opened) and release the branch reference taken by au_h_open().
+ */
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+                   struct file *h_file)
+{
+       if (h_file) {
+               fput(h_file);
+               au_sbr_put(dentry->d_sb, bindex);
+       }
+}
diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c
new file mode 100644 (file)
index 0000000..5904667
--- /dev/null
@@ -0,0 +1,712 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * abstraction to notify the direct changes on lower directories
+ */
+
+#include "aufs.h"
+
+/*
+ * allocate an hnotify for @hinode watching on behalf of the aufs @inode
+ * and register it with the backend (au_hnotify_op.alloc).
+ * returns 0, -ENOMEM, or the backend's error; -EEXIST is tolerated (see
+ * the comment below) and converted to 0.
+ */
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode)
+{
+       int err;
+       struct au_hnotify *hn;
+
+       err = -ENOMEM;
+       hn = au_cache_alloc_hnotify();
+       if (hn) {
+               hn->hn_aufs_inode = inode;
+               hinode->hi_notify = hn;
+               err = au_hnotify_op.alloc(hinode);
+               AuTraceErr(err);
+               if (unlikely(err)) {
+                       hinode->hi_notify = NULL;
+                       au_cache_free_hnotify(hn);
+                       /*
+                        * The upper dir was removed by udba, but the same named
+                        * dir left. In this case, aufs assigns a new inode
+                        * number and sets the monitor again.
+                        * For the lower dir, the old monitor is still left.
+                        */
+                       if (err == -EEXIST)
+                               err = 0;
+               }
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * unregister and release @hinode's hnotify, if any.  the backend's
+ * ->free() returns non-zero when the hnotify must be freed here (some
+ * backends free it asynchronously themselves).
+ */
+void au_hn_free(struct au_hinode *hinode)
+{
+       struct au_hnotify *hn;
+
+       hn = hinode->hi_notify;
+       if (hn) {
+               hinode->hi_notify = NULL;
+               if (au_hnotify_op.free(hinode, hn))
+                       au_cache_free_hnotify(hn);
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* enable/disable event delivery for @hinode when it has an hnotify */
+void au_hn_ctl(struct au_hinode *hinode, int do_set)
+{
+       if (hinode->hi_notify)
+               au_hnotify_op.ctl(hinode, do_set);
+}
+
+/*
+ * re-register the lower-inode monitors of every branch of @inode by
+ * clearing and re-setting each h_iptr slot.  temporary references
+ * (au_igrab/dget) keep the lower inode and whiteout dentry alive across
+ * the NULL assignment; AuHi_XINO is masked out to keep the xino entry.
+ */
+void au_hn_reset(struct inode *inode, unsigned int flags)
+{
+       aufs_bindex_t bindex, bend;
+       struct inode *hi;
+       struct dentry *iwhdentry;
+
+       bend = au_ibend(inode);
+       for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
+               hi = au_h_iptr(inode, bindex);
+               if (!hi)
+                       continue;
+
+               /* mutex_lock_nested(&hi->i_mutex, AuLsc_I_CHILD); */
+               iwhdentry = au_hi_wh(inode, bindex);
+               if (iwhdentry)
+                       dget(iwhdentry);
+               au_igrab(hi);
+               au_set_h_iptr(inode, bindex, NULL, 0);
+               au_set_h_iptr(inode, bindex, au_igrab(hi),
+                             flags & ~AuHi_XINO);
+               iput(hi);
+               dput(iwhdentry);
+               /* mutex_unlock(&hi->i_mutex); */
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * reset the xino entries of @inode: find the branch holding @h_inode and,
+ * if found, clear the xino mapping of every lower inode of @inode.
+ * write errors from au_xino_write() are deliberately ignored.
+ */
+static int hn_xino(struct inode *inode, struct inode *h_inode)
+{
+       int err;
+       aufs_bindex_t bindex, bend, bfound, bstart;
+       struct inode *h_i;
+
+       err = 0;
+       if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+               pr_warn("branch root dir was changed\n");
+               goto out;
+       }
+
+       bfound = -1;
+       bend = au_ibend(inode);
+       bstart = au_ibstart(inode);
+#if 0 /* reserved for future use */
+       if (bindex == bend) {
+               /* keep this ino in rename case */
+               goto out;
+       }
+#endif
+       for (bindex = bstart; bindex <= bend; bindex++)
+               if (au_h_iptr(inode, bindex) == h_inode) {
+                       bfound = bindex;
+                       break;
+               }
+       if (bfound < 0)
+               goto out;
+
+       for (bindex = bstart; bindex <= bend; bindex++) {
+               h_i = au_h_iptr(inode, bindex);
+               if (!h_i)
+                       continue;
+
+               err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
+               /* ignore this error */
+               /* bad action? */
+       }
+
+       /* children inode number will be broken */
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * invalidate the generation numbers of @dentry's whole cached subtree
+ * (dentries and their inodes), forcing revalidation on next access.
+ * the aufs root dentry itself is skipped.
+ */
+static int hn_gen_tree(struct dentry *dentry)
+{
+       int err, i, j, ndentry;
+       struct au_dcsub_pages dpages;
+       struct au_dpage *dpage;
+       struct dentry **dentries;
+
+       err = au_dpages_init(&dpages, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+       err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
+       if (unlikely(err))
+               goto out_dpages;
+
+       for (i = 0; i < dpages.ndpage; i++) {
+               dpage = dpages.dpages + i;
+               dentries = dpage->dentries;
+               ndentry = dpage->ndentry;
+               for (j = 0; j < ndentry; j++) {
+                       struct dentry *d;
+
+                       d = dentries[j];
+                       if (IS_ROOT(d))
+                               continue;
+
+                       au_digen_dec(d);
+                       if (d->d_inode)
+                               /* todo: reset children xino?
+                                  cached children only? */
+                               au_iigen_dec(d->d_inode);
+               }
+       }
+
+out_dpages:
+       au_dpages_free(&dpages);
+
+#if 0
+       /* discard children */
+       dentry_unhash(dentry);
+       dput(dentry);
+#endif
+out:
+       return err;
+}
+
+/*
+ * return 0 if processed.
+ */
+/*
+ * invalidate generations starting from @inode, matching its dentry alias
+ * named @name (@nlen bytes).
+ * return 0 if processed, non-zero otherwise.
+ * bugfix: the alias-name test used "len != nlen && memcmp(...)", which
+ * skipped an alias only when BOTH its length and its bytes differed --
+ * so any alias of equal length was treated as a match regardless of its
+ * name.  an alias mismatches when the length differs OR the bytes
+ * differ, hence "||".
+ */
+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
+                          const unsigned int isdir)
+{
+       int err;
+       struct dentry *d;
+       struct qstr *dname;
+
+       err = 1;
+       if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+               pr_warn("branch root dir was changed\n");
+               err = 0;
+               goto out;
+       }
+
+       if (!isdir) {
+               AuDebugOn(!name);
+               au_iigen_dec(inode);
+               spin_lock(&inode->i_lock);
+               list_for_each_entry(d, &inode->i_dentry, d_alias) {
+                       spin_lock(&d->d_lock);
+                       dname = &d->d_name;
+                       if (dname->len != nlen
+                           || memcmp(dname->name, name, nlen)) {
+                               spin_unlock(&d->d_lock);
+                               continue;
+                       }
+                       err = 0;
+                       au_digen_dec(d);
+                       spin_unlock(&d->d_lock);
+                       break;
+               }
+               spin_unlock(&inode->i_lock);
+       } else {
+               au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR);
+               d = d_find_alias(inode);
+               if (!d) {
+                       au_iigen_dec(inode);
+                       goto out;
+               }
+
+               spin_lock(&d->d_lock);
+               dname = &d->d_name;
+               if (dname->len == nlen && !memcmp(dname->name, name, nlen)) {
+                       spin_unlock(&d->d_lock);
+                       err = hn_gen_tree(d);
+                       spin_lock(&d->d_lock);
+               }
+               spin_unlock(&d->d_lock);
+               dput(d);
+       }
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * invalidate generations starting from @dentry itself; for directories
+ * the whole cached subtree is invalidated via hn_gen_tree().
+ * return 0 if processed, non-zero otherwise.
+ */
+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir)
+{
+       int err;
+       struct inode *inode;
+
+       inode = dentry->d_inode;
+       if (IS_ROOT(dentry)
+           /* || (inode && inode->i_ino == AUFS_ROOT_INO) */
+               ) {
+               pr_warn("branch root dir was changed\n");
+               return 0;
+       }
+
+       err = 0;
+       if (!isdir) {
+               au_digen_dec(dentry);
+               if (inode)
+                       au_iigen_dec(inode);
+       } else {
+               au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR);
+               if (inode)
+                       err = hn_gen_tree(dentry);
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* hnotify job flags */
+#define AuHnJob_XINO0          1
+#define AuHnJob_GEN            (1 << 1)
+#define AuHnJob_DIRENT         (1 << 2)
+#define AuHnJob_ISDIR          (1 << 3)
+#define AuHnJob_TRYXINO0       (1 << 4)
+#define AuHnJob_MNTPNT         (1 << 5)
+/* test/set/clear helpers for the AuHnJob_* bits above */
+#define au_ftest_hnjob(flags, name)    ((flags) & AuHnJob_##name)
+#define au_fset_hnjob(flags, name) \
+       do { (flags) |= AuHnJob_##name; } while (0)
+#define au_fclr_hnjob(flags, name) \
+       do { (flags) &= ~AuHnJob_##name; } while (0)
+
+/* a notification carries separate job flags for the child and its parent */
+enum {
+       AuHn_CHILD,
+       AuHn_PARENT,
+       AuHnLast
+};
+
+/* argument block handed from au_hnotify() to the workqueue (au_hn_bh) */
+struct au_hnotify_args {
+       struct inode *h_dir, *dir, *h_child_inode;
+       u32 mask;                       /* fsnotify event mask */
+       unsigned int flags[AuHnLast];   /* AuHnJob_* per AuHn_CHILD/PARENT */
+       unsigned int h_child_nlen;
+       char h_child_name[];            /* flexible array, NUL-terminated */
+};
+
+/* per-invocation parameters for hn_job() */
+struct hn_job_args {
+       unsigned int flags;
+       struct inode *inode, *h_inode, *dir, *h_dir;
+       struct dentry *dentry;
+       char *h_name;
+       int h_nlen;
+};
+
+/*
+ * perform the jobs requested in a->flags (AuHnJob_* bits): reset xino,
+ * obsolete generations, invalidate the virtual dir, warn about a removed
+ * mount-point.  Errors from the individual jobs are deliberately ignored;
+ * always returns 0.
+ */
+static int hn_job(struct hn_job_args *a)
+{
+       const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR);
+
+       /* reset xino */
+       if (au_ftest_hnjob(a->flags, XINO0) && a->inode)
+               hn_xino(a->inode, a->h_inode); /* ignore this error */
+
+       /* reset xino only when the lower inode turned out to be unlinked */
+       if (au_ftest_hnjob(a->flags, TRYXINO0)
+           && a->inode
+           && a->h_inode) {
+               mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+               if (!a->h_inode->i_nlink)
+                       hn_xino(a->inode, a->h_inode); /* ignore this error */
+               mutex_unlock(&a->h_inode->i_mutex);
+       }
+
+       /* make the generation obsolete */
+       if (au_ftest_hnjob(a->flags, GEN)) {
+               int err = -1;
+               if (a->inode)
+                       err = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode,
+                                             isdir);
+               /* fall back to the dentry when the inode path failed */
+               if (err && a->dentry)
+                       hn_gen_by_name(a->dentry, isdir);
+               /* ignore this error */
+       }
+
+       /* make dir entries obsolete */
+       if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) {
+               struct au_vdir *vdir;
+
+               /* zero the timestamp so the cached vdir is considered stale */
+               vdir = au_ivdir(a->inode);
+               if (vdir)
+                       vdir->vd_jiffy = 0;
+               /* IMustLock(a->inode); */
+               /* a->inode->i_version++; */
+       }
+
+       /* can do nothing but warn */
+       if (au_ftest_hnjob(a->flags, MNTPNT)
+           && a->dentry
+           && d_mountpoint(a->dentry))
+               pr_warn("mount-point %.*s is removed or renamed\n",
+                       AuDLNPair(a->dentry));
+
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * find the child of @dir named @name/@nlen among the cached dentries,
+ * decrementing its aufs generation on the way.  Returns the child with a
+ * reference held and its dinfo write-locked, or NULL.
+ */
+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
+                                          struct inode *dir)
+{
+       struct dentry *dentry, *d, *parent;
+       struct qstr *dname;
+
+       parent = d_find_alias(dir);
+       if (!parent)
+               return NULL;
+
+       dentry = NULL;
+       spin_lock(&parent->d_lock);
+       list_for_each_entry(d, &parent->d_subdirs, d_u.d_child) {
+               /* AuDbg("%.*s\n", AuDLNPair(d)); */
+               spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+               dname = &d->d_name;
+               if (dname->len != nlen || memcmp(dname->name, name, nlen))
+                       goto cont_unlock;
+               /* skip dentries which do not carry aufs dinfo */
+               if (au_di(d))
+                       au_digen_dec(d);
+               else
+                       goto cont_unlock;
+               /* only a dentry someone still holds is worth returning */
+               if (d->d_count) {
+                       dentry = dget_dlock(d);
+                       spin_unlock(&d->d_lock);
+                       break;
+               }
+
+       cont_unlock:
+               spin_unlock(&d->d_lock);
+       }
+       spin_unlock(&parent->d_lock);
+       dput(parent);
+
+       if (dentry)
+               di_write_lock_child(dentry);
+
+       return dentry;
+}
+
+/*
+ * map the lower inode number @h_ino (branch @bindex) to the aufs inode via
+ * the xino table.  Returns the inode with a reference held and its iinfo
+ * write-locked, or NULL when unknown/uncached/the aufs root.
+ */
+static struct inode *lookup_wlock_by_ino(struct super_block *sb,
+                                        aufs_bindex_t bindex, ino_t h_ino)
+{
+       struct inode *inode;
+       ino_t ino;
+       int err;
+
+       inode = NULL;
+       err = au_xino_read(sb, bindex, h_ino, &ino);
+       if (!err && ino)
+               inode = ilookup(sb, ino);
+       if (!inode)
+               goto out;
+
+       if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+               pr_warn("wrong root branch\n");
+               iput(inode);
+               inode = NULL;
+               goto out;
+       }
+
+       ii_write_lock_child(inode);
+
+out:
+       return inode;
+}
+
+/*
+ * bottom half of au_hnotify(), run from the workqueue.
+ * Locates the aufs objects corresponding to the lower-fs event and runs
+ * hn_job() for the child and then for the parent dir.  Consumes @_args:
+ * the inode references taken by au_hnotify() are dropped and the argument
+ * block is freed before returning.
+ */
+static void au_hn_bh(void *_args)
+{
+       struct au_hnotify_args *a = _args;
+       struct super_block *sb;
+       aufs_bindex_t bindex, bend, bfound;
+       unsigned char xino, try_iput;
+       int err;
+       struct inode *inode;
+       ino_t h_ino;
+       struct hn_job_args args;
+       struct dentry *dentry;
+       struct au_sbinfo *sbinfo;
+
+       AuDebugOn(!_args);
+       AuDebugOn(!a->h_dir);
+       AuDebugOn(!a->dir);
+       AuDebugOn(!a->mask);
+       AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n",
+             a->mask, a->dir->i_ino, a->h_dir->i_ino,
+             a->h_child_inode ? a->h_child_inode->i_ino : 0);
+
+       inode = NULL;
+       dentry = NULL;
+       /*
+        * do not lock a->dir->i_mutex here
+        * because of d_revalidate() may cause a deadlock.
+        */
+       sb = a->dir->i_sb;
+       AuDebugOn(!sb);
+       sbinfo = au_sbi(sb);
+       AuDebugOn(!sbinfo);
+       si_write_lock(sb, AuLock_NOPLMW);
+
+       /* find which branch of a->dir the event came from */
+       ii_read_lock_parent(a->dir);
+       bfound = -1;
+       bend = au_ibend(a->dir);
+       for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++)
+               if (au_h_iptr(a->dir, bindex) == a->h_dir) {
+                       bfound = bindex;
+                       break;
+               }
+       ii_read_unlock(a->dir);
+       if (unlikely(bfound < 0))
+               goto out;
+
+       xino = !!au_opt_test(au_mntflags(sb), XINO);
+       h_ino = 0;
+       if (a->h_child_inode)
+               h_ino = a->h_child_inode->i_ino;
+
+       /* try locating the affected child by name first ... */
+       if (a->h_child_nlen
+           && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN)
+               || au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT)))
+               dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
+                                             a->dir);
+       try_iput = 0;
+       if (dentry)
+               inode = dentry->d_inode;
+       /* ... then by inode number through xino, when the name missed */
+       if (xino && !inode && h_ino
+           && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0)
+               || au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0)
+               || au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) {
+               inode = lookup_wlock_by_ino(sb, bfound, h_ino);
+               try_iput = 1;
+           }
+
+       /* child side jobs */
+       args.flags = a->flags[AuHn_CHILD];
+       args.dentry = dentry;
+       args.inode = inode;
+       args.h_inode = a->h_child_inode;
+       args.dir = a->dir;
+       args.h_dir = a->h_dir;
+       args.h_name = a->h_child_name;
+       args.h_nlen = a->h_child_nlen;
+       err = hn_job(&args);
+       if (dentry) {
+               if (au_di(dentry))
+                       di_write_unlock(dentry);
+               dput(dentry);
+       }
+       /* inode from lookup_wlock_by_ino() carries an extra ref+lock */
+       if (inode && try_iput) {
+               ii_write_unlock(inode);
+               iput(inode);
+       }
+
+       /* parent side jobs */
+       ii_write_lock_parent(a->dir);
+       args.flags = a->flags[AuHn_PARENT];
+       args.dentry = NULL;
+       args.inode = a->dir;
+       args.h_inode = a->h_dir;
+       args.dir = NULL;
+       args.h_dir = NULL;
+       args.h_name = NULL;
+       args.h_nlen = 0;
+       err = hn_job(&args);
+       ii_write_unlock(a->dir);
+
+out:
+       /* drop the references au_hnotify() took, free the arg block */
+       iput(a->h_child_inode);
+       iput(a->h_dir);
+       iput(a->dir);
+       si_write_unlock(sb);
+       au_nwt_done(&sbinfo->si_nowait);
+       kfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * entry point for a lower-branch notification: translate the fsnotify
+ * event @mask into AuHnJob_* flags for child and parent, package the
+ * arguments (with references on the involved inodes) and queue au_hn_bh()
+ * on the nowait workqueue.  Returns 0 or a negative errno.
+ */
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+              struct qstr *h_child_qstr, struct inode *h_child_inode)
+{
+       int err, len;
+       unsigned int flags[AuHnLast], f;
+       unsigned char isdir, isroot, wh;
+       struct inode *dir;
+       struct au_hnotify_args *args;
+       char *p, *h_child_name;
+
+       err = 0;
+       AuDebugOn(!hnotify || !hnotify->hn_aufs_inode);
+       dir = igrab(hnotify->hn_aufs_inode);
+       if (!dir)
+               goto out;
+
+       isroot = (dir->i_ino == AUFS_ROOT_INO);
+       wh = 0;
+       h_child_name = (void *)h_child_qstr->name;
+       len = h_child_qstr->len;
+       /* strip the whiteout prefix; the event concerns the plain name */
+       if (h_child_name) {
+               if (len > AUFS_WH_PFX_LEN
+                   && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+                       h_child_name += AUFS_WH_PFX_LEN;
+                       len -= AUFS_WH_PFX_LEN;
+                       wh = 1;
+               }
+       }
+
+       isdir = 0;
+       if (h_child_inode)
+               isdir = !!S_ISDIR(h_child_inode->i_mode);
+       flags[AuHn_PARENT] = AuHnJob_ISDIR;
+       flags[AuHn_CHILD] = 0;
+       if (isdir)
+               flags[AuHn_CHILD] = AuHnJob_ISDIR;
+       au_fset_hnjob(flags[AuHn_PARENT], DIRENT);
+       au_fset_hnjob(flags[AuHn_CHILD], GEN);
+       switch (mask & FS_EVENTS_POSS_ON_CHILD) {
+       case FS_MOVED_FROM:
+       case FS_MOVED_TO:
+               au_fset_hnjob(flags[AuHn_CHILD], XINO0);
+               au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+               /*FALLTHROUGH*/
+       case FS_CREATE:
+               AuDebugOn(!h_child_name || !h_child_inode);
+               break;
+
+       case FS_DELETE:
+               /*
+                * aufs never be able to get this child inode.
+                * revalidation should be in d_revalidate()
+                * by checking i_nlink, i_generation or d_unhashed().
+                */
+               AuDebugOn(!h_child_name);
+               au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0);
+               au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+               break;
+
+       default:
+               AuDebugOn(1);
+       }
+
+       /* a whiteout event refers to the hidden name, not a real child */
+       if (wh)
+               h_child_inode = NULL;
+
+       err = -ENOMEM;
+       /* iput() and kfree() will be called in au_hnotify() */
+       args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
+       if (unlikely(!args)) {
+               AuErr1("no memory\n");
+               iput(dir);
+               goto out;
+       }
+       args->flags[AuHn_PARENT] = flags[AuHn_PARENT];
+       args->flags[AuHn_CHILD] = flags[AuHn_CHILD];
+       args->mask = mask;
+       args->dir = dir;
+       args->h_dir = igrab(h_dir);
+       if (h_child_inode)
+               h_child_inode = igrab(h_child_inode); /* can be NULL */
+       args->h_child_inode = h_child_inode;
+       args->h_child_nlen = len;
+       /* copy the (stripped) child name into the flexible array member */
+       if (len) {
+               p = (void *)args;
+               p += sizeof(*args);
+               memcpy(p, h_child_name, len);
+               p[len] = 0;
+       }
+
+       f = 0;
+       if (!dir->i_nlink)
+               f = AuWkq_NEST;
+       err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
+       if (unlikely(err)) {
+               /* queueing failed: undo the refs au_hn_bh() would drop */
+               pr_err("wkq %d\n", err);
+               iput(args->h_child_inode);
+               iput(args->h_dir);
+               iput(args->dir);
+               kfree(args);
+       }
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * reset per-branch notification state through the backend hook.
+ * UDBA must be enabled by the caller; returns 0 when no hook exists.
+ */
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+       AuDebugOn(!(udba & AuOptMask_UDBA));
+
+       if (!au_hnotify_op.reset_br)
+               return 0;
+       return au_hnotify_op.reset_br(udba, br, perm);
+}
+
+/*
+ * per-branch notification setup; a no-op (returning 0) when the backend
+ * provides no init_br hook.
+ */
+int au_hnotify_init_br(struct au_branch *br, int perm)
+{
+       if (!au_hnotify_op.init_br)
+               return 0;
+       return au_hnotify_op.init_br(br, perm);
+}
+
+/* per-branch notification teardown via the backend hook, when provided */
+void au_hnotify_fin_br(struct au_branch *br)
+{
+       if (!au_hnotify_op.fin_br)
+               return;
+       au_hnotify_op.fin_br(br);
+}
+
+/* drop the hnotify object cache and clear its slot */
+static void au_hn_destroy_cache(void)
+{
+       struct kmem_cache *cachep = au_cachep[AuCache_HNOTIFY];
+
+       au_cachep[AuCache_HNOTIFY] = NULL;
+       kmem_cache_destroy(cachep);
+}
+
+/*
+ * module init: create the hnotify object cache and run the backend's
+ * init hook; the cache is destroyed again if that hook fails.
+ */
+int __init au_hnotify_init(void)
+{
+       int err;
+
+       err = -ENOMEM;
+       au_cachep[AuCache_HNOTIFY] = AuCache(au_hnotify);
+       if (au_cachep[AuCache_HNOTIFY]) {
+               err = 0;
+               if (au_hnotify_op.init)
+                       err = au_hnotify_op.init();
+               if (unlikely(err))
+                       au_hn_destroy_cache();
+       }
+       AuTraceErr(err);
+       return err;
+}
+
+/* module exit: run the backend fin hook, then free the object cache */
+void au_hnotify_fin(void)
+{
+       if (au_hnotify_op.fin)
+               au_hnotify_op.fin();
+       /* cf. au_cache_fin() */
+       if (au_cachep[AuCache_HNOTIFY])
+               au_hn_destroy_cache();
+}
diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c
new file mode 100644 (file)
index 0000000..affa6b1
--- /dev/null
@@ -0,0 +1,1090 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (except add/del/rename)
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/fs_stack.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/*
+ * permission check against a lower (branch) inode.
+ * Chooses between generic_permission() and the lower fs' own ->permission()
+ * depending on branch writability and fs type, then applies the device
+ * cgroup and LSM checks.  Returns 0 or a negative errno.
+ */
+static int h_permission(struct inode *h_inode, int mask,
+                       struct vfsmount *h_mnt, int brperm)
+{
+       int err;
+       const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+
+       /* cheap rejections first: immutable writes, noexec/mode exec */
+       err = -EACCES;
+       if ((write_mask && IS_IMMUTABLE(h_inode))
+           || ((mask & MAY_EXEC)
+               && S_ISREG(h_inode->i_mode)
+               && ((h_mnt->mnt_flags & MNT_NOEXEC)
+                   || !(h_inode->i_mode & S_IXUGO))))
+               goto out;
+
+       /*
+        * - skip the lower fs test in the case of write to ro branch.
+        * - nfs dir permission write check is optimized, but a policy for
+        *   link/rename requires a real check.
+        */
+       if ((write_mask && !au_br_writable(brperm))
+           || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
+               && write_mask && !(mask & MAY_READ))
+           || !h_inode->i_op->permission) {
+               /* AuLabel(generic_permission); */
+               err = generic_permission(h_inode, mask);
+       } else {
+               /* AuLabel(h_inode->permission); */
+               err = h_inode->i_op->permission(h_inode, mask);
+               AuTraceErr(err);
+       }
+
+       if (!err)
+               err = devcgroup_inode_permission(h_inode, mask);
+       if (!err)
+               err = security_inode_permission(h_inode, mask);
+
+#if 0 /* dead code kept by upstream as a reminder about IMA */
+       if (!err) {
+               /* todo: do we need to call ima_path_check()? */
+               struct path h_path = {
+                       .dentry =
+                       .mnt    = h_mnt
+               };
+               err = ima_path_check(&h_path,
+                                    mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+                                    IMA_COUNT_LEAVE);
+       }
+#endif
+
+out:
+       return err;
+}
+
+/*
+ * ->permission() for aufs inodes.
+ * Non-dir or write access is checked on the top branch only (plus a probe
+ * that some writable branch exists); read access to a dir is checked on
+ * every branch holding it.  rcu-walk is not supported (-ECHILD).
+ */
+static int aufs_permission(struct inode *inode, int mask)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       const unsigned char isdir = !!S_ISDIR(inode->i_mode),
+               write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+       struct inode *h_inode;
+       struct super_block *sb;
+       struct au_branch *br;
+
+       /* todo: support rcu-walk? */
+       if (mask & MAY_NOT_BLOCK)
+               return -ECHILD;
+
+       sb = inode->i_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       ii_read_lock_child(inode);
+#if 0
+       err = au_iigen_test(inode, au_sigen(sb));
+       if (unlikely(err))
+               goto out;
+#endif
+
+       if (!isdir || write_mask) {
+               /* stale if the top lower inode vanished or changed type */
+               err = au_busy_or_stale();
+               h_inode = au_h_iptr(inode, au_ibstart(inode));
+               if (unlikely(!h_inode
+                            || (h_inode->i_mode & S_IFMT)
+                            != (inode->i_mode & S_IFMT)))
+                       goto out;
+
+               err = 0;
+               bindex = au_ibstart(inode);
+               br = au_sbr(sb, bindex);
+               err = h_permission(h_inode, mask, au_br_mnt(br), br->br_perm);
+               if (write_mask
+                   && !err
+                   && !special_file(h_inode->i_mode)) {
+                       /* test whether the upper writable branch exists */
+                       err = -EROFS;
+                       for (; bindex >= 0; bindex--)
+                               if (!au_br_rdonly(au_sbr(sb, bindex))) {
+                                       err = 0;
+                                       break;
+                               }
+               }
+               goto out;
+       }
+
+       /* non-write to dir */
+       err = 0;
+       bend = au_ibend(inode);
+       for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) {
+               h_inode = au_h_iptr(inode, bindex);
+               if (h_inode) {
+                       err = au_busy_or_stale();
+                       if (unlikely(!S_ISDIR(h_inode->i_mode)))
+                               break;
+
+                       br = au_sbr(sb, bindex);
+                       err = h_permission(h_inode, mask, au_br_mnt(br),
+                                          br->br_perm);
+               }
+       }
+
+out:
+       ii_read_unlock(inode);
+       si_read_unlock(sb);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ->lookup() for aufs dirs.
+ * Initializes the dentry's aufs dinfo, looks the name up across the
+ * parent's branches (au_lkup_dentry), instantiates/splices the aufs inode
+ * when something positive was found, and sets the lock class of the new
+ * dinfo rwsem by inode type.  rcu-walk is not supported (-ECHILD).
+ */
+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
+                                 struct nameidata *nd)
+{
+       struct dentry *ret, *parent;
+       struct inode *inode;
+       struct super_block *sb;
+       int err, npositive;
+
+       IMustLock(dir);
+
+       /* todo: support rcu-walk? */
+       ret = ERR_PTR(-ECHILD);
+       if (nd && (nd->flags & LOOKUP_RCU))
+               goto out;
+
+       ret = ERR_PTR(-ENAMETOOLONG);
+       if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+               goto out;
+
+       sb = dir->i_sb;
+       err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+       ret = ERR_PTR(err);
+       if (unlikely(err))
+               goto out;
+
+       err = au_di_init(dentry);
+       ret = ERR_PTR(err);
+       if (unlikely(err))
+               goto out_si;
+
+       inode = NULL;
+       npositive = 0; /* suppress a warning */
+       parent = dentry->d_parent; /* dir inode is locked */
+       di_read_lock_parent(parent, AuLock_IR);
+       /* refuse lookups under a dead or stale parent */
+       err = au_alive_dir(parent);
+       if (!err)
+               err = au_digen_test(parent, au_sigen(sb));
+       if (!err) {
+               /* npositive: number of branches holding a positive entry */
+               npositive = au_lkup_dentry(dentry, au_dbstart(parent),
+                                          /*type*/0, nd);
+               err = npositive;
+       }
+       di_read_unlock(parent, AuLock_IR);
+       ret = ERR_PTR(err);
+       if (unlikely(err < 0))
+               goto out_unlock;
+
+       if (npositive) {
+               inode = au_new_inode(dentry, /*must_new*/0);
+               ret = (void *)inode;
+       }
+       if (IS_ERR(inode)) {
+               inode = NULL;
+               goto out_unlock;
+       }
+
+       ret = d_splice_alias(inode, dentry);
+#if 0
+       if (unlikely(d_need_lookup(dentry))) {
+               spin_lock(&dentry->d_lock);
+               dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+               spin_unlock(&dentry->d_lock);
+       } else
+#endif
+       /* splice failed: release the reference au_new_inode() returned */
+       if (unlikely(IS_ERR(ret) && inode)) {
+               ii_write_unlock(inode);
+               iput(inode);
+               inode = NULL;
+       }
+
+out_unlock:
+       di_write_unlock(dentry);
+       if (inode) {
+               /* verbose coding for lock class name */
+               if (unlikely(S_ISLNK(inode->i_mode)))
+                       au_rw_class(&au_di(dentry)->di_rwsem,
+                                   au_lc_key + AuLcSymlink_DIINFO);
+               else if (unlikely(S_ISDIR(inode->i_mode)))
+                       au_rw_class(&au_di(dentry)->di_rwsem,
+                                   au_lc_key + AuLcDir_DIINFO);
+               else /* likely */
+                       au_rw_class(&au_di(dentry)->di_rwsem,
+                                   au_lc_key + AuLcNonDir_DIINFO);
+       }
+out_si:
+       si_read_unlock(sb);
+out:
+       return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * make sure the parent dir exists on branch @bcpup (copying the dir chain
+ * up or down as needed) and, when @add_entry, create the negative lower
+ * dentry for the new entry there.  Returns @bcpup on success, negative
+ * errno otherwise.
+ */
+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
+                         const unsigned char add_entry, aufs_bindex_t bcpup,
+                         aufs_bindex_t bstart)
+{
+       int err;
+       struct dentry *h_parent;
+       struct inode *h_dir;
+
+       /* add_entry callers already hold the parent's i_mutex */
+       if (add_entry)
+               IMustLock(parent->d_inode);
+       else
+               di_write_lock_parent(parent);
+
+       err = 0;
+       if (!au_h_dptr(parent, bcpup)) {
+               if (bstart > bcpup)
+                       err = au_cpup_dirs(dentry, bcpup);
+               else if (bstart < bcpup)
+                       err = au_cpdown_dirs(dentry, bcpup);
+               else
+                       BUG();
+       }
+       if (!err && add_entry) {
+               h_parent = au_h_dptr(parent, bcpup);
+               h_dir = h_parent->d_inode;
+               mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
+               err = au_lkup_neg(dentry, bcpup,
+                                 au_ftest_wrdir(add_entry, TMP_WHENTRY));
+               /* todo: no unlock here */
+               mutex_unlock(&h_dir->i_mutex);
+
+               AuDbg("bcpup %d\n", bcpup);
+               if (!err) {
+                       if (!dentry->d_inode)
+                               au_set_h_dptr(dentry, bstart, NULL);
+                       au_update_dbrange(dentry, /*do_put_zero*/0);
+               }
+       }
+
+       if (!add_entry)
+               di_write_unlock(parent);
+       if (!err)
+               err = bcpup; /* success */
+
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * decide the branch and the parent dir where we will create a new entry.
+ * returns new bindex or an error.
+ * copyup the parent dir if needed.
+ */
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+             struct au_wr_dir_args *args)
+{
+       int err;
+       unsigned int flags;
+       aufs_bindex_t bcpup, bstart, src_bstart;
+       const unsigned char add_entry
+               = au_ftest_wrdir(args->flags, ADD_ENTRY)
+               | au_ftest_wrdir(args->flags, TMP_WHENTRY);
+       struct super_block *sb;
+       struct dentry *parent;
+       struct au_sbinfo *sbinfo;
+
+       sb = dentry->d_sb;
+       sbinfo = au_sbi(sb);
+       parent = dget_parent(dentry);
+       bstart = au_dbstart(dentry);
+       bcpup = bstart;
+       if (args->force_btgt < 0) {
+               /* no target forced: pick one via the wbr create policy */
+               if (src_dentry) {
+                       src_bstart = au_dbstart(src_dentry);
+                       if (src_bstart < bstart)
+                               bcpup = src_bstart;
+               } else if (add_entry) {
+                       flags = 0;
+                       if (au_ftest_wrdir(args->flags, ISDIR))
+                               au_fset_wbr(flags, DIR);
+                       err = AuWbrCreate(sbinfo, dentry, flags);
+                       bcpup = err;
+               }
+
+               /* fall back to the copyup policy for ro/invalid targets */
+               if (bcpup < 0 || au_test_ro(sb, bcpup, dentry->d_inode)) {
+                       if (add_entry)
+                               err = AuWbrCopyup(sbinfo, dentry);
+                       else {
+                               if (!IS_ROOT(dentry)) {
+                                       di_read_lock_parent(parent, !AuLock_IR);
+                                       err = AuWbrCopyup(sbinfo, dentry);
+                                       di_read_unlock(parent, !AuLock_IR);
+                               } else
+                                       err = AuWbrCopyup(sbinfo, dentry);
+                       }
+                       bcpup = err;
+                       if (unlikely(err < 0))
+                               goto out;
+               }
+       } else {
+               bcpup = args->force_btgt;
+               AuDebugOn(au_test_ro(sb, bcpup, dentry->d_inode));
+       }
+
+       AuDbg("bstart %d, bcpup %d\n", bstart, bcpup);
+       err = bcpup;
+       if (bcpup == bstart)
+               goto out; /* success */
+
+       /* copyup the new parent into the branch we process */
+       err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart);
+       if (err >= 0) {
+               if (!dentry->d_inode) {
+                       au_set_h_dptr(dentry, bstart, NULL);
+                       au_set_dbstart(dentry, bcpup);
+                       au_set_dbend(dentry, bcpup);
+               }
+               AuDebugOn(add_entry && !au_h_dptr(dentry, bcpup));
+       }
+
+out:
+       dput(parent);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* release the pinned hierarchy-dir mutex, if one is held */
+void au_pin_hdir_unlock(struct au_pin *p)
+{
+       if (!p->hdir)
+               return;
+       au_hn_imtx_unlock(p->hdir);
+}
+
+/*
+ * take the pinned hierarchy-dir mutex and verify the pin is still valid:
+ * the hdir still backs h_parent's inode and (when set) h_dentry passes
+ * au_h_verify().  The mutex stays held even on error; the caller unlocks.
+ */
+static int au_pin_hdir_lock(struct au_pin *p)
+{
+       int err;
+
+       err = 0;
+       if (!p->hdir)
+               goto out;
+
+       /* even if an error happens later, keep this lock */
+       au_hn_imtx_lock_nested(p->hdir, p->lsc_hi);
+
+       err = -EBUSY;
+       if (unlikely(p->hdir->hi_inode != p->h_parent->d_inode))
+               goto out;
+
+       err = 0;
+       if (p->h_dentry)
+               err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode,
+                                 p->h_parent, p->br);
+
+out:
+       return err;
+}
+
+/*
+ * re-take the pin lock and additionally fail (non-zero) when either the
+ * pinned h_dentry or h_parent has become unlinked (i_nlink == 0) while the
+ * lock was dropped.
+ */
+int au_pin_hdir_relock(struct au_pin *p)
+{
+       int err, i;
+       struct inode *h_i;
+       struct dentry *h_d[] = {
+               p->h_dentry,
+               p->h_parent
+       };
+
+       err = au_pin_hdir_lock(p);
+       if (unlikely(err))
+               goto out;
+
+       for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) {
+               if (!h_d[i])
+                       continue;
+               h_i = h_d[i]->d_inode;
+               if (h_i)
+                       err = !h_i->i_nlink;
+       }
+
+out:
+       return err;
+}
+
+/*
+ * hand the mutex-debugging owner of the pinned hdir's i_mutex over to
+ * @task; only meaningful when the kernel tracks mutex owners.
+ */
+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task)
+{
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+       p->hdir->hi_inode->i_mutex.owner = task;
+#endif
+}
+
+/*
+ * lockdep bookkeeping: re-acquire the dep_map of the pinned hdir's i_mutex
+ * (nested at lsc_hi) and mark current as its owner, after the lock was
+ * transferred between tasks.
+ */
+void au_pin_hdir_acquire_nest(struct au_pin *p)
+{
+       if (p->hdir) {
+               mutex_acquire_nest(&p->hdir->hi_inode->i_mutex.dep_map,
+                                  p->lsc_hi, 0, NULL, _RET_IP_);
+               au_pin_hdir_set_owner(p, current);
+       }
+}
+
+/*
+ * counterpart of au_pin_hdir_acquire_nest(): give the i_mutex ownership
+ * back to the original pinning task and release the lockdep dep_map.
+ */
+void au_pin_hdir_release(struct au_pin *p)
+{
+       if (p->hdir) {
+               au_pin_hdir_set_owner(p, p->task);
+               mutex_release(&p->hdir->hi_inode->i_mutex.dep_map, 1, _RET_IP_);
+       }
+}
+
+/* branch-side parent dentry of a pinned entry, or NULL when unpinned */
+struct dentry *au_pinned_h_parent(struct au_pin *pin)
+{
+       struct dentry *h_parent = NULL;
+
+       if (pin && pin->parent)
+               h_parent = au_h_dptr(pin->parent, pin->bindex);
+       return h_parent;
+}
+
+/*
+ * undo au_do_pin(): drop mnt write access, the hdir mutex, the parent's
+ * dinfo read-lock, and the references on the hdir inode and parent dentry.
+ * Safe to call on a pin that never grabbed the hdir (p->hdir == NULL).
+ */
+void au_unpin(struct au_pin *p)
+{
+       if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE))
+               mnt_drop_write(p->h_mnt);
+       if (!p->hdir)
+               return;
+
+       au_pin_hdir_unlock(p);
+       if (!au_ftest_pin(p->flags, DI_LOCKED))
+               di_read_unlock(p->parent, AuLock_IR);
+       iput(p->hdir->hi_inode);
+       dput(p->parent);
+       p->parent = NULL;
+       p->hdir = NULL;
+       p->h_mnt = NULL;
+       /* do not clear p->task */
+}
+
+/*
+ * pin the parent dir on branch p->bindex: grab the parent dentry, its
+ * dinfo read-lock (unless DI_LOCKED), a reference on the lower dir inode,
+ * its i_mutex, and optionally mnt write access (MNT_WRITE).  The root
+ * dentry only needs the mnt write part.  On failure everything is undone
+ * and au_busy_or_stale() is returned.
+ */
+int au_do_pin(struct au_pin *p)
+{
+       int err;
+       struct super_block *sb;
+       struct inode *h_dir;
+
+       err = 0;
+       sb = p->dentry->d_sb;
+       p->br = au_sbr(sb, p->bindex);
+       if (IS_ROOT(p->dentry)) {
+               if (au_ftest_pin(p->flags, MNT_WRITE)) {
+                       p->h_mnt = au_br_mnt(p->br);
+                       err = mnt_want_write(p->h_mnt);
+                       if (unlikely(err)) {
+                               au_fclr_pin(p->flags, MNT_WRITE);
+                               goto out_err;
+                       }
+               }
+               goto out;
+       }
+
+       p->h_dentry = NULL;
+       if (p->bindex <= au_dbend(p->dentry))
+               p->h_dentry = au_h_dptr(p->dentry, p->bindex);
+
+       p->parent = dget_parent(p->dentry);
+       if (!au_ftest_pin(p->flags, DI_LOCKED))
+               di_read_lock(p->parent, AuLock_IR, p->lsc_di);
+
+       h_dir = NULL;
+       p->h_parent = au_h_dptr(p->parent, p->bindex);
+       p->hdir = au_hi(p->parent->d_inode, p->bindex);
+       if (p->hdir)
+               h_dir = p->hdir->hi_inode;
+
+       /*
+        * udba case, or
+        * if DI_LOCKED is not set, then p->parent may be different
+        * and h_parent can be NULL.
+        */
+       if (unlikely(!p->hdir || !h_dir || !p->h_parent)) {
+               err = -EBUSY;
+               if (!au_ftest_pin(p->flags, DI_LOCKED))
+                       di_read_unlock(p->parent, AuLock_IR);
+               dput(p->parent);
+               p->parent = NULL;
+               goto out_err;
+       }
+
+       /* hold the lower dir inode; au_unpin() will iput() it */
+       au_igrab(h_dir);
+       err = au_pin_hdir_lock(p);
+       if (unlikely(err))
+               goto out_unpin;
+
+       if (au_ftest_pin(p->flags, MNT_WRITE)) {
+               p->h_mnt = au_br_mnt(p->br);
+               err = mnt_want_write(p->h_mnt);
+               if (unlikely(err)) {
+                       au_fclr_pin(p->flags, MNT_WRITE);
+                       goto out_unpin;
+               }
+       }
+       goto out; /* success */
+
+out_unpin:
+       au_unpin(p);
+out_err:
+       pr_err("err %d\n", err);
+       err = au_busy_or_stale();
+out:
+       return err;
+}
+
+/*
+ * initialize a pin descriptor; only records the parameters and clears the
+ * state fields — nothing is locked or referenced until au_do_pin().
+ */
+void au_pin_init(struct au_pin *p, struct dentry *dentry,
+                aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+                unsigned int udba, unsigned char flags)
+{
+       p->dentry = dentry;
+       p->udba = udba;
+       p->lsc_di = lsc_di;
+       p->lsc_hi = lsc_hi;
+       p->flags = flags;
+       p->bindex = bindex;
+
+       /* state filled in / taken by au_do_pin() */
+       p->parent = NULL;
+       p->hdir = NULL;
+       p->h_mnt = NULL;
+
+       p->h_dentry = NULL;
+       p->h_parent = NULL;
+       p->br = NULL;
+       p->task = current;
+}
+
+/*
+ * convenience wrapper: pin @dentry on branch @bindex using the standard
+ * parent lock subclasses (DI_PARENT / I_PARENT2), then perform the pin.
+ */
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+          unsigned int udba, unsigned char flags)
+{
+       au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
+                   udba, flags);
+       return au_do_pin(pin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ->setattr() and ->getattr() are called in various cases.
+ * chmod, stat: dentry is revalidated.
+ * fchmod, fstat: file and dentry are not revalidated, additionally they may be
+ *               unhashed.
+ * for ->setattr(), ia->ia_file is passed from ftruncate only.
+ */
+/* todo: consolidate with do_refresh() and simple_reval_dpath() */
+/*
+ * revalidate @dentry for an attribute operation when the superblock
+ * generation @sigen shows it is stale; a fresh dentry is left untouched.
+ */
+static int au_reval_for_attr(struct dentry *dentry, unsigned int sigen)
+{
+       int err;
+       struct inode *inode;
+       struct dentry *parent;
+
+       err = 0;
+       inode = dentry->d_inode;
+       /* non-zero digen test means the dentry is out of date */
+       if (au_digen_test(dentry, sigen)) {
+               parent = dget_parent(dentry);
+               di_read_lock_parent(parent, AuLock_IR);
+               err = au_refresh_dentry(dentry, parent);
+               di_read_unlock(parent, AuLock_IR);
+               dput(parent);
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+/* flag bit for au_icpup_args.flags: set once a copy-up was performed */
+#define AuIcpup_DID_CPUP       1
+#define au_ftest_icpup(flags, name)    ((flags) & AuIcpup_##name)
+#define au_fset_icpup(flags, name) \
+       do { (flags) |= AuIcpup_##name; } while (0)
+#define au_fclr_icpup(flags, name) \
+       do { (flags) &= ~AuIcpup_##name; } while (0)
+
+/* shared state for the setattr pin/copy-up sequence */
+struct au_icpup_args {
+       unsigned char flags;
+       unsigned char pin_flags;
+       aufs_bindex_t btgt;     /* target (writable) branch index */
+       unsigned int udba;
+       struct au_pin pin;
+       struct path h_path;     /* lower path the attr op is applied to */
+       struct inode *h_inode;
+};
+
+/*
+ * choose the writable target branch for setattr, pin it, and copy the file
+ * up when the target differs from the current top branch.  On success the
+ * lower inode's i_mutex is held and a->h_path/a->h_inode point at the lower
+ * object to operate on; the caller must unlock and au_unpin() afterwards.
+ */
+static int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+                           struct au_icpup_args *a)
+{
+       int err;
+       loff_t sz;
+       aufs_bindex_t bstart, ibstart;
+       struct dentry *hi_wh, *parent;
+       struct inode *inode;
+       struct au_wr_dir_args wr_dir_args = {
+               .force_btgt     = -1,
+               .flags          = 0
+       };
+
+       bstart = au_dbstart(dentry);
+       inode = dentry->d_inode;
+       if (S_ISDIR(inode->i_mode))
+               au_fset_wrdir(wr_dir_args.flags, ISDIR);
+       /* plink or hi_wh() case */
+       ibstart = au_ibstart(inode);
+       if (bstart != ibstart && !au_test_ro(inode->i_sb, ibstart, inode))
+               wr_dir_args.force_btgt = ibstart;
+       err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+       if (unlikely(err < 0))
+               goto out;
+       a->btgt = err;
+       /* a different branch was chosen: a copy-up will be needed */
+       if (err != bstart)
+               au_fset_icpup(a->flags, DID_CPUP);
+
+       err = 0;
+       a->pin_flags = AuPin_MNT_WRITE;
+       parent = NULL;
+       if (!IS_ROOT(dentry)) {
+               au_fset_pin(a->pin_flags, DI_LOCKED);
+               parent = dget_parent(dentry);
+               di_write_lock_parent(parent);
+       }
+
+       err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags);
+       if (unlikely(err))
+               goto out_parent;
+
+       a->h_path.dentry = au_h_dptr(dentry, bstart);
+       a->h_inode = a->h_path.dentry->d_inode;
+       mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+       /* remember a shrinking truncate size so the copy-up can be bounded */
+       sz = -1;
+       if ((ia->ia_valid & ATTR_SIZE) && ia->ia_size < i_size_read(a->h_inode))
+               sz = ia->ia_size;
+       mutex_unlock(&a->h_inode->i_mutex);
+
+       hi_wh = NULL;
+       if (au_ftest_icpup(a->flags, DID_CPUP) && d_unlinked(dentry)) {
+               /* unlinked-but-open file: copy up under the whiteout name */
+               hi_wh = au_hi_wh(inode, a->btgt);
+               if (!hi_wh) {
+                       struct au_cp_generic cpg = {
+                               .dentry = dentry,
+                               .bdst   = a->btgt,
+                               .bsrc   = -1,
+                               .len    = sz,
+                               .pin    = &a->pin
+                       };
+                       err = au_sio_cpup_wh(&cpg, /*file*/NULL);
+                       if (unlikely(err))
+                               goto out_unlock;
+                       hi_wh = au_hi_wh(inode, a->btgt);
+                       /* todo: revalidate hi_wh? */
+               }
+       }
+
+       if (parent) {
+               au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
+               di_downgrade_lock(parent, AuLock_IR);
+               dput(parent);
+               parent = NULL;
+       }
+       if (!au_ftest_icpup(a->flags, DID_CPUP))
+               goto out; /* success */
+
+       if (!d_unhashed(dentry)) {
+               struct au_cp_generic cpg = {
+                       .dentry = dentry,
+                       .bdst   = a->btgt,
+                       .bsrc   = bstart,
+                       .len    = sz,
+                       .pin    = &a->pin,
+                       .flags  = AuCpup_DTIME | AuCpup_HOPEN
+               };
+               err = au_sio_cpup_simple(&cpg);
+               if (!err)
+                       a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+       } else if (!hi_wh)
+               a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+       else
+               a->h_path.dentry = hi_wh; /* do not dget here */
+
+out_unlock:
+       a->h_inode = a->h_path.dentry->d_inode;
+       if (!err)
+               goto out; /* success */
+       au_unpin(&a->pin);
+out_parent:
+       if (parent) {
+               di_write_unlock(parent);
+               dput(parent);
+       }
+out:
+       /* on success, hand the lower inode back locked to the caller */
+       if (!err)
+               mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+       return err;
+}
+
+/*
+ * ->setattr(): pin/copy-up via au_pin_and_icpup(), run the security hooks,
+ * then apply truncate or notify_change on the lower branch and refresh the
+ * aufs inode attributes.  Handles both path-based calls and ftruncate(2)
+ * (ATTR_FILE), temporarily swapping ia->ia_file for the lower file.
+ */
+static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+       int err;
+       struct inode *inode;
+       struct super_block *sb;
+       struct file *file;
+       struct au_icpup_args *a;
+
+       inode = dentry->d_inode;
+       IMustLock(inode);
+
+       err = -ENOMEM;
+       a = kzalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+               ia->ia_valid &= ~ATTR_MODE;
+
+       file = NULL;
+       sb = dentry->d_sb;
+       err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+       if (unlikely(err))
+               goto out_kfree;
+
+       if (ia->ia_valid & ATTR_FILE) {
+               /* currently ftruncate(2) only */
+               AuDebugOn(!S_ISREG(inode->i_mode));
+               file = ia->ia_file;
+               err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+               if (unlikely(err))
+                       goto out_si;
+               ia->ia_file = au_hf_top(file);
+               a->udba = AuOpt_UDBA_NONE;
+       } else {
+               /* fchmod() doesn't pass ia_file */
+               a->udba = au_opt_udba(sb);
+               di_write_lock_child(dentry);
+               /* no d_unlinked(), to set UDBA_NONE for root */
+               if (d_unhashed(dentry))
+                       a->udba = AuOpt_UDBA_NONE;
+               if (a->udba != AuOpt_UDBA_NONE) {
+                       AuDebugOn(IS_ROOT(dentry));
+                       err = au_reval_for_attr(dentry, au_sigen(sb));
+                       if (unlikely(err))
+                               goto out_dentry;
+               }
+       }
+
+       err = au_pin_and_icpup(dentry, ia, a);
+       if (unlikely(err < 0))
+               goto out_dentry;
+       if (au_ftest_icpup(a->flags, DID_CPUP)) {
+               /* after copy-up the lower file in ia_file is stale */
+               ia->ia_file = NULL;
+               ia->ia_valid &= ~ATTR_FILE;
+       }
+
+       a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
+       if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME))
+           == (ATTR_MODE | ATTR_CTIME)) {
+               err = security_path_chmod(a->h_path.dentry, a->h_path.mnt,
+                                         ia->ia_mode);
+               if (unlikely(err))
+                       goto out_unlock;
+       } else if ((ia->ia_valid & (ATTR_UID | ATTR_GID))
+                  && (ia->ia_valid & ATTR_CTIME)) {
+               err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid);
+               if (unlikely(err))
+                       goto out_unlock;
+       }
+
+       if (ia->ia_valid & ATTR_SIZE) {
+               struct file *f;
+
+               if (ia->ia_size < i_size_read(inode))
+                       /* unmap only */
+                       truncate_setsize(inode, ia->ia_size);
+
+               f = NULL;
+               if (ia->ia_valid & ATTR_FILE)
+                       f = ia->ia_file;
+               /* vfsub_trunc takes the i_mutex itself */
+               mutex_unlock(&a->h_inode->i_mutex);
+               err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
+               mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+       } else
+               err = vfsub_notify_change(&a->h_path, ia);
+       if (!err)
+               au_cpup_attr_changeable(inode);
+
+out_unlock:
+       mutex_unlock(&a->h_inode->i_mutex);
+       au_unpin(&a->pin);
+       if (unlikely(err))
+               au_update_dbstart(dentry);
+out_dentry:
+       di_write_unlock(dentry);
+       if (file) {
+               /* restore the caller's iattr before returning */
+               fi_write_unlock(file);
+               ia->ia_file = file;
+               ia->ia_valid |= ATTR_FILE;
+       }
+out_si:
+       si_read_unlock(sb);
+out_kfree:
+       kfree(a);
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * copy the freshly fetched lower attributes @st into the aufs inode.
+ * @nlink is the lower dir's previous link count; for directories the aufs
+ * nlink is adjusted by the delta, since it aggregates several branches.
+ */
+static void au_refresh_iattr(struct inode *inode, struct kstat *st,
+                            unsigned int nlink)
+{
+       unsigned int n;
+
+       inode->i_mode = st->mode;
+       inode->i_uid = st->uid;
+       inode->i_gid = st->gid;
+       inode->i_atime = st->atime;
+       inode->i_mtime = st->mtime;
+       inode->i_ctime = st->ctime;
+
+       au_cpup_attr_nlink(inode, /*force*/0);
+       if (S_ISDIR(inode->i_mode)) {
+               /* apply the lower branch's nlink delta to the aggregate */
+               n = inode->i_nlink;
+               n -= nlink;
+               n += st->nlink;
+               smp_mb();
+               set_nlink(inode, n);
+       }
+
+       spin_lock(&inode->i_lock);
+       inode->i_blocks = st->blocks;
+       i_size_write(inode, st->size);
+       spin_unlock(&inode->i_lock);
+}
+
+/*
+ * ->getattr(): revalidate the dentry when needed (fstat(2) support), then
+ * fetch the attributes from the top lower branch and refresh the aufs
+ * inode; most failures fall back to generic_fillattr() ("pretend success").
+ *
+ * fix: the original code called dput(h_dentry) and then dereferenced
+ * h_dentry->d_inode->i_nlink — a use after dropping our only reference,
+ * which can be a use-after-free if that was the last ref.  Read i_nlink
+ * while the reference is still held, then dput().
+ */
+static int aufs_getattr(struct vfsmount *mnt __maybe_unused,
+                       struct dentry *dentry, struct kstat *st)
+{
+       int err;
+       unsigned int mnt_flags;
+       aufs_bindex_t bindex;
+       unsigned char udba_none, positive;
+       struct super_block *sb, *h_sb;
+       struct inode *inode;
+       struct vfsmount *h_mnt;
+       struct dentry *h_dentry;
+
+       sb = dentry->d_sb;
+       inode = dentry->d_inode;
+       err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+       if (unlikely(err))
+               goto out;
+       mnt_flags = au_mntflags(sb);
+       udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
+
+       /* support fstat(2) */
+       if (!d_unlinked(dentry) && !udba_none) {
+               unsigned int sigen = au_sigen(sb);
+               err = au_digen_test(dentry, sigen);
+               if (!err) {
+                       di_read_lock_child(dentry, AuLock_IR);
+                       err = au_dbrange_test(dentry);
+                       if (unlikely(err))
+                               goto out_unlock;
+               } else {
+                       AuDebugOn(IS_ROOT(dentry));
+                       di_write_lock_child(dentry);
+                       err = au_dbrange_test(dentry);
+                       if (!err)
+                               err = au_reval_for_attr(dentry, sigen);
+                       di_downgrade_lock(dentry, AuLock_IR);
+                       if (unlikely(err))
+                               goto out_unlock;
+               }
+       } else
+               di_read_lock_child(dentry, AuLock_IR);
+
+       bindex = au_ibstart(inode);
+       h_mnt = au_sbr_mnt(sb, bindex);
+       h_sb = h_mnt->mnt_sb;
+       if (!au_test_fs_bad_iattr(h_sb) && udba_none)
+               goto out_fill; /* success */
+
+       h_dentry = NULL;
+       if (au_dbstart(dentry) == bindex)
+               h_dentry = dget(au_h_dptr(dentry, bindex));
+       else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
+               h_dentry = au_plink_lkup(inode, bindex);
+               if (IS_ERR(h_dentry))
+                       goto out_fill; /* pretending success */
+       }
+       /* illegally overlapped or something */
+       if (unlikely(!h_dentry))
+               goto out_fill; /* pretending success */
+
+       positive = !!h_dentry->d_inode;
+       if (positive)
+               err = vfs_getattr(h_mnt, h_dentry, st);
+       if (!err) {
+               if (positive)
+                       /* read i_nlink before dropping our reference */
+                       au_refresh_iattr(inode, st,
+                                        h_dentry->d_inode->i_nlink);
+               dput(h_dentry);
+               goto out_fill; /* success */
+       }
+       dput(h_dentry);
+       AuTraceErr(err);
+       goto out_unlock;
+
+out_fill:
+       generic_fillattr(inode, st);
+out_unlock:
+       di_read_unlock(dentry, AuLock_IR);
+       si_read_unlock(sb);
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
<br>
+/*
+ * perform readlink on the lower dentry at branch @bindex, after the
+ * security check; also touches atime on writable branches and copies it up.
+ */
+static int h_readlink(struct dentry *dentry, int bindex, char __user *buf,
+                     int bufsiz)
+{
+       int err;
+       struct super_block *sb;
+       struct dentry *h_dentry;
+
+       err = -EINVAL;
+       h_dentry = au_h_dptr(dentry, bindex);
+       if (unlikely(!h_dentry->d_inode->i_op->readlink))
+               goto out;
+
+       err = security_inode_readlink(h_dentry);
+       if (unlikely(err))
+               goto out;
+
+       sb = dentry->d_sb;
+       if (!au_test_ro(sb, bindex, dentry->d_inode)) {
+               /* mirror the lower atime update into the aufs inode */
+               vfsub_touch_atime(au_sbr_mnt(sb, bindex), h_dentry);
+               fsstack_copy_attr_atime(dentry->d_inode, h_dentry->d_inode);
+       }
+       err = h_dentry->d_inode->i_op->readlink(h_dentry, buf, bufsiz);
+
+out:
+       return err;
+}
+
+/* ->readlink(): lock, verify the dentry is hashed and positive, delegate */
+static int aufs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+       int err;
+
+       err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+       if (unlikely(err))
+               goto out;
+       err = au_d_hashed_positive(dentry);
+       if (!err)
+               err = h_readlink(dentry, au_dbstart(dentry), buf, bufsiz);
+       aufs_read_unlock(dentry, AuLock_IR);
+
+out:
+       return err;
+}
+
+/*
+ * ->follow_link(): read the lower symlink target into a __getname buffer
+ * under KERNEL_DS (h_readlink wants a user pointer) and hand it to
+ * nd_set_link(); the buffer is released in aufs_put_link().
+ */
+static void *aufs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+       int err;
+       mm_segment_t old_fs;
+       union {
+               char *k;
+               char __user *u;
+       } buf;
+
+       err = -ENOMEM;
+       buf.k = __getname_gfp(GFP_NOFS);
+       if (unlikely(!buf.k))
+               goto out;
+
+       err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+       if (unlikely(err))
+               goto out_name;
+
+       err = au_d_hashed_positive(dentry);
+       if (!err) {
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               err = h_readlink(dentry, au_dbstart(dentry), buf.u, PATH_MAX);
+               set_fs(old_fs);
+       }
+       aufs_read_unlock(dentry, AuLock_IR);
+
+       if (err >= 0) {
+               buf.k[err] = 0;
+               /* will be freed by put_link */
+               nd_set_link(nd, buf.k);
+               return NULL; /* success */
+       }
+
+out_name:
+       __putname(buf.k);
+out:
+       /*
+        * NOTE(review): dropping nd->path here assumes the 3.2 VFS expects
+        * the callee to put it on follow_link failure — confirm against
+        * fs/namei.c before changing.
+        */
+       path_put(&nd->path);
+       AuTraceErr(err);
+       return ERR_PTR(err);
+}
+
+/* ->put_link(): release the __getname buffer set by aufs_follow_link() */
+static void aufs_put_link(struct dentry *dentry __maybe_unused,
+                         struct nameidata *nd, void *cookie __maybe_unused)
+{
+       __putname(nd_get_link(nd));
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* ->truncate_range() is intentionally unsupported by aufs */
+static void aufs_truncate_range(struct inode *inode __maybe_unused,
+                               loff_t start __maybe_unused,
+                               loff_t end __maybe_unused)
+{
+       AuUnsupport();
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* inode operations for symlinks */
+struct inode_operations aufs_symlink_iop = {
+       .permission     = aufs_permission,
+       .setattr        = aufs_setattr,
+       .getattr        = aufs_getattr,
+       .readlink       = aufs_readlink,
+       .follow_link    = aufs_follow_link,
+       .put_link       = aufs_put_link
+};
+
+/* inode operations for directories */
+struct inode_operations aufs_dir_iop = {
+       .create         = aufs_create,
+       .lookup         = aufs_lookup,
+       .link           = aufs_link,
+       .unlink         = aufs_unlink,
+       .symlink        = aufs_symlink,
+       .mkdir          = aufs_mkdir,
+       .rmdir          = aufs_rmdir,
+       .mknod          = aufs_mknod,
+       .rename         = aufs_rename,
+
+       .permission     = aufs_permission,
+       .setattr        = aufs_setattr,
+       .getattr        = aufs_getattr
+};
+
+/* inode operations for regular files and everything else */
+struct inode_operations aufs_iop = {
+       .permission     = aufs_permission,
+       .setattr        = aufs_setattr,
+       .getattr        = aufs_getattr,
+       .truncate_range = aufs_truncate_range
+};
diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c
new file mode 100644 (file)
index 0000000..1812d28
--- /dev/null
@@ -0,0 +1,737 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (add entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * final procedure of adding a new entry, except link(2).
+ * remove whiteout, instantiate, copyup the parent dir's times and size
+ * and update version.
+ * if it failed, re-create the removed whiteout.
+ */
+static int epilog(struct inode *dir, aufs_bindex_t bindex,
+                 struct dentry *wh_dentry, struct dentry *dentry)
+{
+       int err, rerr;
+       aufs_bindex_t bwh;
+       struct path h_path;
+       struct inode *inode, *h_dir;
+       struct dentry *wh;
+
+       /* bwh: branch index of the removed whiteout, -1 when there was none */
+       bwh = -1;
+       if (wh_dentry) {
+               h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */
+               IMustLock(h_dir);
+               AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
+               bwh = au_dbwh(dentry);
+               h_path.dentry = wh_dentry;
+               h_path.mnt = au_sbr_mnt(dir->i_sb, bindex);
+               err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
+                                         dentry);
+               if (unlikely(err))
+                       goto out;
+       }
+
+       inode = au_new_inode(dentry, /*must_new*/1);
+       if (!IS_ERR(inode)) {
+               d_instantiate(dentry, inode);
+               dir = dentry->d_parent->d_inode; /* dir inode is locked */
+               IMustLock(dir);
+               if (au_ibstart(dir) == au_dbstart(dentry))
+                       au_cpup_attr_timesizes(dir);
+               dir->i_version++;
+               return 0; /* success */
+       }
+
+       err = PTR_ERR(inode);
+       if (!wh_dentry)
+               goto out;
+
+       /* revert: re-create the whiteout we removed above */
+       /* dir inode is locked */
+       wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
+       rerr = PTR_ERR(wh);
+       if (IS_ERR(wh)) {
+               AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
+                       AuDLNPair(dentry), err, rerr);
+               err = -EIO;
+       } else
+               dput(wh);
+
+out:
+       return err;
+}
+
+/* may @dentry be the target of an add operation? must be hashed, negative */
+static int au_d_may_add(struct dentry *dentry)
+{
+       int err;
+
+       err = 0;
+       if (unlikely(d_unhashed(dentry)))
+               err = -ENOENT;
+       if (unlikely(dentry->d_inode))
+               err = -EEXIST;
+       return err;
+}
+
+/*
+ * simple tests for the adding inode operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+              struct dentry *h_parent, int isdir)
+{
+       int err;
+       umode_t h_mode;
+       struct dentry *h_dentry;
+       struct inode *h_inode;
+
+       err = -ENAMETOOLONG;
+       if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+               goto out;
+
+       h_dentry = au_h_dptr(dentry, bindex);
+       h_inode = h_dentry->d_inode;
+       if (!dentry->d_inode) {
+               /* plain add: the lower name must still be negative */
+               err = -EEXIST;
+               if (unlikely(h_inode))
+                       goto out;
+       } else {
+               /* rename(2) case */
+               err = -EIO;
+               if (unlikely(!h_inode || !h_inode->i_nlink))
+                       goto out;
+
+               h_mode = h_inode->i_mode;
+               if (!isdir) {
+                       err = -EISDIR;
+                       if (unlikely(S_ISDIR(h_mode)))
+                               goto out;
+               } else if (unlikely(!S_ISDIR(h_mode))) {
+                       err = -ENOTDIR;
+                       goto out;
+               }
+       }
+
+       err = 0;
+       /* expected parent dir is locked */
+       if (unlikely(h_parent != h_dentry->d_parent))
+               err = -EIO;
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * initial procedure of adding a new entry.
+ * prepare writable branch and the parent dir, lock it,
+ * and lookup whiteout for the new entry.
+ */
+static struct dentry*
+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
+                 struct dentry *src_dentry, struct au_pin *pin,
+                 struct au_wr_dir_args *wr_dir_args)
+{
+       struct dentry *wh_dentry, *h_parent;
+       struct super_block *sb;
+       struct au_branch *br;
+       int err;
+       unsigned int udba;
+       aufs_bindex_t bcpup;
+
+       AuDbg("%.*s\n", AuDLNPair(dentry));
+
+       /* choose the writable branch for this add */
+       err = au_wr_dir(dentry, src_dentry, wr_dir_args);
+       bcpup = err;
+       wh_dentry = ERR_PTR(err);
+       if (unlikely(err < 0))
+               goto out;
+
+       sb = dentry->d_sb;
+       udba = au_opt_udba(sb);
+       err = au_pin(pin, dentry, bcpup, udba,
+                    AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+       wh_dentry = ERR_PTR(err);
+       if (unlikely(err))
+               goto out;
+
+       h_parent = au_pinned_h_parent(pin);
+       if (udba != AuOpt_UDBA_NONE
+           && au_dbstart(dentry) == bcpup)
+               err = au_may_add(dentry, bcpup, h_parent,
+                                au_ftest_wrdir(wr_dir_args->flags, ISDIR));
+       else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+               err = -ENAMETOOLONG;
+       wh_dentry = ERR_PTR(err);
+       if (unlikely(err))
+               goto out_unpin;
+
+       br = au_sbr(sb, bcpup);
+       if (dt) {
+               /* snapshot the parent dir's times for a possible revert */
+               struct path tmp = {
+                       .dentry = h_parent,
+                       .mnt    = au_br_mnt(br)
+               };
+               au_dtime_store(dt, au_pinned_parent(pin), &tmp);
+       }
+
+       /* NULL return means success with no whiteout to remove */
+       wh_dentry = NULL;
+       if (bcpup != au_dbwh(dentry))
+               goto out; /* success */
+
+       wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+
+out_unpin:
+       if (IS_ERR(wh_dentry))
+               au_unpin(pin);
+out:
+       return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* tagged-union argument for add_simple(): create(2)/symlink(2)/mknod(2) */
+enum { Mknod, Symlink, Creat };
+struct simple_arg {
+       int type;       /* one of Mknod/Symlink/Creat, selects the union arm */
+       union {
+               struct {
+                       int mode;
+                       struct nameidata *nd;
+               } c;
+               struct {
+                       const char *symname;
+               } s;
+               struct {
+                       int mode;
+                       dev_t dev;
+               } m;
+       } u;
+};
+
+/*
+ * common implementation for create/symlink/mknod: pick the writable branch,
+ * remove any whiteout, perform the lower-fs operation, then instantiate via
+ * epilog().  On failure the lower entry is unlinked and dtime reverted.
+ */
+static int add_simple(struct inode *dir, struct dentry *dentry,
+                     struct simple_arg *arg)
+{
+       int err;
+       aufs_bindex_t bstart;
+       unsigned char created;
+       struct dentry *wh_dentry, *parent;
+       struct inode *h_dir;
+       /* to reduce stack size */
+       struct {
+               struct au_dtime dt;
+               struct au_pin pin;
+               struct path h_path;
+               struct au_wr_dir_args wr_dir_args;
+       } *a;
+
+       AuDbg("%.*s\n", AuDLNPair(dentry));
+       IMustLock(dir);
+
+       err = -ENOMEM;
+       a = kmalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+       a->wr_dir_args.force_btgt = -1;
+       a->wr_dir_args.flags = AuWrDir_ADD_ENTRY;
+
+       parent = dentry->d_parent; /* dir inode is locked */
+       err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+       if (unlikely(err))
+               goto out_free;
+       err = au_d_may_add(dentry);
+       if (unlikely(err))
+               goto out_unlock;
+       di_write_lock_parent(parent);
+       wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+                                     &a->pin, &a->wr_dir_args);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out_parent;
+
+       bstart = au_dbstart(dentry);
+       a->h_path.dentry = au_h_dptr(dentry, bstart);
+       a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
+       h_dir = au_pinned_h_dir(&a->pin);
+       switch (arg->type) {
+       case Creat:
+               err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode);
+               break;
+       case Symlink:
+               err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname);
+               break;
+       case Mknod:
+               err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode,
+                                 arg->u.m.dev);
+               break;
+       default:
+               BUG();
+       }
+       created = !err;
+       if (!err)
+               err = epilog(dir, bstart, wh_dentry, dentry);
+
+       /* revert the lower-fs creation if epilog() failed */
+       if (unlikely(created && err && a->h_path.dentry->d_inode)) {
+               int rerr;
+               rerr = vfsub_unlink(h_dir, &a->h_path, /*force*/0);
+               if (rerr) {
+                       AuIOErr("%.*s revert failure(%d, %d)\n",
+                               AuDLNPair(dentry), err, rerr);
+                       err = -EIO;
+               }
+               au_dtime_revert(&a->dt);
+       }
+
+       au_unpin(&a->pin);
+       dput(wh_dentry);
+
+out_parent:
+       di_write_unlock(parent);
+out_unlock:
+       if (unlikely(err)) {
+               au_update_dbstart(dentry);
+               d_drop(dentry);
+       }
+       aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+       kfree(a);
+out:
+       return err;
+}
+
+/* ->mknod(): thin wrapper around add_simple() */
+int aufs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+       struct simple_arg arg = {
+               .type = Mknod,
+               .u.m = {
+                       .mode   = mode,
+                       .dev    = dev
+               }
+       };
+       return add_simple(dir, dentry, &arg);
+}
+
+/* ->symlink(): thin wrapper around add_simple() */
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+       struct simple_arg arg = {
+               .type = Symlink,
+               .u.s.symname = symname
+       };
+       return add_simple(dir, dentry, &arg);
+}
+
+/* ->create(): thin wrapper around add_simple() */
+int aufs_create(struct inode *dir, struct dentry *dentry, int mode,
+               struct nameidata *nd)
+{
+       struct simple_arg arg = {
+               .type = Creat,
+               .u.c = {
+                       .mode   = mode,
+                       .nd     = nd
+               }
+       };
+       return add_simple(dir, dentry, &arg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* shared state for aufs_link() and its helpers */
+struct au_link_args {
+       aufs_bindex_t bdst, bsrc;       /* destination and source branches */
+       struct au_pin pin;
+       struct path h_path;
+       struct dentry *src_parent, *parent;
+};
+
+/*
+ * before linking, copy up @src_dentry (and any missing parent dirs) to the
+ * destination branch so the lower link(2) has a source on that branch.
+ */
+static int au_cpup_before_link(struct dentry *src_dentry,
+                              struct au_link_args *a)
+{
+       int err;
+       struct dentry *h_src_dentry;
+       struct au_cp_generic cpg = {
+               .dentry = src_dentry,
+               .bdst   = a->bdst,
+               .bsrc   = a->bsrc,
+               .len    = -1,
+               .pin    = &a->pin,
+               .flags  = AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */
+       };
+
+       di_read_lock_parent(a->src_parent, AuLock_IR);
+       err = au_test_and_cpup_dirs(src_dentry, a->bdst);
+       if (unlikely(err))
+               goto out;
+
+       h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
+       err = au_pin(&a->pin, src_dentry, a->bdst,
+                    au_opt_udba(src_dentry->d_sb),
+                    AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+       if (unlikely(err))
+               goto out;
+
+       err = au_sio_cpup_simple(&cpg);
+       au_unpin(&a->pin);
+
+out:
+       di_read_unlock(a->src_parent, AuLock_IR);
+       return err;
+}
+
+/*
+ * make the link source available on the destination branch: either copy up
+ * the source under the new name (temporarily borrowing dentry's lower slots
+ * and inode), or hard-link from an existing lower dentry / pseudo-link.
+ * Registers a pseudo-link afterwards when a new one was created.
+ */
+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry,
+                          struct au_link_args *a)
+{
+       int err;
+       unsigned char plink;
+       aufs_bindex_t bend;
+       struct dentry *h_src_dentry;
+       struct inode *h_inode, *inode;
+       struct super_block *sb;
+       struct file *h_file;
+
+       plink = 0;
+       h_inode = NULL;
+       sb = src_dentry->d_sb;
+       inode = src_dentry->d_inode;
+       if (au_ibstart(inode) <= a->bdst)
+               h_inode = au_h_iptr(inode, a->bdst);
+       if (!h_inode || !h_inode->i_nlink) {
+               /* copyup src_dentry as the name of dentry. */
+               bend = au_dbend(dentry);
+               if (bend < a->bsrc)
+                       au_set_dbend(dentry, a->bsrc);
+               au_set_h_dptr(dentry, a->bsrc,
+                             dget(au_h_dptr(src_dentry, a->bsrc)));
+               dget(a->h_path.dentry);
+               au_set_h_dptr(dentry, a->bdst, NULL);
+               dentry->d_inode = src_dentry->d_inode; /* tmp */
+               h_file = au_h_open_pre(dentry, a->bsrc);
+               if (IS_ERR(h_file))
+                       err = PTR_ERR(h_file);
+               else {
+                       struct au_cp_generic cpg = {
+                               .dentry = dentry,
+                               .bdst   = a->bdst,
+                               .bsrc   = -1,
+                               .len    = -1,
+                               .pin    = &a->pin,
+                               .flags  = AuCpup_KEEPLINO
+                       };
+                       err = au_sio_cpup_simple(&cpg);
+                       au_h_open_post(dentry, a->bsrc, h_file);
+                       if (!err) {
+                               dput(a->h_path.dentry);
+                               a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+                       } else
+                               au_set_h_dptr(dentry, a->bdst,
+                                             a->h_path.dentry);
+               }
+               /* undo the temporary borrowing above */
+               dentry->d_inode = NULL; /* restore */
+               au_set_h_dptr(dentry, a->bsrc, NULL);
+               au_set_dbend(dentry, bend);
+       } else {
+               /* the inode of src_dentry already exists on a.bdst branch */
+               h_src_dentry = d_find_alias(h_inode);
+               if (!h_src_dentry && au_plink_test(inode)) {
+                       plink = 1;
+                       h_src_dentry = au_plink_lkup(inode, a->bdst);
+                       err = PTR_ERR(h_src_dentry);
+                       if (IS_ERR(h_src_dentry))
+                               goto out;
+
+                       if (unlikely(!h_src_dentry->d_inode)) {
+                               dput(h_src_dentry);
+                               h_src_dentry = NULL;
+                       }
+
+               }
+               if (h_src_dentry) {
+                       err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+                                        &a->h_path);
+                       dput(h_src_dentry);
+               } else {
+                       AuIOErr("no dentry found for hi%lu on b%d\n",
+                               h_inode->i_ino, a->bdst);
+                       err = -EIO;
+               }
+       }
+
+       /* remember the new lower entry as a pseudo-link */
+       if (!err && !plink)
+               au_plink_append(inode, a->bdst, a->h_path.dentry);
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * aufs ->link() operation: make @dentry appear in @dir as a new link to
+ * @src_dentry.  When the writable target branch differs from the branch
+ * holding the source inode, the link is realized either via the pseudo-link
+ * (plink) mechanism or by copying-up first and then calling link(2) on the
+ * lower branch.
+ */
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+             struct dentry *dentry)
+{
+       int err, rerr;
+       struct au_dtime dt;
+       struct au_link_args *a;
+       struct dentry *wh_dentry, *h_src_dentry;
+       struct inode *inode;
+       struct super_block *sb;
+       struct au_wr_dir_args wr_dir_args = {
+               /* .force_btgt  = -1, */
+               .flags          = AuWrDir_ADD_ENTRY
+       };
+
+       IMustLock(dir);
+       inode = src_dentry->d_inode;
+       IMustLock(inode);
+
+       /* working state lives on the heap to keep the stack small */
+       err = -ENOMEM;
+       a = kzalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       a->parent = dentry->d_parent; /* dir inode is locked */
+       err = aufs_read_and_write_lock2(dentry, src_dentry,
+                                       AuLock_NOPLM | AuLock_GEN);
+       if (unlikely(err))
+               goto out_kfree;
+       err = au_d_hashed_positive(src_dentry);
+       if (unlikely(err))
+               goto out_unlock;
+       err = au_d_may_add(dentry);
+       if (unlikely(err))
+               goto out_unlock;
+
+       a->src_parent = dget_parent(src_dentry);
+       wr_dir_args.force_btgt = au_ibstart(inode);
+
+       di_write_lock_parent(a->parent);
+       wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
+       wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
+                                     &wr_dir_args);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out_parent;
+
+       err = 0;
+       sb = dentry->d_sb;
+       a->bdst = au_dbstart(dentry);
+       a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+       a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
+       a->bsrc = au_ibstart(inode);
+       h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+       if (!h_src_dentry) {
+               /* fall back to the dentry's own top branch */
+               a->bsrc = au_dbstart(src_dentry);
+               h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+               AuDebugOn(!h_src_dentry);
+       } else if (IS_ERR(h_src_dentry))
+               goto out_parent;
+
+       if (au_opt_test(au_mntflags(sb), PLINK)) {
+               /* pseudo-link support: cross-branch links are recorded */
+               if (a->bdst < a->bsrc
+                   /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
+                       err = au_cpup_or_link(src_dentry, dentry, a);
+               else
+                       err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+                                        &a->h_path);
+               dput(h_src_dentry);
+       } else {
+               /*
+                * copyup src_dentry to the branch we process,
+                * and then link(2) to it.
+                */
+               dput(h_src_dentry);
+               if (a->bdst < a->bsrc
+                   /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
+                       /* unpin/unlock while the copy-up runs, then re-pin */
+                       au_unpin(&a->pin);
+                       di_write_unlock(a->parent);
+                       err = au_cpup_before_link(src_dentry, a);
+                       di_write_lock_parent(a->parent);
+                       if (!err)
+                               err = au_pin(&a->pin, dentry, a->bdst,
+                                            au_opt_udba(sb),
+                                            AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+                       if (unlikely(err))
+                               goto out_wh;
+               }
+               if (!err) {
+                       h_src_dentry = au_h_dptr(src_dentry, a->bdst);
+                       err = -ENOENT;
+                       if (h_src_dentry && h_src_dentry->d_inode)
+                               err = vfsub_link(h_src_dentry,
+                                                au_pinned_h_dir(&a->pin),
+                                                &a->h_path);
+               }
+       }
+       if (unlikely(err))
+               goto out_unpin;
+
+       if (wh_dentry) {
+               /* the new name was whiteout-ed; remove the whiteout */
+               a->h_path.dentry = wh_dentry;
+               err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
+                                         dentry);
+               if (unlikely(err))
+                       goto out_revert;
+       }
+
+       /* success: update aufs-level attributes and instantiate the dentry */
+       dir->i_version++;
+       if (au_ibstart(dir) == au_dbstart(dentry))
+               au_cpup_attr_timesizes(dir);
+       inc_nlink(inode);
+       inode->i_ctime = dir->i_ctime;
+       d_instantiate(dentry, au_igrab(inode));
+       if (d_unhashed(a->h_path.dentry))
+               /* some filesystem calls d_drop() */
+               d_drop(dentry);
+       goto out_unpin; /* success */
+
+out_revert:
+       rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path, /*force*/0);
+       if (unlikely(rerr)) {
+               AuIOErr("%.*s reverting failed(%d, %d)\n",
+                       AuDLNPair(dentry), err, rerr);
+               err = -EIO;
+       }
+       au_dtime_revert(&dt);
+out_unpin:
+       au_unpin(&a->pin);
+out_wh:
+       dput(wh_dentry);
+out_parent:
+       di_write_unlock(a->parent);
+       dput(a->src_parent);
+out_unlock:
+       if (unlikely(err)) {
+               au_update_dbstart(dentry);
+               d_drop(dentry);
+       }
+       aufs_read_and_write_unlock2(dentry, src_dentry);
+out_kfree:
+       kfree(a);
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * aufs ->mkdir() operation: create the directory on the writable branch,
+ * make it opaque when needed (an old whiteout existed or the ALWAYS_DIROPQ
+ * mount option is set), and revert everything on failure.
+ */
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+       int err, rerr;
+       aufs_bindex_t bindex;
+       unsigned char diropq;
+       struct path h_path;
+       struct dentry *wh_dentry, *parent, *opq_dentry;
+       struct mutex *h_mtx;
+       struct super_block *sb;
+       struct {
+               struct au_pin pin;
+               struct au_dtime dt;
+       } *a; /* reduce the stack usage */
+       struct au_wr_dir_args wr_dir_args = {
+               .force_btgt     = -1,
+               .flags          = AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
+       };
+
+       IMustLock(dir);
+
+       err = -ENOMEM;
+       a = kmalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+       if (unlikely(err))
+               goto out_free;
+       err = au_d_may_add(dentry);
+       if (unlikely(err))
+               goto out_unlock;
+
+       parent = dentry->d_parent; /* dir inode is locked */
+       di_write_lock_parent(parent);
+       /* pin the target branch and remove/lookup any existing whiteout */
+       wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+                                     &a->pin, &wr_dir_args);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out_parent;
+
+       sb = dentry->d_sb;
+       bindex = au_dbstart(dentry);
+       h_path.dentry = au_h_dptr(dentry, bindex);
+       h_path.mnt = au_sbr_mnt(sb, bindex);
+       err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
+       if (unlikely(err))
+               goto out_unpin;
+
+       /* make the dir opaque */
+       diropq = 0;
+       h_mtx = &h_path.dentry->d_inode->i_mutex;
+       if (wh_dentry
+           || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
+               mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+               opq_dentry = au_diropq_create(dentry, bindex);
+               mutex_unlock(h_mtx);
+               err = PTR_ERR(opq_dentry);
+               if (IS_ERR(opq_dentry))
+                       goto out_dir;
+               dput(opq_dentry);
+               diropq = 1;
+       }
+
+       err = epilog(dir, bindex, wh_dentry, dentry);
+       if (!err) {
+               inc_nlink(dir);
+               goto out_unpin; /* success */
+       }
+
+       /* revert, in the reverse order of the steps above */
+       if (diropq) {
+               AuLabel(revert opq);
+               mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+               rerr = au_diropq_remove(dentry, bindex);
+               mutex_unlock(h_mtx);
+               if (rerr) {
+                       AuIOErr("%.*s reverting diropq failed(%d, %d)\n",
+                               AuDLNPair(dentry), err, rerr);
+                       err = -EIO;
+               }
+       }
+
+out_dir:
+       AuLabel(revert dir);
+       rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
+       if (rerr) {
+               AuIOErr("%.*s reverting dir failed(%d, %d)\n",
+                       AuDLNPair(dentry), err, rerr);
+               err = -EIO;
+       }
+       au_dtime_revert(&a->dt);
+out_unpin:
+       au_unpin(&a->pin);
+       dput(wh_dentry);
+out_parent:
+       di_write_unlock(parent);
+out_unlock:
+       if (unlikely(err)) {
+               au_update_dbstart(dentry);
+               d_drop(dentry);
+       }
+       aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+       kfree(a);
+out:
+       return err;
+}
diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c
new file mode 100644 (file)
index 0000000..f37ed37
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (del entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * decide if a new whiteout for @dentry is necessary or not.
+ * when it is necessary, prepare the parent dir for the upper branch whose
+ * branch index is @bcpup for creation. the actual creation of the whiteout will
+ * be done by caller.
+ * return value:
+ * 0: wh is unnecessary
+ * plus: wh is necessary
+ * minus: error
+ */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
+{
+       int need_wh, err;
+       aufs_bindex_t bstart;
+       struct super_block *sb;
+
+       sb = dentry->d_sb;
+       bstart = au_dbstart(dentry);
+       if (*bcpup < 0) {
+               /* target branch not decided yet: default to the top branch */
+               *bcpup = bstart;
+               if (au_test_ro(sb, bstart, dentry->d_inode)) {
+                       /* top branch is readonly; ask the copyup policy */
+                       err = AuWbrCopyup(au_sbi(sb), dentry);
+                       *bcpup = err;
+                       if (unlikely(err < 0))
+                               goto out;
+               }
+       } else
+               AuDebugOn(bstart < *bcpup
+                         || au_test_ro(sb, *bcpup, dentry->d_inode));
+       AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart);
+
+       if (*bcpup != bstart) {
+               /* copy-up the ancestor dirs to the branch we operate on */
+               err = au_cpup_dirs(dentry, *bcpup);
+               if (unlikely(err))
+                       goto out;
+               need_wh = 1;
+       } else {
+               /*
+                * look below @bstart on a temporary copy of the dinfo so the
+                * real dentry state is untouched by the lookup.
+                */
+               struct au_dinfo *dinfo, *tmp;
+
+               need_wh = -ENOMEM;
+               dinfo = au_di(dentry);
+               tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+               if (tmp) {
+                       au_di_cp(tmp, dinfo);
+                       au_di_swap(tmp, dinfo);
+                       /* returns the number of positive dentries */
+                       need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0,
+                                                /*nd*/NULL);
+                       au_di_swap(tmp, dinfo);
+                       au_rw_write_unlock(&tmp->di_rwsem);
+                       au_di_free(tmp);
+               }
+       }
+       AuDbg("need_wh %d\n", need_wh);
+       err = need_wh;
+
+out:
+       return err;
+}
+
+/*
+ * simple tests for the del-entry operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+              struct dentry *h_parent, int isdir)
+{
+       int err;
+       umode_t h_mode;
+       struct dentry *h_dentry, *h_latest;
+       struct inode *h_inode;
+
+       h_dentry = au_h_dptr(dentry, bindex);
+       h_inode = h_dentry->d_inode;
+       if (dentry->d_inode) {
+               /* deleting an existing entry: the lower inode must be alive */
+               err = -ENOENT;
+               if (unlikely(!h_inode || !h_inode->i_nlink))
+                       goto out;
+
+               /* dir-ness of the lower inode must match the request */
+               h_mode = h_inode->i_mode;
+               if (!isdir) {
+                       err = -EISDIR;
+                       if (unlikely(S_ISDIR(h_mode)))
+                               goto out;
+               } else if (unlikely(!S_ISDIR(h_mode))) {
+                       err = -ENOTDIR;
+                       goto out;
+               }
+       } else {
+               /* rename(2) case */
+               err = -EIO;
+               if (unlikely(h_inode))
+                       goto out;
+       }
+
+       err = -ENOENT;
+       /* expected parent dir is locked */
+       if (unlikely(h_parent != h_dentry->d_parent))
+               goto out;
+       err = 0;
+
+       /*
+        * rmdir a dir may break the consistency on some filesystem.
+        * let's try heavy test.
+        */
+       err = -EACCES;
+       if (unlikely(au_test_h_perm(h_parent->d_inode, MAY_EXEC | MAY_WRITE)))
+               goto out;
+
+       /* re-lookup and verify the lower dentry is still the same object */
+       h_latest = au_sio_lkup_one(&dentry->d_name, h_parent,
+                                  au_sbr(dentry->d_sb, bindex));
+       err = -EIO;
+       if (IS_ERR(h_latest))
+               goto out;
+       if (h_latest == h_dentry)
+               err = 0;
+       dput(h_latest);
+
+out:
+       return err;
+}
+
+/*
+ * decide the branch where we operate for @dentry. the branch index will be
+ * set in @rbcpup. after deciding it, 'pin' it and store the timestamps of
+ * the parent dir for reverting.
+ * when a new whiteout is necessary, create it.
+ */
+static struct dentry*
+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
+                   struct au_dtime *dt, struct au_pin *pin)
+{
+       struct dentry *wh_dentry;
+       struct super_block *sb;
+       struct path h_path;
+       int err, need_wh;
+       unsigned int udba;
+       aufs_bindex_t bcpup;
+
+       need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
+       wh_dentry = ERR_PTR(need_wh);
+       if (unlikely(need_wh < 0))
+               goto out;
+
+       sb = dentry->d_sb;
+       udba = au_opt_udba(sb);
+       bcpup = *rbcpup;
+       /* pin the branch and lock the lower parent dir */
+       err = au_pin(pin, dentry, bcpup, udba,
+                    AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+       wh_dentry = ERR_PTR(err);
+       if (unlikely(err))
+               goto out;
+
+       h_path.dentry = au_pinned_h_parent(pin);
+       if (udba != AuOpt_UDBA_NONE
+           && au_dbstart(dentry) == bcpup) {
+               /* re-validate under the udba (direct branch access) policy */
+               err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
+               wh_dentry = ERR_PTR(err);
+               if (unlikely(err))
+                       goto out_unpin;
+       }
+
+       h_path.mnt = au_sbr_mnt(sb, bcpup);
+       /* record the parent dir timestamps so a failure can be reverted */
+       au_dtime_store(dt, au_pinned_parent(pin), &h_path);
+       wh_dentry = NULL;
+       if (!need_wh)
+               goto out; /* success, no need to create whiteout */
+
+       wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
+       if (IS_ERR(wh_dentry))
+               goto out_unpin;
+
+       /* returns with the parent is locked and wh_dentry is dget-ed */
+       goto out; /* success */
+
+out_unpin:
+       au_unpin(pin);
+out:
+       return wh_dentry;
+}
+
+/*
+ * when removing a dir, rename it to a unique temporary whiteout-ed name first
+ * in order to be revertible and save time for removing many child whiteouts
+ * under the dir.
+ * returns 1 when there are too many child whiteouts and the caller should
+ * remove them asynchronously. returns 0 when the number of children is small
+ * enough to remove now or the branch fs is a remote fs.
+ * otherwise return an error.
+ */
+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
+                          struct au_nhash *whlist, struct inode *dir)
+{
+       int rmdir_later, err, dirwh;
+       struct dentry *h_dentry;
+       struct super_block *sb;
+
+       sb = dentry->d_sb;
+       SiMustAnyLock(sb);
+       h_dentry = au_h_dptr(dentry, bindex);
+       /* rename the dir to its temporary whiteout-ed name first */
+       err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
+       if (unlikely(err))
+               goto out;
+
+       /* stop monitoring */
+       au_hn_free(au_hi(dentry->d_inode, bindex));
+
+       if (!au_test_fs_remote(h_dentry->d_sb)) {
+               /* si_dirwh is the threshold for deferring the removal */
+               dirwh = au_sbi(sb)->si_dirwh;
+               rmdir_later = (dirwh <= 1);
+               if (!rmdir_later)
+                       rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
+                                                             dirwh);
+               if (rmdir_later)
+                       return rmdir_later; /* caller removes asynchronously */
+       }
+
+       err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
+       if (unlikely(err)) {
+               AuIOErr("rmdir %.*s, b%d failed, %d. ignored\n",
+                       AuDLNPair(h_dentry), bindex, err);
+               err = 0;
+       }
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * final procedure for deleting an entry.
+ * maintain dentry and iattr.
+ */
+static void epilog(struct inode *dir, struct dentry *dentry,
+                  aufs_bindex_t bindex)
+{
+       struct inode *inode;
+
+       inode = dentry->d_inode;
+       d_drop(dentry);
+       inode->i_ctime = dir->i_ctime;
+
+       /* refresh the dir's attributes when its top branch was the one touched */
+       if (au_ibstart(dir) == bindex)
+               au_cpup_attr_timesizes(dir);
+       dir->i_version++;
+}
+
+/*
+ * when an error happened, remove the created whiteout and revert everything.
+ */
+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
+                    aufs_bindex_t bwh, struct dentry *wh_dentry,
+                    struct dentry *dentry, struct au_dtime *dt)
+{
+       int rerr;
+       struct path h_path = {
+               .dentry = wh_dentry,
+               .mnt    = au_sbr_mnt(dir->i_sb, bindex)
+       };
+
+       /* remove the created whiteout and restore the recorded state */
+       rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
+       if (!rerr) {
+               au_set_dbwh(dentry, bwh);
+               au_dtime_revert(dt);
+               return 0;
+       }
+
+       AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
+               AuDLNPair(dentry), err, rerr);
+       return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * aufs ->unlink() operation: create a whiteout when necessary, unlink the
+ * entry on the writable branch, and revert the whiteout on failure.
+ */
+int aufs_unlink(struct inode *dir, struct dentry *dentry)
+{
+       int err;
+       aufs_bindex_t bwh, bindex, bstart;
+       struct inode *inode, *h_dir;
+       struct dentry *parent, *wh_dentry;
+       /* to reduce stack size */
+       struct {
+               struct au_dtime dt;
+               struct au_pin pin;
+               struct path h_path;
+       } *a;
+
+       IMustLock(dir);
+
+       err = -ENOMEM;
+       a = kmalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+       if (unlikely(err))
+               goto out_free;
+       err = au_d_hashed_positive(dentry);
+       if (unlikely(err))
+               goto out_unlock;
+       inode = dentry->d_inode;
+       IMustLock(inode);
+       err = -EISDIR;
+       if (unlikely(S_ISDIR(inode->i_mode)))
+               goto out_unlock; /* possible? */
+
+       bstart = au_dbstart(dentry);
+       bwh = au_dbwh(dentry);
+       bindex = -1;
+       parent = dentry->d_parent; /* dir inode is locked */
+       di_write_lock_parent(parent);
+       wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt,
+                                       &a->pin);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out_parent;
+
+       a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
+       a->h_path.dentry = au_h_dptr(dentry, bstart);
+       dget(a->h_path.dentry);
+       if (bindex == bstart) {
+               /* the entry lives on the branch we operate on; unlink it */
+               h_dir = au_pinned_h_dir(&a->pin);
+               err = vfsub_unlink(h_dir, &a->h_path, /*force*/0);
+       } else {
+               /* dir inode is locked */
+               /* the whiteout on the upper branch hides the lower entry */
+               h_dir = wh_dentry->d_parent->d_inode;
+               IMustLock(h_dir);
+               err = 0;
+       }
+
+       if (!err) {
+               vfsub_drop_nlink(inode);
+               epilog(dir, dentry, bindex);
+
+               /* update target timestamps */
+               if (bindex == bstart) {
+                       vfsub_update_h_iattr(&a->h_path, /*did*/NULL);
+                       /*ignore*/
+                       inode->i_ctime = a->h_path.dentry->d_inode->i_ctime;
+               } else
+                       /* todo: this timestamp may be reverted later */
+                       inode->i_ctime = h_dir->i_ctime;
+               goto out_unpin; /* success */
+       }
+
+       /* revert */
+       if (wh_dentry) {
+               int rerr;
+
+               rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+                                &a->dt);
+               if (rerr)
+                       err = rerr;
+       }
+
+out_unpin:
+       au_unpin(&a->pin);
+       dput(wh_dentry);
+       dput(a->h_path.dentry);
+out_parent:
+       di_write_unlock(parent);
+out_unlock:
+       aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+       kfree(a);
+out:
+       return err;
+}
+
+/*
+ * aufs ->rmdir() operation: verify the aufs dir is logically empty, create
+ * a whiteout when necessary, then remove the lower dir (possibly deferred
+ * via the whtmp mechanism), reverting on failure.
+ */
+int aufs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+       int err, rmdir_later;
+       aufs_bindex_t bwh, bindex, bstart;
+       struct inode *inode;
+       struct dentry *parent, *wh_dentry, *h_dentry;
+       struct au_whtmp_rmdir *args;
+       /* to reduce stack size */
+       struct {
+               struct au_dtime dt;
+               struct au_pin pin;
+       } *a;
+
+       IMustLock(dir);
+
+       err = -ENOMEM;
+       a = kmalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+       if (unlikely(err))
+               goto out_free;
+       err = au_alive_dir(dentry);
+       if (unlikely(err))
+               goto out_unlock;
+       inode = dentry->d_inode;
+       IMustLock(inode);
+       err = -ENOTDIR;
+       if (unlikely(!S_ISDIR(inode->i_mode)))
+               goto out_unlock; /* possible? */
+
+       err = -ENOMEM;
+       args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
+       if (unlikely(!args))
+               goto out_unlock;
+
+       parent = dentry->d_parent; /* dir inode is locked */
+       di_write_lock_parent(parent);
+       /* check emptiness, gathering the child whiteouts into args->whlist */
+       err = au_test_empty(dentry, &args->whlist);
+       if (unlikely(err))
+               goto out_parent;
+
+       bstart = au_dbstart(dentry);
+       bwh = au_dbwh(dentry);
+       bindex = -1;
+       wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt,
+                                       &a->pin);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out_parent;
+
+       h_dentry = au_h_dptr(dentry, bstart);
+       dget(h_dentry);
+       rmdir_later = 0;
+       if (bindex == bstart) {
+               err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir);
+               if (err > 0) {
+                       /* too many child whiteouts; remove asynchronously */
+                       rmdir_later = err;
+                       err = 0;
+               }
+       } else {
+               /* stop monitoring */
+               au_hn_free(au_hi(inode, bstart));
+
+               /* dir inode is locked */
+               IMustLock(wh_dentry->d_parent->d_inode);
+               err = 0;
+       }
+
+       if (!err) {
+               vfsub_dead_dir(inode);
+               au_set_dbdiropq(dentry, -1);
+               epilog(dir, dentry, bindex);
+
+               if (rmdir_later) {
+                       /* ownership of args passes to the async removal */
+                       au_whtmp_kick_rmdir(dir, bstart, h_dentry, args);
+                       args = NULL;
+               }
+
+               goto out_unpin; /* success */
+       }
+
+       /* revert */
+       AuLabel(revert);
+       if (wh_dentry) {
+               int rerr;
+
+               rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+                                &a->dt);
+               if (rerr)
+                       err = rerr;
+       }
+
+out_unpin:
+       au_unpin(&a->pin);
+       dput(wh_dentry);
+       dput(h_dentry);
+out_parent:
+       di_write_unlock(parent);
+       if (args)
+               au_whtmp_rmdir_free(args);
+out_unlock:
+       aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+       kfree(a);
+out:
+       AuTraceErr(err);
+       return err;
+}
diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c
new file mode 100644 (file)
index 0000000..dbdcb37
--- /dev/null
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operation (rename entry)
+ * todo: this is crazy monster
+ */
+
+#include "aufs.h"
+
+/* array indices for the two sides of a rename */
+enum { AuSRC, AuDST, AuSrcDst };
+enum { AuPARENT, AuCHILD, AuParentChild };
+
+/* state flags for a single rename operation */
+#define AuRen_ISDIR    1
+#define AuRen_ISSAMEDIR        (1 << 1)
+#define AuRen_WHSRC    (1 << 2)
+#define AuRen_WHDST    (1 << 3)
+#define AuRen_MNT_WRITE        (1 << 4)
+#define AuRen_DT_DSTDIR        (1 << 5)
+#define AuRen_DIROPQ   (1 << 6)
+#define AuRen_CPUP     (1 << 7)
+#define au_ftest_ren(flags, name)      ((flags) & AuRen_##name)
+#define au_fset_ren(flags, name) \
+       do { (flags) |= AuRen_##name; } while (0)
+#define au_fclr_ren(flags, name) \
+       do { (flags) &= ~AuRen_##name; } while (0)
+
+/* all the state for one rename(2) call, shared by the helpers below */
+struct au_ren_args {
+       struct {
+               struct dentry *dentry, *h_dentry, *parent, *h_parent,
+                       *wh_dentry;
+               struct inode *dir, *inode;
+               struct au_hinode *hdir;
+               struct au_dtime dt[AuParentChild];
+               aufs_bindex_t bstart;
+       } sd[AuSrcDst];
+
+/* shorthand accessors for the source side */
+#define src_dentry     sd[AuSRC].dentry
+#define src_dir                sd[AuSRC].dir
+#define src_inode      sd[AuSRC].inode
+#define src_h_dentry   sd[AuSRC].h_dentry
+#define src_parent     sd[AuSRC].parent
+#define src_h_parent   sd[AuSRC].h_parent
+#define src_wh_dentry  sd[AuSRC].wh_dentry
+#define src_hdir       sd[AuSRC].hdir
+#define src_h_dir      sd[AuSRC].hdir->hi_inode
+#define src_dt         sd[AuSRC].dt
+#define src_bstart     sd[AuSRC].bstart
+
+/* shorthand accessors for the destination side */
+#define dst_dentry     sd[AuDST].dentry
+#define dst_dir                sd[AuDST].dir
+#define dst_inode      sd[AuDST].inode
+#define dst_h_dentry   sd[AuDST].h_dentry
+#define dst_parent     sd[AuDST].parent
+#define dst_h_parent   sd[AuDST].h_parent
+#define dst_wh_dentry  sd[AuDST].wh_dentry
+#define dst_hdir       sd[AuDST].hdir
+#define dst_h_dir      sd[AuDST].hdir->hi_inode
+#define dst_dt         sd[AuDST].dt
+#define dst_bstart     sd[AuDST].bstart
+
+       /* the remaining fields are common to both sides */
+       struct dentry *h_trap;
+       struct au_branch *br;
+       struct au_hinode *src_hinode;
+       struct path h_path;
+       struct au_nhash whlist;
+       aufs_bindex_t btgt, src_bwh, src_bdiropq;
+
+       unsigned int flags;
+
+       struct au_whtmp_rmdir *thargs;
+       struct dentry *h_dst;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * functions for reverting.
+ * when an error happened in a single rename systemcall, we should revert
+ * everything as if nothing happened.
+ * we don't need to revert the copied-up/down parent dirs since they are
+ * harmless.
+ */
+
+/* log a revert failure and force -EIO; expects err and rerr in scope */
+#define RevertFailure(fmt, ...) do { \
+       AuIOErr("revert failure: " fmt " (%d, %d)\n", \
+               ##__VA_ARGS__, err, rerr); \
+       err = -EIO; \
+} while (0)
+
+/* revert: remove the diropq created on the source dir */
+static void au_ren_rev_diropq(int err, struct au_ren_args *a)
+{
+       int rerr;
+
+       au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
+       rerr = au_diropq_remove(a->src_dentry, a->btgt);
+       au_hn_imtx_unlock(a->src_hinode);
+       au_set_dbdiropq(a->src_dentry, a->src_bdiropq);
+       if (rerr)
+               RevertFailure("remove diropq %.*s", AuDLNPair(a->src_dentry));
+}
+
+/* revert: rename the moved entry back to its original source name */
+static void au_ren_rev_rename(int err, struct au_ren_args *a)
+{
+       int rerr;
+
+       /* look up a negative dentry for the original name */
+       a->h_path.dentry = au_lkup_one(&a->src_dentry->d_name, a->src_h_parent,
+                                      a->br, /*nd*/NULL);
+       rerr = PTR_ERR(a->h_path.dentry);
+       if (IS_ERR(a->h_path.dentry)) {
+               RevertFailure("au_lkup_one %.*s", AuDLNPair(a->src_dentry));
+               return;
+       }
+
+       rerr = vfsub_rename(a->dst_h_dir,
+                           au_h_dptr(a->src_dentry, a->btgt),
+                           a->src_h_dir, &a->h_path);
+       d_drop(a->h_path.dentry);
+       dput(a->h_path.dentry);
+       /* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
+       if (rerr)
+               RevertFailure("rename %.*s", AuDLNPair(a->src_dentry));
+}
+
+/* revert: unlink the copied-up entry on the target branch */
+static void au_ren_rev_cpup(int err, struct au_ren_args *a)
+{
+       int rerr;
+
+       a->h_path.dentry = a->dst_h_dentry;
+       rerr = vfsub_unlink(a->dst_h_dir, &a->h_path, /*force*/0);
+       au_set_h_dptr(a->src_dentry, a->btgt, NULL);
+       au_set_dbstart(a->src_dentry, a->src_bstart);
+       if (rerr)
+               RevertFailure("unlink %.*s", AuDLNPair(a->dst_h_dentry));
+}
+
+/* revert: move the dst entry back from its temporary whiteout-ed name */
+static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
+{
+       int rerr;
+
+       a->h_path.dentry = au_lkup_one(&a->dst_dentry->d_name, a->dst_h_parent,
+                                      a->br, /*nd*/NULL);
+       rerr = PTR_ERR(a->h_path.dentry);
+       if (IS_ERR(a->h_path.dentry)) {
+               RevertFailure("lookup %.*s", AuDLNPair(a->dst_dentry));
+               return;
+       }
+       if (a->h_path.dentry->d_inode) {
+               /* the name is already occupied again; nothing to move back */
+               d_drop(a->h_path.dentry);
+               dput(a->h_path.dentry);
+               return;
+       }
+
+       rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path);
+       d_drop(a->h_path.dentry);
+       dput(a->h_path.dentry);
+       if (!rerr)
+               au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
+       else
+               RevertFailure("rename %.*s", AuDLNPair(a->h_dst));
+}
+
+/* revert: remove the whiteout created for the source name */
+static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
+{
+       int rerr;
+
+       a->h_path.dentry = a->src_wh_dentry;
+       rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
+       au_set_dbwh(a->src_dentry, a->src_bwh);
+       if (rerr)
+               RevertFailure("unlink %.*s", AuDLNPair(a->src_wh_dentry));
+}
+#undef RevertFailure
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * when we have to copyup the renaming entry, do it with the rename-target name
+ * in order to minimize the cost (the later actual rename is unnecessary).
+ * otherwise rename it on the target branch.
+ * returns 0 on success, negative errno otherwise.
+ */
+static int au_ren_or_cpup(struct au_ren_args *a)
+{
+       int err;
+       struct dentry *d;
+
+       d = a->src_dentry;
+       if (au_dbstart(d) == a->btgt) {
+               /* src already lives on the target branch: plain lower rename */
+               a->h_path.dentry = a->dst_h_dentry;
+               if (au_ftest_ren(a->flags, DIROPQ)
+                   && au_dbdiropq(d) == a->btgt)
+                       /* already opaque on this branch, no extra diropq work */
+                       au_fclr_ren(a->flags, DIROPQ);
+               AuDebugOn(au_dbstart(d) != a->btgt);
+               err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
+                                  a->dst_h_dir, &a->h_path);
+       } else
+               /* callers must have copied src up to btgt beforehand */
+               BUG();
+
+       if (!err && a->h_dst)
+               /* it will be set to dinfo later */
+               dget(a->h_dst);
+
+       return err;
+}
+
+/* cf. aufs_rmdir() */
+/*
+ * remove the temporarily-renamed (whtmp) dir, either synchronously or, when
+ * it may be expensive, asynchronously via the pre-allocated workqueue args.
+ * always returns 0; a synchronous failure is only logged.
+ */
+static int au_ren_del_whtmp(struct au_ren_args *a)
+{
+       int err;
+       struct inode *dir;
+
+       dir = a->dst_dir;
+       SiMustAnyLock(dir->i_sb);
+       if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
+                                    au_sbi(dir->i_sb)->si_dirwh)
+           || au_test_fs_remote(a->h_dst->d_sb)) {
+               /* few whiteouts or remote fs: remove it right here */
+               err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
+               if (unlikely(err))
+                       pr_warn("failed removing whtmp dir %.*s (%d), "
+                               "ignored.\n", AuDLNPair(a->h_dst), err);
+       } else {
+               /* hand whlist and the h_dst reference over to the workqueue */
+               au_nhash_wh_free(&a->thargs->whlist);
+               a->thargs->whlist = a->whlist;
+               a->whlist.nh_num = 0;
+               au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
+               dput(a->h_dst);
+               a->thargs = NULL;
+       }
+
+       return 0;
+}
+
+/* make it 'opaque' dir. */
+/*
+ * create the diropq (opaque) mark for the renamed dir on the target branch,
+ * remembering the previous diropq index so it can be reverted.
+ * returns 0 on success, negative errno otherwise.
+ */
+static int au_ren_diropq(struct au_ren_args *a)
+{
+       int err;
+       struct dentry *diropq;
+
+       err = 0;
+       a->src_bdiropq = au_dbdiropq(a->src_dentry);
+       a->src_hinode = au_hi(a->src_inode, a->btgt);
+       /* serialize against hnotify on the hidden inode */
+       au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
+       diropq = au_diropq_create(a->src_dentry, a->btgt);
+       au_hn_imtx_unlock(a->src_hinode);
+       if (IS_ERR(diropq))
+               err = PTR_ERR(diropq);
+       dput(diropq);
+
+       return err;
+}
+
+/*
+ * perform the actual rename on the target branch @a->btgt, maintaining the
+ * aufs whiteouts and (for a dir target) the temporary whiteout-name (whtmp)
+ * machinery.  on any failure every completed step is reverted in reverse
+ * order by the au_ren_rev_*() helpers above.
+ * returns 0 on success, negative errno otherwise.
+ */
+static int do_rename(struct au_ren_args *a)
+{
+       int err;
+       struct dentry *d, *h_d;
+
+       /* prepare workqueue args for asynchronous rmdir */
+       h_d = a->dst_h_dentry;
+       if (au_ftest_ren(a->flags, ISDIR) && h_d->d_inode) {
+               err = -ENOMEM;
+               a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS);
+               if (unlikely(!a->thargs))
+                       goto out;
+               a->h_dst = dget(h_d);
+       }
+
+       /* create whiteout for src_dentry */
+       if (au_ftest_ren(a->flags, WHSRC)) {
+               a->src_bwh = au_dbwh(a->src_dentry);
+               AuDebugOn(a->src_bwh >= 0);
+               a->src_wh_dentry
+                       = au_wh_create(a->src_dentry, a->btgt, a->src_h_parent);
+               err = PTR_ERR(a->src_wh_dentry);
+               if (IS_ERR(a->src_wh_dentry))
+                       goto out_thargs;
+       }
+
+       /* lookup whiteout for dentry */
+       if (au_ftest_ren(a->flags, WHDST)) {
+               h_d = au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name,
+                                a->br);
+               err = PTR_ERR(h_d);
+               if (IS_ERR(h_d))
+                       goto out_whsrc;
+               if (!h_d->d_inode)
+                       /* negative: no whiteout exists for the destination */
+                       dput(h_d);
+               else
+                       a->dst_wh_dentry = h_d;
+       }
+
+       /* rename dentry to tmpwh */
+       if (a->thargs) {
+               err = au_whtmp_ren(a->dst_h_dentry, a->br);
+               if (unlikely(err))
+                       goto out_whdst;
+
+               /* re-lookup a fresh negative dentry for the target name */
+               d = a->dst_dentry;
+               au_set_h_dptr(d, a->btgt, NULL);
+               err = au_lkup_neg(d, a->btgt, /*wh*/0);
+               if (unlikely(err))
+                       goto out_whtmp;
+               a->dst_h_dentry = au_h_dptr(d, a->btgt);
+       }
+
+       BUG_ON(a->dst_h_dentry->d_inode && a->src_bstart != a->btgt);
+
+       /* rename by vfs_rename or cpup */
+       d = a->dst_dentry;
+       if (au_ftest_ren(a->flags, ISDIR)
+           && (a->dst_wh_dentry
+               || au_dbdiropq(d) == a->btgt
+               /* hide the lower to keep xino */
+               || a->btgt < au_dbend(d)
+               || au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ)))
+               au_fset_ren(a->flags, DIROPQ);
+       err = au_ren_or_cpup(a);
+       if (unlikely(err))
+               /* leave the copied-up one */
+               goto out_whtmp;
+
+       /* make dir opaque */
+       if (au_ftest_ren(a->flags, DIROPQ)) {
+               err = au_ren_diropq(a);
+               if (unlikely(err))
+                       goto out_rename;
+       }
+
+       /* update target timestamps */
+       AuDebugOn(au_dbstart(a->src_dentry) != a->btgt);
+       a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
+       vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+       a->src_inode->i_ctime = a->h_path.dentry->d_inode->i_ctime;
+
+       /* remove whiteout for dentry */
+       if (a->dst_wh_dentry) {
+               a->h_path.dentry = a->dst_wh_dentry;
+               err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
+                                         a->dst_dentry);
+               if (unlikely(err))
+                       goto out_diropq;
+       }
+
+       /* remove whtmp */
+       if (a->thargs)
+               au_ren_del_whtmp(a); /* ignore this error */
+
+       err = 0;
+       goto out_success;
+
+/* unwind: revert each completed step in reverse order of execution */
+out_diropq:
+       if (au_ftest_ren(a->flags, DIROPQ))
+               au_ren_rev_diropq(err, a);
+out_rename:
+       if (!au_ftest_ren(a->flags, CPUP))
+               au_ren_rev_rename(err, a);
+       else
+               au_ren_rev_cpup(err, a);
+       dput(a->h_dst);
+out_whtmp:
+       if (a->thargs)
+               au_ren_rev_whtmp(err, a);
+out_whdst:
+       dput(a->dst_wh_dentry);
+       a->dst_wh_dentry = NULL;
+out_whsrc:
+       if (a->src_wh_dentry)
+               au_ren_rev_whsrc(err, a);
+out_success:
+       dput(a->src_wh_dentry);
+       dput(a->dst_wh_dentry);
+out_thargs:
+       if (a->thargs) {
+               dput(a->h_dst);
+               au_whtmp_rmdir_free(a->thargs);
+               a->thargs = NULL;
+       }
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if @dentry dir can be rename destination or not.
+ * success means, it is a logically empty dir.
+ */
+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
+{
+       int err;
+
+       err = au_test_empty(dentry, whlist);
+       return err;
+}
+
+/*
+ * test if @dentry dir can be rename source or not.
+ * if it can, return 0 and @children is filled.
+ * success means,
+ * - it is a logically empty dir.
+ * - or, it exists on writable branch and has no children including whiteouts
+ *       on the lower branch.
+ */
+static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt)
+{
+       int err;
+       unsigned int rdhash;
+       aufs_bindex_t bstart;
+
+       bstart = au_dbstart(dentry);
+       if (bstart != btgt) {
+               /* src lives on another branch: it must be logically empty */
+               struct au_nhash whlist;
+
+               SiMustAnyLock(dentry->d_sb);
+               rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+               if (!rdhash)
+                       /* no configured hash size; estimate from dir size */
+                       rdhash = au_rdhash_est(au_dir_size(/*file*/NULL,
+                                                          dentry));
+               err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+               if (unlikely(err))
+                       goto out;
+               err = au_test_empty(dentry, &whlist);
+               au_nhash_wh_free(&whlist);
+               goto out;
+       }
+
+       if (bstart == au_dbtaildir(dentry))
+               return 0; /* success */
+
+       /* exists on the target branch; the lower branches must be empty */
+       err = au_test_empty_lower(dentry);
+
+out:
+       if (err == -ENOTEMPTY) {
+               AuWarn1("renaming dir who has child(ren) on multiple branches,"
+                       " is not supported\n");
+               err = -EXDEV;
+       }
+       return err;
+}
+
+/* side effect: sets whlist and h_dentry */
+/*
+ * verify that src and dst are acceptable for a dir rename; for a positive
+ * dir destination the whiteout list is allocated and filled as a side
+ * effect, and both hidden dentries are cached into @a.
+ * returns 0 on success, negative errno otherwise.
+ */
+static int au_ren_may_dir(struct au_ren_args *a)
+{
+       int err;
+       unsigned int rdhash;
+       struct dentry *d;
+
+       d = a->dst_dentry;
+       SiMustAnyLock(d->d_sb);
+
+       err = 0;
+       if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) {
+               rdhash = au_sbi(d->d_sb)->si_rdhash;
+               if (!rdhash)
+                       rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d));
+               err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS);
+               if (unlikely(err))
+                       goto out;
+
+               /* temporarily restore the real bstart for the emptiness test */
+               au_set_dbstart(d, a->dst_bstart);
+               err = may_rename_dstdir(d, &a->whlist);
+               au_set_dbstart(d, a->btgt);
+       }
+       a->dst_h_dentry = au_h_dptr(d, au_dbstart(d));
+       if (unlikely(err))
+               goto out;
+
+       d = a->src_dentry;
+       a->src_h_dentry = au_h_dptr(d, au_dbstart(d));
+       if (au_ftest_ren(a->flags, ISDIR)) {
+               err = may_rename_srcdir(d, a->btgt);
+               if (unlikely(err)) {
+                       au_nhash_wh_free(&a->whlist);
+                       a->whlist.nh_num = 0;
+               }
+       }
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * simple tests for rename.
+ * following the checks in vfs, plus the parent-child relationship.
+ * returns 0 when the rename may proceed, negative errno otherwise;
+ * -ENOENT/-EEXIST from the lower checks are converted to -EIO since they
+ * indicate an inconsistency between aufs and the branch.
+ */
+static int au_may_ren(struct au_ren_args *a)
+{
+       int err, isdir;
+       struct inode *h_inode;
+
+       if (a->src_bstart == a->btgt) {
+               err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
+                                au_ftest_ren(a->flags, ISDIR));
+               if (unlikely(err))
+                       goto out;
+               err = -EINVAL;
+               /* h_trap is the lock_rename() trap dentry; renaming it loops */
+               if (unlikely(a->src_h_dentry == a->h_trap))
+                       goto out;
+       }
+
+       err = 0;
+       if (a->dst_bstart != a->btgt)
+               goto out;
+
+       err = -ENOTEMPTY;
+       if (unlikely(a->dst_h_dentry == a->h_trap))
+               goto out;
+
+       err = -EIO;
+       h_inode = a->dst_h_dentry->d_inode;
+       isdir = !!au_ftest_ren(a->flags, ISDIR);
+       if (!a->dst_dentry->d_inode) {
+               /* aufs says negative, so the lower must be negative too */
+               if (unlikely(h_inode))
+                       goto out;
+               err = au_may_add(a->dst_dentry, a->btgt, a->dst_h_parent,
+                                isdir);
+       } else {
+               /* aufs says positive: lower must be positive and still linked */
+               if (unlikely(!h_inode || !h_inode->i_nlink))
+                       goto out;
+               err = au_may_del(a->dst_dentry, a->btgt, a->dst_h_parent,
+                                isdir);
+               if (unlikely(err))
+                       goto out;
+       }
+
+out:
+       if (unlikely(err == -ENOENT || err == -EEXIST))
+               err = -EIO;
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * locking order
+ * (VFS)
+ * - src_dir and dir by lock_rename()
+ * - inode if exists
+ * (aufs)
+ * - lock all
+ *   + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
+ *     + si_read_lock
+ *     + di_write_lock2_child()
+ *       + di_write_lock_child()
+ *        + ii_write_lock_child()
+ *       + di_write_lock_child2()
+ *        + ii_write_lock_child2()
+ *     + src_parent and parent
+ *       + di_write_lock_parent()
+ *        + ii_write_lock_parent()
+ *       + di_write_lock_parent2()
+ *        + ii_write_lock_parent2()
+ *   + lower src_dir and dir by vfsub_lock_rename()
+ *   + verify every relationship between child and parent. if any
+ *     of them fails, unlock all and return -EBUSY.
+ */
+/*
+ * undo au_ren_lock(): drop the branch write access (if taken) and unlock
+ * the two lower parent dirs.
+ */
+static void au_ren_unlock(struct au_ren_args *a)
+{
+       struct super_block *sb;
+
+       sb = a->dst_dentry->d_sb;
+       if (au_ftest_ren(a->flags, MNT_WRITE))
+               mnt_drop_write(au_br_mnt(a->br));
+       vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
+                           a->dst_h_parent, a->dst_hdir);
+}
+
+/*
+ * lock the lower parent dirs of src and dst (rename-style), verify they and
+ * the child dentries are still what aufs believes them to be, and take
+ * write access to the branch.  on success MNT_WRITE is set in a->flags.
+ * any staleness is reported as -EBUSY/-ESTALE via au_busy_or_stale().
+ */
+static int au_ren_lock(struct au_ren_args *a)
+{
+       int err;
+       unsigned int udba;
+
+       err = 0;
+       a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
+       a->src_hdir = au_hi(a->src_dir, a->btgt);
+       a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
+       a->dst_hdir = au_hi(a->dst_dir, a->btgt);
+       /* h_trap is remembered for the loop checks in au_may_ren() */
+       a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
+                                     a->dst_h_parent, a->dst_hdir);
+       udba = au_opt_udba(a->src_dentry->d_sb);
+       if (unlikely(a->src_hdir->hi_inode != a->src_h_parent->d_inode
+                    || a->dst_hdir->hi_inode != a->dst_h_parent->d_inode))
+               err = au_busy_or_stale();
+       if (!err && au_dbstart(a->src_dentry) == a->btgt)
+               err = au_h_verify(a->src_h_dentry, udba,
+                                 a->src_h_parent->d_inode, a->src_h_parent,
+                                 a->br);
+       if (!err && au_dbstart(a->dst_dentry) == a->btgt)
+               err = au_h_verify(a->dst_h_dentry, udba,
+                                 a->dst_h_parent->d_inode, a->dst_h_parent,
+                                 a->br);
+       if (!err) {
+               err = mnt_want_write(au_br_mnt(a->br));
+               if (unlikely(err))
+                       goto out_unlock;
+               au_fset_ren(a->flags, MNT_WRITE);
+               goto out; /* success */
+       }
+
+       err = au_busy_or_stale();
+
+out_unlock:
+       au_ren_unlock(a);
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * after a successful rename, refresh the attributes and version numbers of
+ * the involved aufs dir inodes (both dirs when the rename crossed dirs).
+ */
+static void au_ren_refresh_dir(struct au_ren_args *a)
+{
+       struct inode *dir;
+
+       dir = a->dst_dir;
+       dir->i_version++;
+       if (au_ftest_ren(a->flags, ISDIR)) {
+               /* is this updating defined in POSIX? */
+               au_cpup_attr_timesizes(a->src_inode);
+               au_cpup_attr_nlink(dir, /*force*/1);
+       }
+
+       if (au_ibstart(dir) == a->btgt)
+               au_cpup_attr_timesizes(dir);
+
+       if (au_ftest_ren(a->flags, ISSAMEDIR))
+               return;
+
+       /* cross-dir rename: the source dir changed as well */
+       dir = a->src_dir;
+       dir->i_version++;
+       if (au_ftest_ren(a->flags, ISDIR))
+               au_cpup_attr_nlink(dir, /*force*/1);
+       if (au_ibstart(dir) == a->btgt)
+               au_cpup_attr_timesizes(dir);
+}
+
+/*
+ * after a successful rename, release the now-stale hidden dentries/inodes
+ * of src and dst outside the target branch and shrink their branch ranges
+ * accordingly.  pseudo-linked inodes keep their lower inodes.
+ */
+static void au_ren_refresh(struct au_ren_args *a)
+{
+       aufs_bindex_t bend, bindex;
+       struct dentry *d, *h_d;
+       struct inode *i, *h_i;
+       struct super_block *sb;
+
+       d = a->dst_dentry;
+       d_drop(d);
+       if (a->h_dst)
+               /* already dget-ed by au_ren_or_cpup() */
+               au_set_h_dptr(d, a->btgt, a->h_dst);
+
+       i = a->dst_inode;
+       if (i) {
+               /* the destination was overwritten */
+               if (!au_ftest_ren(a->flags, ISDIR))
+                       vfsub_drop_nlink(i);
+               else {
+                       vfsub_dead_dir(i);
+                       au_cpup_attr_timesizes(i);
+               }
+               au_update_dbrange(d, /*do_put_zero*/1);
+       } else {
+               /* negative destination: keep only the btgt hidden dentry */
+               bend = a->btgt;
+               for (bindex = au_dbstart(d); bindex < bend; bindex++)
+                       au_set_h_dptr(d, bindex, NULL);
+               bend = au_dbend(d);
+               for (bindex = a->btgt + 1; bindex <= bend; bindex++)
+                       au_set_h_dptr(d, bindex, NULL);
+               au_update_dbrange(d, /*do_put_zero*/0);
+       }
+
+       /* the source lost everything below the target branch */
+       d = a->src_dentry;
+       au_set_dbwh(d, -1);
+       bend = au_dbend(d);
+       for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
+               h_d = au_h_dptr(d, bindex);
+               if (h_d)
+                       au_set_h_dptr(d, bindex, NULL);
+       }
+       au_set_dbend(d, a->btgt);
+
+       sb = d->d_sb;
+       i = a->src_inode;
+       if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
+               return; /* success */
+
+       bend = au_ibend(i);
+       for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
+               h_i = au_h_iptr(i, bindex);
+               if (h_i) {
+                       au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
+                       /* ignore this error */
+                       au_set_h_iptr(i, bindex, NULL, 0);
+               }
+       }
+       au_set_ibend(i, a->btgt);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* mainly for link(2) and rename(2) */
+/*
+ * decide whether branch @btgt is usable as the writable target for
+ * @dentry; returns @btgt when usable, -1 otherwise (read-only branch, or a
+ * diropq/whiteout on an upper branch shadows the target).
+ */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
+{
+       aufs_bindex_t bdiropq, bwh;
+       struct dentry *parent;
+       struct au_branch *br;
+
+       parent = dentry->d_parent;
+       IMustLock(parent->d_inode); /* dir is locked */
+
+       bdiropq = au_dbdiropq(parent);
+       bwh = au_dbwh(dentry);
+       br = au_sbr(dentry->d_sb, btgt);
+       if (au_br_rdonly(br))
+               btgt = -1;
+       else if (bdiropq >= 0 && bdiropq < btgt)
+               btgt = -1;
+       else if (bwh >= 0 && bwh < btgt)
+               btgt = -1;
+
+       AuDbg("btgt %d\n", btgt);
+       return btgt;
+}
+
+/* sets src_bstart, dst_bstart and btgt */
+/*
+ * choose the writable target branch for the rename via the write-branch
+ * policy.  on success the non-negative branch index is stored in a->btgt
+ * and returned; a negative errno is returned otherwise.
+ */
+static int au_ren_wbr(struct au_ren_args *a)
+{
+       int err;
+       struct au_wr_dir_args wr_dir_args = {
+               /* .force_btgt  = -1, */
+               .flags          = AuWrDir_ADD_ENTRY
+       };
+
+       a->src_bstart = au_dbstart(a->src_dentry);
+       a->dst_bstart = au_dbstart(a->dst_dentry);
+       if (au_ftest_ren(a->flags, ISDIR))
+               au_fset_wrdir(wr_dir_args.flags, ISDIR);
+       /* prefer the uppermost branch of the two dentries */
+       wr_dir_args.force_btgt = a->src_bstart;
+       if (a->dst_inode && a->dst_bstart < a->src_bstart)
+               wr_dir_args.force_btgt = a->dst_bstart;
+       wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
+       err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
+       a->btgt = err;
+
+       return err;
+}
+
+/*
+ * store the timestamps of the lower parents (and, for a dir rename, the
+ * lower children) so they can be restored by au_ren_rev_dt() on failure.
+ */
+static void au_ren_dt(struct au_ren_args *a)
+{
+       a->h_path.dentry = a->src_h_parent;
+       au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
+       if (!au_ftest_ren(a->flags, ISSAMEDIR)) {
+               a->h_path.dentry = a->dst_h_parent;
+               au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
+       }
+
+       au_fclr_ren(a->flags, DT_DSTDIR);
+       if (!au_ftest_ren(a->flags, ISDIR))
+               return;
+
+       a->h_path.dentry = a->src_h_dentry;
+       au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
+       if (a->dst_h_dentry->d_inode) {
+               /* remember that the dst child timestamps were saved too */
+               au_fset_ren(a->flags, DT_DSTDIR);
+               a->h_path.dentry = a->dst_h_dentry;
+               au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
+       }
+}
+
+/*
+ * restore the timestamps saved by au_ren_dt().  the child timestamps are
+ * skipped when @err is -EIO since the lower state is then untrustworthy.
+ */
+static void au_ren_rev_dt(int err, struct au_ren_args *a)
+{
+       struct dentry *h_d;
+       struct mutex *h_mtx;
+
+       au_dtime_revert(a->src_dt + AuPARENT);
+       if (!au_ftest_ren(a->flags, ISSAMEDIR))
+               au_dtime_revert(a->dst_dt + AuPARENT);
+
+       if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) {
+               h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
+               h_mtx = &h_d->d_inode->i_mutex;
+               mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+               au_dtime_revert(a->src_dt + AuCHILD);
+               mutex_unlock(h_mtx);
+
+               if (au_ftest_ren(a->flags, DT_DSTDIR)) {
+                       h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
+                       h_mtx = &h_d->d_inode->i_mutex;
+                       mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+                       au_dtime_revert(a->dst_dt + AuCHILD);
+                       mutex_unlock(h_mtx);
+               }
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * entry point for rename(2) on aufs.
+ * locks both aufs dentries and their parents (see the locking-order comment
+ * above au_ren_unlock()), chooses the target writable branch, copies the
+ * source up when necessary and finally performs the lower rename through
+ * do_rename().  returns 0 on success, negative errno otherwise.
+ */
+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
+               struct inode *_dst_dir, struct dentry *_dst_dentry)
+{
+       int err, flags;
+       /* reduce stack space */
+       struct au_ren_args *a;
+
+       AuDbg("%.*s, %.*s\n", AuDLNPair(_src_dentry), AuDLNPair(_dst_dentry));
+       IMustLock(_src_dir);
+       IMustLock(_dst_dir);
+
+       err = -ENOMEM;
+       BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
+       a = kzalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       a->src_dir = _src_dir;
+       a->src_dentry = _src_dentry;
+       a->src_inode = a->src_dentry->d_inode;
+       a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
+       a->dst_dir = _dst_dir;
+       a->dst_dentry = _dst_dentry;
+       a->dst_inode = a->dst_dentry->d_inode;
+       a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
+       if (a->dst_inode) {
+               IMustLock(a->dst_inode);
+               /* hold the target inode until the final iput() below */
+               au_igrab(a->dst_inode);
+       }
+
+       err = -ENOTDIR;
+       flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN;
+       if (S_ISDIR(a->src_inode->i_mode)) {
+               au_fset_ren(a->flags, ISDIR);
+               if (unlikely(a->dst_inode && !S_ISDIR(a->dst_inode->i_mode)))
+                       goto out_free;
+               err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
+                                               AuLock_DIR | flags);
+       } else
+               err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
+                                               flags);
+       if (unlikely(err))
+               goto out_free;
+
+       err = au_d_hashed_positive(a->src_dentry);
+       if (unlikely(err))
+               goto out_unlock;
+       err = -ENOENT;
+       if (a->dst_inode) {
+               /*
+                * If it is a dir, VFS unhash dst_dentry before this
+                * function. It means we cannot rely upon d_unhashed().
+                */
+               if (unlikely(!a->dst_inode->i_nlink))
+                       goto out_unlock;
+               if (!S_ISDIR(a->dst_inode->i_mode)) {
+                       err = au_d_hashed_positive(a->dst_dentry);
+                       if (unlikely(err))
+                               goto out_unlock;
+               } else if (unlikely(IS_DEADDIR(a->dst_inode)))
+                       goto out_unlock;
+       } else if (unlikely(d_unhashed(a->dst_dentry)))
+               goto out_unlock;
+
+       /*
+        * is it possible?
+        * yes, it happened (in linux-3.3-rcN) but I don't know why.
+        * there may exist a problem somewhere else.
+        */
+       err = -EINVAL;
+       if (unlikely(a->dst_parent->d_inode == a->src_dentry->d_inode))
+               goto out_unlock;
+
+       au_fset_ren(a->flags, ISSAMEDIR); /* temporary */
+       di_write_lock_parent(a->dst_parent);
+
+       /* which branch we process */
+       err = au_ren_wbr(a);
+       if (unlikely(err < 0))
+               goto out_parent;
+       a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
+       a->h_path.mnt = au_br_mnt(a->br);
+
+       /* are they available to be renamed */
+       err = au_ren_may_dir(a);
+       if (unlikely(err))
+               goto out_children;
+
+       /* prepare the writable parent dir on the same branch */
+       if (a->dst_bstart == a->btgt) {
+               au_fset_ren(a->flags, WHDST);
+       } else {
+               err = au_cpup_dirs(a->dst_dentry, a->btgt);
+               if (unlikely(err))
+                       goto out_children;
+       }
+
+       if (a->src_dir != a->dst_dir) {
+               /*
+                * this temporary unlock is safe,
+                * because both dir->i_mutex are locked.
+                */
+               di_write_unlock(a->dst_parent);
+               di_write_lock_parent(a->src_parent);
+               err = au_wr_dir_need_wh(a->src_dentry,
+                                       au_ftest_ren(a->flags, ISDIR),
+                                       &a->btgt);
+               di_write_unlock(a->src_parent);
+               di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1);
+               au_fclr_ren(a->flags, ISSAMEDIR);
+       } else
+               err = au_wr_dir_need_wh(a->src_dentry,
+                                       au_ftest_ren(a->flags, ISDIR),
+                                       &a->btgt);
+       if (unlikely(err < 0))
+               goto out_children;
+       if (err)
+               /* positive return means a whiteout for src is required */
+               au_fset_ren(a->flags, WHSRC);
+
+       /* cpup src */
+       if (a->src_bstart != a->btgt) {
+               struct au_pin pin;
+
+               err = au_pin(&pin, a->src_dentry, a->btgt,
+                            au_opt_udba(a->src_dentry->d_sb),
+                            AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+               if (!err) {
+                       struct au_cp_generic cpg = {
+                               .dentry = a->src_dentry,
+                               .bdst   = a->btgt,
+                               .bsrc   = a->src_bstart,
+                               .len    = -1,
+                               .pin    = &pin,
+                               .flags  = AuCpup_DTIME | AuCpup_HOPEN
+                       };
+                       AuDebugOn(au_dbstart(a->src_dentry) != a->src_bstart);
+                       err = au_sio_cpup_simple(&cpg);
+                       au_unpin(&pin);
+               }
+               if (unlikely(err))
+                       goto out_children;
+               a->src_bstart = a->btgt;
+               a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt);
+               au_fset_ren(a->flags, WHSRC);
+       }
+
+       /* lock them all */
+       err = au_ren_lock(a);
+       if (unlikely(err))
+               /* leave the copied-up one */
+               goto out_children;
+
+       if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE))
+               err = au_may_ren(a);
+       else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN))
+               err = -ENAMETOOLONG;
+       if (unlikely(err))
+               goto out_hdir;
+
+       /* store timestamps to be revertible */
+       au_ren_dt(a);
+
+       /* here we go */
+       err = do_rename(a);
+       if (unlikely(err))
+               goto out_dt;
+
+       /* update dir attributes */
+       au_ren_refresh_dir(a);
+
+       /* dput/iput all lower dentries */
+       au_ren_refresh(a);
+
+       goto out_hdir; /* success */
+
+out_dt:
+       au_ren_rev_dt(err, a);
+out_hdir:
+       au_ren_unlock(a);
+out_children:
+       au_nhash_wh_free(&a->whlist);
+       if (err && a->dst_inode && a->dst_bstart != a->btgt) {
+               AuDbg("bstart %d, btgt %d\n", a->dst_bstart, a->btgt);
+               au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
+               au_set_dbstart(a->dst_dentry, a->dst_bstart);
+       }
+out_parent:
+       if (!err)
+               d_move(a->src_dentry, a->dst_dentry);
+       else {
+               au_update_dbstart(a->dst_dentry);
+               if (!a->dst_inode)
+                       d_drop(a->dst_dentry);
+       }
+       if (au_ftest_ren(a->flags, ISSAMEDIR))
+               di_write_unlock(a->dst_parent);
+       else
+               di_write_unlock2(a->src_parent, a->dst_parent);
+out_unlock:
+       aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
+out_free:
+       iput(a->dst_inode);
+       if (a->thargs)
+               au_whtmp_rmdir_free(a->thargs);
+       kfree(a);
+out:
+       AuTraceErr(err);
+       return err;
+}
diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c
new file mode 100644 (file)
index 0000000..b82ebbf
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode private data
+ */
+
+#include "aufs.h"
+
+/*
+ * return the lower inode stored at branch index @bindex.
+ * no reference is grabbed for the caller; any iinfo lock must be held
+ * (IiMustAnyLock).
+ */
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
+{
+       struct inode *hi;
+
+       IiMustAnyLock(inode);
+
+       hi = au_ii(inode)->ii_hinode[0 + bindex].hi_inode;
+       /* a stored lower inode must still be referenced */
+       AuDebugOn(hi && atomic_read(&hi->i_count) <= 0);
+       return hi;
+}
+
+/* todo: hard/soft set? */
+/*
+ * release everything held by one branch slot: the hnotify watch, the
+ * copied-up whiteout dentry and the lower inode itself (in that order).
+ */
+void au_hiput(struct au_hinode *hinode)
+{
+       au_hn_free(hinode);
+       dput(hinode->hi_whdentry);
+       iput(hinode->hi_inode);
+}
+
+/*
+ * compute the AuHi_* flags for au_set_h_iptr() from the current mount
+ * options: XINO when the xino translation is enabled, HNOTIFY only for
+ * directories under UDBA=hnotify.
+ */
+unsigned int au_hi_flags(struct inode *inode, int isdir)
+{
+       const unsigned int mnt_flags = au_mntflags(inode->i_sb);
+       unsigned int ret = 0;
+
+       if (au_opt_test(mnt_flags, XINO))
+               au_fset_hi(ret, XINO);
+       if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY))
+               au_fset_hi(ret, HNOTIFY);
+       return ret;
+}
+
+/*
+ * install @h_inode as the lower inode at branch @bindex, releasing any
+ * previously installed one.  callers pass an igrab-ed inode whose
+ * reference is taken over here and dropped later by au_hiput().
+ * @flags (AuHi_XINO/AuHi_HNOTIFY, cf. au_hi_flags()) selects whether the
+ * xino table is updated and an hnotify watch is allocated.
+ */
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+                  struct inode *h_inode, unsigned int flags)
+{
+       struct au_hinode *hinode;
+       struct inode *hi;
+       struct au_iinfo *iinfo = au_ii(inode);
+
+       IiMustWriteLock(inode);
+
+       hinode = iinfo->ii_hinode + bindex;
+       hi = hinode->hi_inode;
+       AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+
+       /* drop whatever occupied the slot before */
+       if (hi)
+               au_hiput(hinode);
+       hinode->hi_inode = h_inode;
+       if (h_inode) {
+               int err;
+               struct super_block *sb = inode->i_sb;
+               struct au_branch *br;
+
+               /* the file type must not differ between the layers */
+               AuDebugOn(inode->i_mode
+                         && (h_inode->i_mode & S_IFMT)
+                         != (inode->i_mode & S_IFMT));
+               /* the topmost branch defines the inode generation */
+               if (bindex == iinfo->ii_bstart)
+                       au_cpup_igen(inode, h_inode);
+               br = au_sbr(sb, bindex);
+               hinode->hi_id = br->br_id;
+               if (au_ftest_hi(flags, XINO)) {
+                       err = au_xino_write(sb, bindex, h_inode->i_ino,
+                                           inode->i_ino);
+                       /* xino failures are logged, not propagated */
+                       if (unlikely(err))
+                               AuIOErr1("failed au_xino_write() %d\n", err);
+               }
+
+               if (au_ftest_hi(flags, HNOTIFY)
+                   && au_br_hnotifyable(br->br_perm)) {
+                       err = au_hn_alloc(hinode, inode);
+                       if (unlikely(err))
+                               AuIOErr1("au_hn_alloc() %d\n", err);
+               }
+       }
+}
+
+/*
+ * record the copied-up whiteout dentry for branch @bindex.  the slot
+ * must be empty; the reference to @h_wh is taken over and dropped later
+ * by au_hiput().
+ */
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+                 struct dentry *h_wh)
+{
+       struct au_hinode *hinode;
+
+       IiMustWriteLock(inode);
+
+       hinode = au_ii(inode)->ii_hinode + bindex;
+       AuDebugOn(hinode->hi_whdentry);
+       hinode->hi_whdentry = h_wh;
+}
+
+/*
+ * bring the inode generation up to the current superblock generation.
+ * @half: mark the inode as only half refreshed (attributes copied but
+ * not every lower inode revalidated); cf. AuIG_HALF_REFRESHED.
+ * ii_genspin serialises against concurrent readers of ii_generation.
+ */
+void au_update_iigen(struct inode *inode, int half)
+{
+       struct au_iinfo *iinfo;
+       struct au_iigen *iigen;
+       unsigned int sigen;
+
+       sigen = au_sigen(inode->i_sb);
+       iinfo = au_ii(inode);
+       iigen = &iinfo->ii_generation;
+       spin_lock(&iinfo->ii_genspin);
+       iigen->ig_generation = sigen;
+       if (half)
+               au_ig_fset(iigen->ig_flags, HALF_REFRESHED);
+       else
+               au_ig_fclr(iigen->ig_flags, HALF_REFRESHED);
+       spin_unlock(&iinfo->ii_genspin);
+}
+
+/* it may be called at remount time, too */
+/*
+ * recompute ii_bstart/ii_bend as the first/last branch index that still
+ * holds a lower inode.  with @do_put_zero, lower inodes whose link count
+ * dropped to zero are released first.
+ */
+void au_update_ibrange(struct inode *inode, int do_put_zero)
+{
+       struct au_iinfo *iinfo;
+       aufs_bindex_t bindex, bend;
+
+       iinfo = au_ii(inode);
+       if (!iinfo)
+               return;
+
+       IiMustWriteLock(inode);
+
+       if (do_put_zero && iinfo->ii_bstart >= 0) {
+               for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
+                    bindex++) {
+                       struct inode *h_i;
+
+                       h_i = iinfo->ii_hinode[0 + bindex].hi_inode;
+                       /* unlinked on the branch: drop our reference */
+                       if (h_i && !h_i->i_nlink)
+                               au_set_h_iptr(inode, bindex, NULL, 0);
+               }
+       }
+
+       /* scan forward for the first, backward for the last occupied slot */
+       iinfo->ii_bstart = -1;
+       iinfo->ii_bend = -1;
+       bend = au_sbend(inode->i_sb);
+       for (bindex = 0; bindex <= bend; bindex++)
+               if (iinfo->ii_hinode[0 + bindex].hi_inode) {
+                       iinfo->ii_bstart = bindex;
+                       break;
+               }
+       if (iinfo->ii_bstart >= 0)
+               for (bindex = bend; bindex >= iinfo->ii_bstart; bindex--)
+                       if (iinfo->ii_hinode[0 + bindex].hi_inode) {
+                               iinfo->ii_bend = bindex;
+                               break;
+                       }
+       AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * slab constructor for struct au_icntnr: initialise the pieces that
+ * survive object reuse (spinlock, rwsem + its shared lockdep class,
+ * and the embedded VFS inode).
+ */
+void au_icntnr_init_once(void *_c)
+{
+       struct au_icntnr *c = _c;
+       struct au_iinfo *iinfo = &c->iinfo;
+       static struct lock_class_key aufs_ii;
+
+       spin_lock_init(&iinfo->ii_genspin);
+       au_rw_init(&iinfo->ii_rwsem);
+       au_rw_class(&iinfo->ii_rwsem, &aufs_ii);
+       inode_init_once(&c->vfs_inode);
+}
+
+/*
+ * allocate and initialise the per-inode branch array, sized for the
+ * current number of branches (at least one slot).  returns 0 on
+ * success, -ENOMEM otherwise.
+ */
+int au_iinfo_init(struct inode *inode)
+{
+       struct au_iinfo *iinfo;
+       struct super_block *sb;
+       int nbr, i;
+
+       sb = inode->i_sb;
+       iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+       nbr = au_sbend(sb) + 1;
+       if (unlikely(nbr <= 0))
+               nbr = 1;
+       iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
+       if (iinfo->ii_hinode) {
+               au_ninodes_inc(sb);
+               /* -1 marks a slot with no branch assigned yet */
+               for (i = 0; i < nbr; i++)
+                       iinfo->ii_hinode[i].hi_id = -1;
+
+               iinfo->ii_generation.ig_generation = au_sigen(sb);
+               iinfo->ii_bstart = -1;
+               iinfo->ii_bend = -1;
+               iinfo->ii_vdir = NULL;
+               return 0;
+       }
+       return -ENOMEM;
+}
+
+/*
+ * grow the branch array to @nbr slots (new tail is zero-filled by
+ * au_kzrealloc).  the old size is derived from ii_bend; an empty inode
+ * (ii_bend == -1) still owns one slot.
+ */
+int au_ii_realloc(struct au_iinfo *iinfo, int nbr)
+{
+       int err, sz;
+       struct au_hinode *hip;
+
+       AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+       err = -ENOMEM;
+       sz = sizeof(*hip) * (iinfo->ii_bend + 1);
+       if (!sz)
+               sz = sizeof(*hip);
+       hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS);
+       if (hip) {
+               iinfo->ii_hinode = hip;
+               err = 0;
+       }
+
+       return err;
+}
+
+/*
+ * tear down the iinfo when the aufs inode is destroyed: remove its
+ * xino entry, free the virtual-dir cache, release every lower inode
+ * and finally the branch array itself.
+ */
+void au_iinfo_fin(struct inode *inode)
+{
+       struct au_iinfo *iinfo;
+       struct au_hinode *hi;
+       struct super_block *sb;
+       aufs_bindex_t bindex, bend;
+       const unsigned char unlinked = !inode->i_nlink;
+
+       iinfo = au_ii(inode);
+       /* bad_inode case */
+       if (!iinfo)
+               return;
+
+       sb = inode->i_sb;
+       au_ninodes_dec(sb);
+       if (si_pid_test(sb))
+               /* this task already holds the sbinfo lock */
+               au_xino_delete_inode(inode, unlinked);
+       else {
+               /*
+                * it is safe to hide the dependency between sbinfo and
+                * sb->s_umount.
+                */
+               lockdep_off();
+               si_noflush_read_lock(sb);
+               au_xino_delete_inode(inode, unlinked);
+               si_read_unlock(sb);
+               lockdep_on();
+       }
+
+       if (iinfo->ii_vdir)
+               au_vdir_free(iinfo->ii_vdir);
+
+       /* release the lower inodes of every occupied slot */
+       bindex = iinfo->ii_bstart;
+       if (bindex >= 0) {
+               hi = iinfo->ii_hinode + bindex;
+               bend = iinfo->ii_bend;
+               while (bindex++ <= bend) {
+                       if (hi->hi_inode)
+                               au_hiput(hi);
+                       hi++;
+               }
+       }
+       kfree(iinfo->ii_hinode);
+       iinfo->ii_hinode = NULL;
+       AuRwDestroy(&iinfo->ii_rwsem);
+}
diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c
new file mode 100644 (file)
index 0000000..905d240
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode functions
+ */
+
+#include "aufs.h"
+
+/*
+ * grab an extra reference on @inode and return it; NULL is passed
+ * through unchanged.
+ */
+struct inode *au_igrab(struct inode *inode)
+{
+       if (!inode)
+               return NULL;
+
+       AuDebugOn(!atomic_read(&inode->i_count));
+       ihold(inode);
+       return inode;
+}
+
+/*
+ * re-copy the attributes from the lower inodes, mark the inode
+ * generation half refreshed, and optionally bump i_version.
+ */
+static void au_refresh_hinode_attr(struct inode *inode, int do_version)
+{
+       au_cpup_attr_all(inode, /*force*/0);
+       au_update_iigen(inode, /*half*/1);
+       if (!do_version)
+               return;
+       inode->i_version++;
+}
+
+/*
+ * after a branch add/del/reorder, move every occupied slot in
+ * ii_hinode to the branch's new index (matching by hi_id), dropping
+ * slots whose branch is gone.  *update is set when a lower inode was
+ * released, and ii_bstart/ii_bend are recomputed.
+ */
+static int au_ii_refresh(struct inode *inode, int *update)
+{
+       int err, e;
+       umode_t type;
+       aufs_bindex_t bindex, new_bindex;
+       struct super_block *sb;
+       struct au_iinfo *iinfo;
+       struct au_hinode *p, *q, tmp;
+
+       IiMustWriteLock(inode);
+
+       *update = 0;
+       sb = inode->i_sb;
+       type = inode->i_mode & S_IFMT;
+       iinfo = au_ii(inode);
+       err = au_ii_realloc(iinfo, au_sbend(sb) + 1);
+       if (unlikely(err))
+               goto out;
+
+       AuDebugOn(iinfo->ii_bstart < 0);
+       p = iinfo->ii_hinode + iinfo->ii_bstart;
+       for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
+            bindex++, p++) {
+               if (!p->hi_inode)
+                       continue;
+
+               AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT));
+               new_bindex = au_br_index(sb, p->hi_id);
+               if (new_bindex == bindex)
+                       continue;
+
+               /* the branch was removed: drop this lower inode */
+               if (new_bindex < 0) {
+                       *update = 1;
+                       au_hiput(p);
+                       p->hi_inode = NULL;
+                       continue;
+               }
+
+               if (new_bindex < iinfo->ii_bstart)
+                       iinfo->ii_bstart = new_bindex;
+               if (iinfo->ii_bend < new_bindex)
+                       iinfo->ii_bend = new_bindex;
+               /* swap two lower inode, and loop again */
+               q = iinfo->ii_hinode + new_bindex;
+               tmp = *q;
+               *q = *p;
+               *p = tmp;
+               /* the swapped-in slot is occupied: revisit this index */
+               if (tmp.hi_inode) {
+                       bindex--;
+                       p--;
+               }
+       }
+       au_update_ibrange(inode, /*do_put_zero*/0);
+       e = au_dy_irefresh(inode);
+       if (unlikely(e && !err))
+               err = e;
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * refresh the branch array from the superblock alone (no dentry
+ * available), then re-copy the attributes on success.
+ */
+int au_refresh_hinode_self(struct inode *inode)
+{
+       int update, err;
+
+       err = au_ii_refresh(inode, &update);
+       if (!err) {
+               int do_version = update && S_ISDIR(inode->i_mode);
+
+               au_refresh_hinode_attr(inode, do_version);
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * fully refresh @inode using @dentry: first re-map the branch array
+ * via au_ii_refresh(), then install any lower inode the dentry has
+ * which the inode lacks.  -EIO when a slot already holds a different
+ * lower inode than the dentry's.
+ */
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
+{
+       int err, e, update;
+       unsigned int flags;
+       umode_t mode;
+       aufs_bindex_t bindex, bend;
+       unsigned char isdir;
+       struct au_hinode *p;
+       struct au_iinfo *iinfo;
+
+       err = au_ii_refresh(inode, &update);
+       if (unlikely(err))
+               goto out;
+
+       update = 0;
+       iinfo = au_ii(inode);
+       p = iinfo->ii_hinode + iinfo->ii_bstart;
+       mode = (inode->i_mode & S_IFMT);
+       isdir = S_ISDIR(mode);
+       flags = au_hi_flags(inode, isdir);
+       bend = au_dbend(dentry);
+       for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
+               struct inode *h_i;
+               struct dentry *h_d;
+
+               h_d = au_h_dptr(dentry, bindex);
+               if (!h_d || !h_d->d_inode)
+                       continue;
+
+               AuDebugOn(mode != (h_d->d_inode->i_mode & S_IFMT));
+               if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) {
+                       h_i = au_h_iptr(inode, bindex);
+                       if (h_i) {
+                               if (h_i == h_d->d_inode)
+                                       continue;
+                               /* slot holds a different lower inode */
+                               err = -EIO;
+                               break;
+                       }
+               }
+               /* widen the range and install the dentry's lower inode */
+               if (bindex < iinfo->ii_bstart)
+                       iinfo->ii_bstart = bindex;
+               if (iinfo->ii_bend < bindex)
+                       iinfo->ii_bend = bindex;
+               au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags);
+               update = 1;
+       }
+       au_update_ibrange(inode, /*do_put_zero*/0);
+       e = au_dy_irefresh(inode);
+       if (unlikely(e && !err))
+               err = e;
+       if (!err)
+               au_refresh_hinode_attr(inode, update && isdir);
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * initialise a freshly created aufs inode from @dentry: pick the
+ * i_op/i_fop tables matching the file type of the topmost lower inode,
+ * install every lower inode in the dentry's branch range, and copy the
+ * attributes up.  -EIO for an unknown file type.
+ */
+static int set_inode(struct inode *inode, struct dentry *dentry)
+{
+       int err;
+       unsigned int flags;
+       umode_t mode;
+       aufs_bindex_t bindex, bstart, btail;
+       unsigned char isdir;
+       struct dentry *h_dentry;
+       struct inode *h_inode;
+       struct au_iinfo *iinfo;
+
+       IiMustWriteLock(inode);
+
+       err = 0;
+       isdir = 0;
+       bstart = au_dbstart(dentry);
+       h_inode = au_h_dptr(dentry, bstart)->d_inode;
+       mode = h_inode->i_mode;
+       switch (mode & S_IFMT) {
+       case S_IFREG:
+               btail = au_dbtail(dentry);
+               inode->i_op = &aufs_iop;
+               inode->i_fop = &aufs_file_fop;
+               /* dynamically generated address_space ops */
+               err = au_dy_iaop(inode, bstart, h_inode);
+               if (unlikely(err))
+                       goto out;
+               break;
+       case S_IFDIR:
+               isdir = 1;
+               btail = au_dbtaildir(dentry);
+               inode->i_op = &aufs_dir_iop;
+               inode->i_fop = &aufs_dir_fop;
+               break;
+       case S_IFLNK:
+               btail = au_dbtail(dentry);
+               inode->i_op = &aufs_symlink_iop;
+               break;
+       case S_IFBLK:
+       case S_IFCHR:
+       case S_IFIFO:
+       case S_IFSOCK:
+               btail = au_dbtail(dentry);
+               inode->i_op = &aufs_iop;
+               au_init_special_fop(inode, mode, h_inode->i_rdev);
+               break;
+       default:
+               AuIOErr("Unknown file type 0%o\n", mode);
+               err = -EIO;
+               goto out;
+       }
+
+       /* do not set hnotify for whiteouted dirs (SHWH mode) */
+       flags = au_hi_flags(inode, isdir);
+       if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
+           && au_ftest_hi(flags, HNOTIFY)
+           && dentry->d_name.len > AUFS_WH_PFX_LEN
+           && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
+               au_fclr_hi(flags, HNOTIFY);
+       iinfo = au_ii(inode);
+       iinfo->ii_bstart = bstart;
+       iinfo->ii_bend = btail;
+       /* grab and install each lower inode in the dentry's range */
+       for (bindex = bstart; bindex <= btail; bindex++) {
+               h_dentry = au_h_dptr(dentry, bindex);
+               if (h_dentry)
+                       au_set_h_iptr(inode, bindex,
+                                     au_igrab(h_dentry->d_inode), flags);
+       }
+       au_cpup_attr_all(inode, /*force*/1);
+
+out:
+       return err;
+}
+
+/*
+ * successful returns with iinfo write_locked
+ * minus: errno
+ * zero: success, matched
+ * plus: no error, but unmatched
+ */
+/*
+ * decide whether an existing aufs inode still matches @dentry.
+ * on zero (matched) the iinfo is left write-locked for the caller;
+ * on plus (unmatched) or minus (error) it is unlocked again.
+ */
+static int reval_inode(struct inode *inode, struct dentry *dentry)
+{
+       int err;
+       unsigned int gen;
+       struct au_iigen iigen;
+       aufs_bindex_t bindex, bend;
+       struct inode *h_inode, *h_dinode;
+
+       /*
+        * before this function, if aufs got any iinfo lock, it must be only
+        * one, the parent dir.
+        * it can happen by UDBA and the obsoleted inode number.
+        */
+       err = -EIO;
+       if (unlikely(inode->i_ino == parent_ino(dentry)))
+               goto out;
+
+       err = 1;
+       ii_write_lock_new_child(inode);
+       h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode;
+       bend = au_ibend(inode);
+       /* search for a slot holding the dentry's topmost lower inode */
+       for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
+               h_inode = au_h_iptr(inode, bindex);
+               if (!h_inode || h_inode != h_dinode)
+                       continue;
+
+               err = 0;
+               gen = au_iigen(inode, &iigen);
+               /* up-to-date and fully refreshed: nothing to do */
+               if (gen == au_digen(dentry)
+                   && !au_ig_ftest(iigen.ig_flags, HALF_REFRESHED))
+                       break;
+
+               /* fully refresh inode using dentry */
+               err = au_refresh_hinode(inode, dentry);
+               if (!err)
+                       au_update_iigen(inode, /*half*/0);
+               break;
+       }
+
+       /* keep the lock only for the matched (err == 0) case */
+       if (unlikely(err))
+               ii_write_unlock(inode);
+out:
+       return err;
+}
+
+/*
+ * translate the lower inode number @h_ino on branch @bindex into the
+ * aufs inode number *@ino via the xino table, allocating and recording
+ * a new one when no mapping exists yet.  non-directories are
+ * serialised by the branch's xi_nondir_mtx against hardlink races.
+ */
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+          unsigned int d_type, ino_t *ino)
+{
+       int err;
+       struct mutex *mtx;
+
+       /* prevent hardlinked inode number from race condition */
+       mtx = NULL;
+       if (d_type != DT_DIR) {
+               mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx;
+               mutex_lock(mtx);
+       }
+       err = au_xino_read(sb, bindex, h_ino, ino);
+       if (unlikely(err))
+               goto out;
+
+       /* no mapping yet: allocate a new aufs ino and record it */
+       if (!*ino) {
+               err = -EIO;
+               *ino = au_xino_new_ino(sb);
+               if (unlikely(!*ino))
+                       goto out;
+               err = au_xino_write(sb, bindex, h_ino, *ino);
+               if (unlikely(err))
+                       goto out;
+       }
+
+out:
+       if (mtx)
+               mutex_unlock(mtx);
+       return err;
+}
+
+/* successful returns with iinfo write_locked */
+/* todo: return with unlocked? */
+/*
+ * look up or create the aufs inode for @dentry.
+ * the aufs ino comes from the xino table (a new one is allocated when
+ * missing); a brand-new inode is initialised via set_inode(), an
+ * existing one is revalidated via reval_inode().  a stale xino entry
+ * (unmatched, dead or @must_new) is erased and the whole lookup
+ * retried from the "new_ino" label.  non-directories are serialised by
+ * the branch's xi_nondir_mtx.  returns the inode with its iinfo
+ * write-locked on success, or an ERR_PTR.
+ */
+struct inode *au_new_inode(struct dentry *dentry, int must_new)
+{
+       struct inode *inode, *h_inode;
+       struct dentry *h_dentry;
+       struct super_block *sb;
+       struct mutex *mtx;
+       ino_t h_ino, ino;
+       int err;
+       aufs_bindex_t bstart;
+
+       sb = dentry->d_sb;
+       bstart = au_dbstart(dentry);
+       h_dentry = au_h_dptr(dentry, bstart);
+       h_inode = h_dentry->d_inode;
+       h_ino = h_inode->i_ino;
+
+       /*
+        * stop 'race'-ing between hardlinks under different
+        * parents.
+        */
+       mtx = NULL;
+       if (!S_ISDIR(h_inode->i_mode))
+               mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx;
+
+new_ino:
+       if (mtx)
+               mutex_lock(mtx);
+       err = au_xino_read(sb, bstart, h_ino, &ino);
+       inode = ERR_PTR(err);
+       if (unlikely(err))
+               goto out;
+
+       /* no mapping yet: allocate a fresh aufs ino */
+       if (!ino) {
+               ino = au_xino_new_ino(sb);
+               if (unlikely(!ino)) {
+                       inode = ERR_PTR(-EIO);
+                       goto out;
+               }
+       }
+
+       AuDbg("i%lu\n", (unsigned long)ino);
+       inode = au_iget_locked(sb, ino);
+       err = PTR_ERR(inode);
+       if (IS_ERR(inode))
+               goto out;
+
+       AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
+       if (inode->i_state & I_NEW) {
+               /* verbose coding for lock class name */
+               if (unlikely(S_ISLNK(h_inode->i_mode)))
+                       au_rw_class(&au_ii(inode)->ii_rwsem,
+                                   au_lc_key + AuLcSymlink_IIINFO);
+               else if (unlikely(S_ISDIR(h_inode->i_mode)))
+                       au_rw_class(&au_ii(inode)->ii_rwsem,
+                                   au_lc_key + AuLcDir_IIINFO);
+               else /* likely */
+                       au_rw_class(&au_ii(inode)->ii_rwsem,
+                                   au_lc_key + AuLcNonDir_IIINFO);
+
+               ii_write_lock_new_child(inode);
+               err = set_inode(inode, dentry);
+               if (!err) {
+                       unlock_new_inode(inode);
+                       goto out; /* success */
+               }
+
+               /*
+                * iget_failed() calls iput(), but we need to call
+                * ii_write_unlock() after iget_failed(). so dirty hack for
+                * i_count.
+                */
+               atomic_inc(&inode->i_count);
+               iget_failed(inode);
+               ii_write_unlock(inode);
+               au_xino_write(sb, bstart, h_ino, /*ino*/0);
+               /* ignore this error */
+               goto out_iput;
+       } else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) {
+               /*
+                * horrible race condition between lookup, readdir and copyup
+                * (or something).
+                */
+               if (mtx)
+                       mutex_unlock(mtx);
+               err = reval_inode(inode, dentry);
+               if (unlikely(err < 0)) {
+                       mtx = NULL;
+                       goto out_iput;
+               }
+
+               if (!err) {
+                       mtx = NULL;
+                       goto out; /* success */
+               } else if (mtx)
+                       mutex_lock(mtx);
+       }
+
+       /* the xino entry is stale: erase it and retry */
+       if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode)))
+               AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
+                       " b%d, %s, %.*s, hi%lu, i%lu.\n",
+                       bstart, au_sbtype(h_dentry->d_sb), AuDLNPair(dentry),
+                       (unsigned long)h_ino, (unsigned long)ino);
+       ino = 0;
+       err = au_xino_write(sb, bstart, h_ino, /*ino*/0);
+       if (!err) {
+               iput(inode);
+               if (mtx)
+                       mutex_unlock(mtx);
+               goto new_ino;
+       }
+
+out_iput:
+       iput(inode);
+       inode = ERR_PTR(err);
+out:
+       if (mtx)
+               mutex_unlock(mtx);
+       return inode;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test whether branch @bindex is writable for @inode: non-zero
+ * (-EROFS-style) when the branch itself is read-only, or when the
+ * lower inode at that branch is immutable.  @inode may be NULL.
+ */
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+              struct inode *inode)
+{
+       int err;
+
+       err = au_br_rdonly(au_sbr(sb, bindex));
+
+       /* pseudo-link after flushed may happen out of bounds */
+       if (!err
+           && inode
+           && au_ibstart(inode) <= bindex
+           && bindex <= au_ibend(inode)) {
+               /*
+                * permission check is unnecessary since vfsub routine
+                * will be called later
+                */
+               struct inode *hi = au_h_iptr(inode, bindex);
+               if (hi)
+                       err = IS_IMMUTABLE(hi) ? -EROFS : 0;
+       }
+
+       return err;
+}
+
+/*
+ * permission test against a lower inode; skipped (always granted) when
+ * the current fsuid is root (0).
+ */
+int au_test_h_perm(struct inode *h_inode, int mask)
+{
+       if (current_fsuid())
+               return inode_permission(h_inode, mask);
+       return 0;
+}
+
+/*
+ * au_test_h_perm() variant for si-owned operations: on an NFS branch,
+ * writing a directory additionally requires read permission so the
+ * check is actually performed.
+ */
+int au_test_h_perm_sio(struct inode *h_inode, int mask)
+{
+       int m = mask;
+
+       if ((m & MAY_WRITE)
+           && S_ISDIR(h_inode->i_mode)
+           && au_test_nfs(h_inode->i_sb))
+               m |= MAY_READ; /* force permission check */
+       return au_test_h_perm(h_inode, m);
+}
diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h
new file mode 100644 (file)
index 0000000..22e565a
--- /dev/null
@@ -0,0 +1,599 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations
+ */
+
+#ifndef __AUFS_INODE_H__
+#define __AUFS_INODE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fsnotify.h>
+#include "rwsem.h"
+
+struct vfsmount;
+
+/* per-lower-inode notification watch; empty unless HNOTIFY is built in */
+struct au_hnotify {
+#ifdef CONFIG_AUFS_HNOTIFY
+#ifdef CONFIG_AUFS_HFSNOTIFY
+       /* never use fsnotify_add_vfsmount_mark() */
+       struct fsnotify_mark            hn_mark;
+#endif
+       struct inode                    *hn_aufs_inode; /* no get/put */
+#endif
+} ____cacheline_aligned_in_smp;
+
+/* one branch slot of an aufs inode (element of au_iinfo.ii_hinode) */
+struct au_hinode {
+       struct inode            *hi_inode;      /* lower inode, referenced */
+       aufs_bindex_t           hi_id;          /* branch id (-1: unused) */
+#ifdef CONFIG_AUFS_HNOTIFY
+       struct au_hnotify       *hi_notify;
+#endif
+
+       /* reference to the copied-up whiteout with get/put */
+       struct dentry           *hi_whdentry;
+};
+
+/* ig_flags */
+#define AuIG_HALF_REFRESHED            1
+#define au_ig_ftest(flags, name)       ((flags) & AuIG_##name)
+#define au_ig_fset(flags, name) \
+       do { (flags) |= AuIG_##name; } while (0)
+#define au_ig_fclr(flags, name) \
+       do { (flags) &= ~AuIG_##name; } while (0)
+
+/* inode generation, compared against the superblock generation */
+struct au_iigen {
+       __u32           ig_generation, ig_flags;
+};
+
+struct au_vdir;
+/* aufs-private inode data, embedded in struct au_icntnr */
+struct au_iinfo {
+       spinlock_t              ii_genspin;     /* guards ii_generation */
+       struct au_iigen         ii_generation;
+       struct super_block      *ii_hsb1;       /* no get/put */
+
+       struct au_rwsem         ii_rwsem;       /* guards the fields below */
+       /* first/last occupied branch index, -1 when empty */
+       aufs_bindex_t           ii_bstart, ii_bend;
+       __u32                   ii_higen;
+       struct au_hinode        *ii_hinode;     /* array, one per branch */
+       struct au_vdir          *ii_vdir;       /* virtual-dir cache */
+};
+
+/* the allocation unit: iinfo followed by the VFS inode */
+struct au_icntnr {
+       struct au_iinfo iinfo;
+       struct inode vfs_inode;
+} ____cacheline_aligned_in_smp;
+
+/* au_pin flags */
+#define AuPin_DI_LOCKED                1
+#define AuPin_MNT_WRITE                (1 << 1)
+#define au_ftest_pin(flags, name)      ((flags) & AuPin_##name)
+#define au_fset_pin(flags, name) \
+       do { (flags) |= AuPin_##name; } while (0)
+#define au_fclr_pin(flags, name) \
+       do { (flags) &= ~AuPin_##name; } while (0)
+
+/* pins a dentry's parent dir on one branch while it is being modified */
+struct au_pin {
+       /* input */
+       struct dentry *dentry;
+       unsigned int udba;
+       /* lock subclasses for the di/hi locks, plus AuPin_* flags */
+       unsigned char lsc_di, lsc_hi, flags;
+       aufs_bindex_t bindex;
+
+       /* output */
+       struct dentry *parent;
+       struct au_hinode *hdir;
+       struct vfsmount *h_mnt;
+
+       /* temporary unlock/relock for copyup */
+       struct dentry *h_dentry, *h_parent;
+       struct au_branch *br;
+       struct task_struct *task;
+};
+
+void au_pin_hdir_unlock(struct au_pin *p);
+int au_pin_hdir_relock(struct au_pin *p);
+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task);
+void au_pin_hdir_acquire_nest(struct au_pin *p);
+void au_pin_hdir_release(struct au_pin *p);
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * get the aufs-private data of @inode; NULL when the hinode array was
+ * never allocated (bad_inode case).
+ */
+static inline struct au_iinfo *au_ii(struct inode *inode)
+{
+       struct au_iinfo *iinfo;
+
+       iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+       if (iinfo->ii_hinode)
+               return iinfo;
+       return NULL; /* debugging bad_inode case */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* inode.c */
+struct inode *au_igrab(struct inode *inode);
+int au_refresh_hinode_self(struct inode *inode);
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+          unsigned int d_type, ino_t *ino);
+struct inode *au_new_inode(struct dentry *dentry, int must_new);
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+              struct inode *inode);
+int au_test_h_perm(struct inode *h_inode, int mask);
+int au_test_h_perm_sio(struct inode *h_inode, int mask);
+
+/*
+ * ino translation for whiteout entries: only needed when shwh
+ * (show-whiteout) support is built in; otherwise a no-op returning 0.
+ */
+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex,
+                           ino_t h_ino, unsigned int d_type, ino_t *ino)
+{
+#ifdef CONFIG_AUFS_SHWH
+       return au_ino(sb, bindex, h_ino, d_type, ino);
+#else
+       return 0;
+#endif
+}
+
+/* i_op.c */
+extern struct inode_operations aufs_iop, aufs_symlink_iop, aufs_dir_iop;
+
+/* au_wr_dir flags */
+#define AuWrDir_ADD_ENTRY      1
+#define AuWrDir_TMP_WHENTRY    (1 << 1)
+#define AuWrDir_ISDIR          (1 << 2)
+#define au_ftest_wrdir(flags, name)    ((flags) & AuWrDir_##name)
+#define au_fset_wrdir(flags, name) \
+       do { (flags) |= AuWrDir_##name; } while (0)
+#define au_fclr_wrdir(flags, name) \
+       do { (flags) &= ~AuWrDir_##name; } while (0)
+
+struct au_wr_dir_args {
+       aufs_bindex_t force_btgt;
+       unsigned char flags;
+};
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+             struct au_wr_dir_args *args);
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin);
+void au_pin_init(struct au_pin *pin, struct dentry *dentry,
+                aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+                unsigned int udba, unsigned char flags);
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+          unsigned int udba, unsigned char flags) __must_check;
+int au_do_pin(struct au_pin *pin) __must_check;
+void au_unpin(struct au_pin *pin);
+
+/* i_op_add.c */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+              struct dentry *h_parent, int isdir);
+int aufs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
+int aufs_create(struct inode *dir, struct dentry *dentry, int mode,
+               struct nameidata *nd);
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+             struct dentry *dentry);
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+
+/* i_op_del.c */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+              struct dentry *h_parent, int isdir);
+int aufs_unlink(struct inode *dir, struct dentry *dentry);
+int aufs_rmdir(struct inode *dir, struct dentry *dentry);
+
+/* i_op_ren.c */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
+               struct inode *dir, struct dentry *dentry);
+
+/* iinfo.c */
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
+void au_hiput(struct au_hinode *hinode);
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+                 struct dentry *h_wh);
+unsigned int au_hi_flags(struct inode *inode, int isdir);
+
+/* hinode flags */
+/* bit flags stored per lower inode (au_hinode); test/set/clear via macros */
+#define AuHi_XINO      1
+#define AuHi_HNOTIFY   (1 << 1)
+#define au_ftest_hi(flags, name)       ((flags) & AuHi_##name)
+#define au_fset_hi(flags, name) \
+       do { (flags) |= AuHi_##name; } while (0)
+#define au_fclr_hi(flags, name) \
+       do { (flags) &= ~AuHi_##name; } while (0)
+
+/* without CONFIG_AUFS_HNOTIFY the flag degrades to 0 and its tests vanish */
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuHi_HNOTIFY
+#define AuHi_HNOTIFY   0
+#endif
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+                  struct inode *h_inode, unsigned int flags);
+
+void au_update_iigen(struct inode *inode, int half);
+void au_update_ibrange(struct inode *inode, int do_put_zero);
+
+void au_icntnr_init_once(void *_c);
+int au_iinfo_init(struct inode *inode);
+void au_iinfo_fin(struct inode *inode);
+int au_ii_realloc(struct au_iinfo *iinfo, int nbr);
+
+/*
+ * pseudo-link management; the real implementation (plink.c) is only built
+ * with procfs support, otherwise every entry point becomes a no-op stub.
+ */
+#ifdef CONFIG_PROC_FS
+/* plink.c */
+int au_plink_maint(struct super_block *sb, int flags);
+void au_plink_maint_leave(struct au_sbinfo *sbinfo);
+int au_plink_maint_enter(struct super_block *sb);
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb);
+#else
+AuStubVoid(au_plink_list, struct super_block *sb)
+#endif
+int au_plink_test(struct inode *inode);
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+                    struct dentry *h_dentry);
+void au_plink_put(struct super_block *sb, int verbose);
+void au_plink_clean(struct super_block *sb, int verbose);
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
+#else
+AuStubInt0(au_plink_maint, struct super_block *sb, int flags);
+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo);
+AuStubInt0(au_plink_maint_enter, struct super_block *sb);
+AuStubVoid(au_plink_list, struct super_block *sb);
+AuStubInt0(au_plink_test, struct inode *inode);
+AuStub(struct dentry *, au_plink_lkup, return NULL,
+       struct inode *inode, aufs_bindex_t bindex);
+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex,
+          struct dentry *h_dentry);
+AuStubVoid(au_plink_put, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id);
+#endif /* CONFIG_PROC_FS */
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for iinfo */
+enum {
+       AuLsc_II_CHILD,         /* child first */
+       AuLsc_II_CHILD2,        /* rename(2), link(2), and cpup at hnotify */
+       AuLsc_II_CHILD3,        /* copyup dirs */
+       AuLsc_II_PARENT,        /* see AuLsc_I_PARENT in vfsub.h */
+       AuLsc_II_PARENT2,
+       AuLsc_II_PARENT3,       /* copyup dirs */
+       AuLsc_II_NEW_CHILD
+};
+
+/*
+ * ii_read_lock_child, ii_write_lock_child,
+ * ii_read_lock_child2, ii_write_lock_child2,
+ * ii_read_lock_child3, ii_write_lock_child3,
+ * ii_read_lock_parent, ii_write_lock_parent,
+ * ii_read_lock_parent2, ii_write_lock_parent2,
+ * ii_read_lock_parent3, ii_write_lock_parent3,
+ * ii_read_lock_new_child, ii_write_lock_new_child,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void ii_read_lock_##name(struct inode *i) \
+{ \
+       au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void ii_write_lock_##name(struct inode *i) \
+{ \
+       au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuRWLockFuncs(name, lsc) \
+       AuReadLockFunc(name, lsc) \
+       AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+AuRWLockFuncs(new_child, NEW_CHILD);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+/*
+ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock
+ */
+AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem);
+
+#define IiMustNoWaiters(i)     AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
+#define IiMustAnyLock(i)       AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
+#define IiMustWriteLock(i)     AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* debug-only: clear i_mode of the embedded inode in a fresh container */
+static inline void au_icntnr_init(struct au_icntnr *c)
+{
+#ifdef CONFIG_AUFS_DEBUG
+       c->vfs_inode.i_mode = 0;
+#endif
+}
+
+/*
+ * read the inode generation under ii_genspin.  when @iigen is non-NULL the
+ * whole generation struct is copied out under the same lock.
+ * returns the bare generation number.
+ */
+static inline unsigned int au_iigen(struct inode *inode, struct au_iigen *iigen)
+{
+       struct au_iinfo *info = au_ii(inode);
+       unsigned int g;
+
+       spin_lock(&info->ii_genspin);
+       g = info->ii_generation.ig_generation;
+       if (iigen)
+               *iigen = info->ii_generation;
+       spin_unlock(&info->ii_genspin);
+
+       return g;
+}
+
+/* tiny test for inode number */
+/* tmpfs generation is too rough */
+/* tiny test for inode number */
+/* tmpfs generation is too rough */
+static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
+{
+       struct au_iinfo *iinfo = au_ii(inode);
+
+       AuRwMustAnyLock(&iinfo->ii_rwsem);
+       /* nonzero when the cached lower sb or generation no longer matches */
+       return iinfo->ii_hsb1 != h_inode->i_sb
+               || iinfo->ii_higen != h_inode->i_generation;
+}
+
+/* decrement the inode generation under ii_genspin (forces revalidation) */
+static inline void au_iigen_dec(struct inode *inode)
+{
+       struct au_iinfo *info = au_ii(inode);
+
+       spin_lock(&info->ii_genspin);
+       info->ii_generation.ig_generation--;
+       spin_unlock(&info->ii_genspin);
+}
+
+/* 0 when @inode is NULL or its generation equals @sigen, otherwise -EIO */
+static inline int au_iigen_test(struct inode *inode, unsigned int sigen)
+{
+       if (unlikely(inode && au_iigen(inode, NULL) != sigen))
+               return -EIO;
+
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * trivial accessors for struct au_iinfo members.
+ * readers must hold ii_rwsem at least shared, writers exclusively; the
+ * IiMust* assertions are debug aids (presumably no-ops without
+ * CONFIG_AUFS_DEBUG — confirm in the rwsem wrappers).
+ */
+static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
+                                       aufs_bindex_t bindex)
+{
+       IiMustAnyLock(inode);
+       return au_ii(inode)->ii_hinode[0 + bindex].hi_id;
+}
+
+static inline aufs_bindex_t au_ibstart(struct inode *inode)
+{
+       IiMustAnyLock(inode);
+       return au_ii(inode)->ii_bstart;
+}
+
+static inline aufs_bindex_t au_ibend(struct inode *inode)
+{
+       IiMustAnyLock(inode);
+       return au_ii(inode)->ii_bend;
+}
+
+static inline struct au_vdir *au_ivdir(struct inode *inode)
+{
+       IiMustAnyLock(inode);
+       return au_ii(inode)->ii_vdir;
+}
+
+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
+{
+       IiMustAnyLock(inode);
+       return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry;
+}
+
+static inline void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex)
+{
+       IiMustWriteLock(inode);
+       au_ii(inode)->ii_bstart = bindex;
+}
+
+static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex)
+{
+       IiMustWriteLock(inode);
+       au_ii(inode)->ii_bend = bindex;
+}
+
+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
+{
+       IiMustWriteLock(inode);
+       au_ii(inode)->ii_vdir = vdir;
+}
+
+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
+{
+       IiMustAnyLock(inode);
+       return au_ii(inode)->ii_hinode + bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* NULL-tolerant accessors for struct au_pin */
+
+static inline struct dentry *au_pinned_parent(struct au_pin *pin)
+{
+       return pin ? pin->parent : NULL;
+}
+
+static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
+{
+       return (pin && pin->hdir) ? pin->hdir->hi_inode : NULL;
+}
+
+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
+{
+       return pin ? pin->hdir : NULL;
+}
+
+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
+{
+       if (!pin)
+               return;
+       pin->dentry = dentry;
+}
+
+static inline void au_pin_set_parent_lflag(struct au_pin *pin,
+                                          unsigned char lflag)
+{
+       if (!pin)
+               return;
+       if (lflag)
+               au_fset_pin(pin->flags, DI_LOCKED);
+       else
+               au_fclr_pin(pin->flags, DI_LOCKED);
+}
+
+/* replace the pinned parent, dropping the reference on the old one */
+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
+{
+       if (!pin)
+               return;
+       dput(pin->parent);
+       pin->parent = dget(parent);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+#ifdef CONFIG_AUFS_HNOTIFY
+/* backend operations for notification of lower-dir events (see hfsnotify.c) */
+struct au_hnotify_op {
+       void (*ctl)(struct au_hinode *hinode, int do_set);
+       int (*alloc)(struct au_hinode *hinode);
+
+       /*
+        * if it returns true, the caller should free hinode->hi_notify,
+        * otherwise ->free() frees it.
+        */
+       int (*free)(struct au_hinode *hinode,
+                   struct au_hnotify *hn) __must_check;
+
+       void (*fin)(void);
+       int (*init)(void);
+
+       int (*reset_br)(unsigned int udba, struct au_branch *br, int perm);
+       void (*fin_br)(struct au_branch *br);
+       int (*init_br)(struct au_branch *br, int perm);
+};
+
+/* hnotify.c */
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode);
+void au_hn_free(struct au_hinode *hinode);
+void au_hn_ctl(struct au_hinode *hinode, int do_set);
+void au_hn_reset(struct inode *inode, unsigned int flags);
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+              struct qstr *h_child_qstr, struct inode *h_child_inode);
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm);
+int au_hnotify_init_br(struct au_branch *br, int perm);
+void au_hnotify_fin_br(struct au_branch *br);
+int __init au_hnotify_init(void);
+void au_hnotify_fin(void);
+
+/* hfsnotify.c */
+extern const struct au_hnotify_op au_hnotify_op;
+
+static inline
+void au_hn_init(struct au_hinode *hinode)
+{
+       hinode->hi_notify = NULL;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+       return hinode->hi_notify;
+}
+
+#else
+/* CONFIG_AUFS_HNOTIFY disabled: stubs; au_hn_alloc reports "unsupported" */
+static inline
+int au_hn_alloc(struct au_hinode *hinode __maybe_unused,
+               struct inode *inode __maybe_unused)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+       return NULL;
+}
+
+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused)
+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused,
+          int do_set __maybe_unused)
+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused,
+          unsigned int flags __maybe_unused)
+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused,
+          struct au_branch *br __maybe_unused,
+          int perm __maybe_unused)
+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused,
+          int perm __maybe_unused)
+AuStubVoid(au_hnotify_fin_br, struct au_branch *br __maybe_unused)
+AuStubInt0(__init au_hnotify_init, void)
+AuStubVoid(au_hnotify_fin, void)
+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+/*
+ * turn event delivery off/on while aufs itself operates on a lower dir,
+ * so aufs does not react to events it generated.  the imtx helpers pair
+ * i_mutex of the lower dir with the suspension.
+ */
+static inline void au_hn_suspend(struct au_hinode *hdir)
+{
+       au_hn_ctl(hdir, /*do_set*/0);
+}
+
+static inline void au_hn_resume(struct au_hinode *hdir)
+{
+       au_hn_ctl(hdir, /*do_set*/1);
+}
+
+static inline void au_hn_imtx_lock(struct au_hinode *hdir)
+{
+       mutex_lock(&hdir->hi_inode->i_mutex);
+       au_hn_suspend(hdir);
+}
+
+static inline void au_hn_imtx_lock_nested(struct au_hinode *hdir,
+                                         unsigned int sc __maybe_unused)
+{
+       mutex_lock_nested(&hdir->hi_inode->i_mutex, sc);
+       au_hn_suspend(hdir);
+}
+
+/* resume events before dropping i_mutex: reverse order of the lock side */
+static inline void au_hn_imtx_unlock(struct au_hinode *hdir)
+{
+       au_hn_resume(hdir);
+       mutex_unlock(&hdir->hi_inode->i_mutex);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_INODE_H__ */
diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c
new file mode 100644 (file)
index 0000000..dcb837f
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * ioctl
+ * plink-management and readdir in userspace.
+ * assist the pathconf(3) wrapper library.
+ */
+
+#include "aufs.h"
+
+/*
+ * AUFS_CTL_WBR_FD: open the root dir of a writable branch and hand the
+ * fd to userspace.
+ * @arg selects a branch by id (brid >= 0) and may add open flags; when
+ * @arg is NULL (or brid < 0) the first writable branch at/after index 0
+ * is chosen.  returns the new fd or a negative errno.
+ */
+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
+{
+       int err, fd;
+       aufs_bindex_t wbi, bindex, bend;
+       struct file *h_file;
+       struct super_block *sb;
+       struct dentry *root;
+       struct au_branch *br;
+       struct aufs_wbr_fd wbrfd = {
+               .oflags = au_dir_roflags,
+               .brid   = -1
+       };
+       const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
+               | O_NOATIME | O_CLOEXEC;
+
+       AuDebugOn(wbrfd.oflags & ~valid);
+
+       if (arg) {
+               /* copy_from_user returns the number of uncopied bytes */
+               err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
+               if (unlikely(err)) {
+                       err = -EFAULT;
+                       goto out;
+               }
+
+               /* user flags are only allowed to add to the defaults */
+               err = -EINVAL;
+               AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
+               wbrfd.oflags |= au_dir_roflags;
+               AuDbg("0%o\n", wbrfd.oflags);
+               if (unlikely(wbrfd.oflags & ~valid))
+                       goto out;
+       }
+
+       /* reserve the fd before taking the superblock lock */
+       fd = get_unused_fd();
+       err = fd;
+       if (unlikely(fd < 0))
+               goto out;
+
+       h_file = ERR_PTR(-EINVAL);
+       wbi = 0;
+       br = NULL;
+       sb = path->dentry->d_sb;
+       root = sb->s_root;
+       aufs_read_lock(root, AuLock_IR);
+       bend = au_sbend(sb);
+       /* an explicit branch id must resolve to a valid index */
+       if (wbrfd.brid >= 0) {
+               wbi = au_br_index(sb, wbrfd.brid);
+               if (unlikely(wbi < 0 || wbi > bend))
+                       goto out_unlock;
+       }
+
+       h_file = ERR_PTR(-ENOENT);
+       br = au_sbr(sb, wbi);
+       if (!au_br_writable(br->br_perm)) {
+               /* a requested branch that is not writable is an error ... */
+               if (arg)
+                       goto out_unlock;
+
+               /* ... otherwise fall back to the next writable branch */
+               bindex = wbi + 1;
+               wbi = -1;
+               for (; bindex <= bend; bindex++) {
+                       br = au_sbr(sb, bindex);
+                       if (au_br_writable(br->br_perm)) {
+                               wbi = bindex;
+                               br = au_sbr(sb, wbi);
+                               break;
+                       }
+               }
+       }
+       AuDbg("wbi %d\n", wbi);
+       if (wbi >= 0)
+               h_file = au_h_open(root, wbi, wbrfd.oflags, NULL);
+
+out_unlock:
+       aufs_read_unlock(root, AuLock_IR);
+       err = PTR_ERR(h_file);
+       if (IS_ERR(h_file))
+               goto out_fd;
+
+       /*
+        * NOTE(review): br is dereferenced after aufs_read_unlock(); the
+        * open file presumably keeps the branch alive — confirm.
+        */
+       atomic_dec(&br->br_count); /* cf. au_h_open() */
+       fd_install(fd, h_file);
+       err = fd;
+       goto out; /* success */
+
+out_fd:
+       put_unused_fd(fd);
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ioctl entry for aufs directories: readdir-in-userspace, writable-branch
+ * fd and inode-busy queries.  unknown commands get -ENOTTY and are never
+ * forwarded to the lower filesystem.
+ */
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       long err;
+
+       if (cmd == AUFS_CTL_RDU || cmd == AUFS_CTL_RDU_INO)
+               err = au_rdu_ioctl(file, cmd, arg);
+       else if (cmd == AUFS_CTL_WBR_FD)
+               err = au_wbr_fd(&file->f_path, (void __user *)arg);
+       else if (cmd == AUFS_CTL_IBUSY)
+               err = au_ibusy_ioctl(file, arg);
+       else {
+               /* do not call the lower */
+               AuDbg("0x%x\n", cmd);
+               err = -ENOTTY;
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * ioctl entry for aufs non-directories; only the writable-branch fd
+ * command is supported, everything else gets -ENOTTY.
+ */
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       long err;
+
+       if (cmd == AUFS_CTL_WBR_FD)
+               err = au_wbr_fd(&file->f_path, (void __user *)arg);
+       else {
+               /* do not call the lower */
+               AuDbg("0x%x\n", cmd);
+               err = -ENOTTY;
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32bit-compat ioctl entry for directories.  RDU and IBUSY carry pointers
+ * and need argument translation; anything else is handed to the native
+ * handler, which rejects unknown commands itself.
+ */
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+                          unsigned long arg)
+{
+       long err;
+
+       switch (cmd) {
+       case AUFS_CTL_RDU:
+       case AUFS_CTL_RDU_INO:
+               err = au_rdu_compat_ioctl(file, cmd, arg);
+               break;
+
+       case AUFS_CTL_IBUSY:
+               err = au_ibusy_compat_ioctl(file, arg);
+               break;
+
+       default:
+               err = aufs_ioctl_dir(file, cmd, arg);
+       }
+
+       AuTraceErr(err);
+       return err;
+}
+
+#if 0 /* unused yet */
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+       return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+#endif /* CONFIG_COMPAT */
diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c
new file mode 100644 (file)
index 0000000..ccae19c
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * support for loopback block device as a branch
+ */
+
+#include <linux/loop.h>
+#include "aufs.h"
+
+/*
+ * test if two lower dentries have overlapping branches.
+ * returns nonzero when @h_adding sits on a loop device whose backing file
+ * lives inside this aufs mount (a self-referencing loop).
+ */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding)
+{
+       struct super_block *h_sb;
+       struct loop_device *l;
+
+       h_sb = h_adding->d_sb;
+       /* not backed by a loop device at all */
+       if (MAJOR(h_sb->s_dev) != LOOP_MAJOR)
+               return 0;
+
+       /* NOTE(review): reaches into the loop driver's private data */
+       l = h_sb->s_bdev->bd_disk->private_data;
+       h_adding = l->lo_backing_file->f_dentry;
+       /*
+        * h_adding can be local NFS.
+        * in this case aufs cannot detect the loop.
+        */
+       if (unlikely(h_adding->d_sb == sb))
+               return 1;
+       return !!au_test_subdir(h_adding, sb->s_root);
+}
+
+/* true if a kernel thread named 'loop[0-9].*' accesses a file */
+int au_test_loopback_kthread(void)
+{
+       struct task_struct *tsk = current;
+       char comm[sizeof(tsk->comm)];
+
+       if (!(tsk->flags & PF_KTHREAD))
+               return 0;
+
+       get_task_comm(comm, tsk);
+       /* "loop" prefix followed by a decimal digit, e.g. "loop0" */
+       return !strncmp(comm, "loop", 4)
+               && comm[4] >= '0' && comm[4] <= '9';
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define au_warn_loopback_step  16
+static int au_warn_loopback_nelem = au_warn_loopback_step;
+static unsigned long *au_warn_loopback_array;
+
+/*
+ * warn once per lower filesystem type when a loopback-backed branch is
+ * used, remembering each sb->s_magic already reported in a growable array
+ * (entries are filled sequentially, a zero slot marks the free tail).
+ *
+ * fix: the scan condition used to test '*a' (slot 0 only), so the loop
+ * never stopped at the first free slot; once one entry existed, every new
+ * magic walked past the free tail and forced a needless realloc.  test
+ * the current slot 'a[i]' instead.
+ */
+void au_warn_loopback(struct super_block *h_sb)
+{
+       int i, new_nelem;
+       unsigned long *a, magic;
+       static DEFINE_SPINLOCK(spin);
+
+       magic = h_sb->s_magic;
+       spin_lock(&spin);
+       a = au_warn_loopback_array;
+       for (i = 0; i < au_warn_loopback_nelem && a[i]; i++)
+               if (a[i] == magic) {
+                       /* already reported */
+                       spin_unlock(&spin);
+                       return;
+               }
+
+       /* h_sb is new to us, print it */
+       if (i < au_warn_loopback_nelem) {
+               a[i] = magic;
+               goto pr;
+       }
+
+       /* expand the array */
+       new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
+       a = au_kzrealloc(au_warn_loopback_array,
+                        au_warn_loopback_nelem * sizeof(unsigned long),
+                        new_nelem * sizeof(unsigned long), GFP_ATOMIC);
+       if (a) {
+               au_warn_loopback_nelem = new_nelem;
+               au_warn_loopback_array = a;
+               a[i] = magic;
+               goto pr;
+       }
+
+       /* on realloc failure the old array stays valid; just skip the warn */
+       spin_unlock(&spin);
+       AuWarn1("realloc failed, ignored\n");
+       return;
+
+pr:
+       spin_unlock(&spin);
+       pr_warn("you may want to try another patch for loopback file "
+               "on %s(0x%lx) branch\n", au_sbtype(h_sb), magic);
+}
+
+/*
+ * allocate the initial magic-number table used by au_warn_loopback().
+ * returns 0 or -ENOMEM.
+ */
+int au_loopback_init(void)
+{
+       struct super_block *sb __maybe_unused;
+
+       /* s_magic must fit the unsigned long slots of the table */
+       AuDebugOn(sizeof(sb->s_magic) != sizeof(unsigned long));
+
+       au_warn_loopback_array = kcalloc(au_warn_loopback_step,
+                                        sizeof(unsigned long), GFP_NOFS);
+       if (unlikely(!au_warn_loopback_array))
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* release the magic-number table */
+void au_loopback_fin(void)
+{
+       kfree(au_warn_loopback_array);
+}
diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h
new file mode 100644 (file)
index 0000000..88d019c
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * support for loopback mount as a branch
+ */
+
+#ifndef __AUFS_LOOP_H__
+#define __AUFS_LOOP_H__
+
+#ifdef __KERNEL__
+
+struct dentry;
+struct super_block;
+
+#ifdef CONFIG_AUFS_BDEV_LOOP
+/* loop.c */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding);
+int au_test_loopback_kthread(void);
+void au_warn_loopback(struct super_block *h_sb);
+
+int au_loopback_init(void);
+void au_loopback_fin(void);
+#else
+/* no-op fallbacks when loopback-branch support is compiled out */
+AuStubInt0(au_test_loopback_overlap, struct super_block *sb,
+          struct dentry *h_adding)
+AuStubInt0(au_test_loopback_kthread, void)
+AuStubVoid(au_warn_loopback, struct super_block *h_sb)
+
+AuStubInt0(au_loopback_init, void)
+AuStubVoid(au_loopback_fin, void)
+#endif /* CONFIG_AUFS_BDEV_LOOP */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LOOP_H__ */
diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk
new file mode 100644 (file)
index 0000000..3e6387b
--- /dev/null
@@ -0,0 +1,54 @@
+
+# defined in ${srctree}/fs/fuse/inode.c
+# tristate
+ifdef CONFIG_FUSE_FS
+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
+endif
+
+# defined in ${srctree}/fs/ocfs2/ocfs2_fs.h
+# tristate
+ifdef CONFIG_OCFS2_FS
+ccflags-y += -DOCFS2_SUPER_MAGIC=0x7461636f
+endif
+
+# defined in ${srctree}/fs/ocfs2/dlm/userdlm.h
+# tristate
+ifdef CONFIG_OCFS2_FS_O2CB
+ccflags-y += -DDLMFS_MAGIC=0x76a9f425
+endif
+
+# defined in ${srctree}/fs/cifs/cifsfs.c
+# tristate
+ifdef CONFIG_CIFS_FS
+ccflags-y += -DCIFS_MAGIC_NUMBER=0xFF534D42
+endif
+
+# defined in ${srctree}/fs/xfs/xfs_sb.h
+# tristate
+ifdef CONFIG_XFS_FS
+ccflags-y += -DXFS_SB_MAGIC=0x58465342
+endif
+
+# defined in ${srctree}/fs/configfs/mount.c
+# tristate
+ifdef CONFIG_CONFIGFS_FS
+ccflags-y += -DCONFIGFS_MAGIC=0x62656570
+endif
+
+# defined in ${srctree}/fs/9p/v9fs.h
+# tristate
+ifdef CONFIG_9P_FS
+ccflags-y += -DV9FS_MAGIC=0x01021997
+endif
+
+# defined in ${srctree}/fs/ubifs/ubifs.h
+# tristate
+ifdef CONFIG_UBIFS_FS
+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
+endif
+
+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h
+# tristate
+ifdef CONFIG_HFSPLUS_FS
+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b
+endif
diff --git a/fs/aufs/module.c b/fs/aufs/module.c
new file mode 100644 (file)
index 0000000..4e58dbf
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * module global variables and operations
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+/*
+ * krealloc() wrapper which zero-fills the newly grown tail beyond @nused.
+ * returns the (possibly moved) buffer, or NULL on failure — in which case
+ * the caller's original buffer is untouched and still owned by the caller.
+ */
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp)
+{
+       void *q;
+
+       if (new_sz <= nused)
+               return p;
+
+       q = krealloc(p, new_sz, gfp);
+       if (q)
+               memset(q + nused, 0, new_sz - nused);
+       return q;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * aufs caches
+ */
+struct kmem_cache *au_cachep[AuCache_Last];
+
+/*
+ * create the kmem caches (AuCache_HNOTIFY is owned by the hnotify code).
+ * fix: on partial failure the already-created caches used to leak — the
+ * caller's error path in aufs_init() never reaches au_cache_fin().
+ * destroy whatever was created before returning -ENOMEM.
+ */
+static int __init au_cache_init(void)
+{
+       int i;
+
+       au_cachep[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once);
+       if (au_cachep[AuCache_DINFO])
+               /* SLAB_DESTROY_BY_RCU */
+               au_cachep[AuCache_ICNTNR] = AuCacheCtor(au_icntnr,
+                                                       au_icntnr_init_once);
+       if (au_cachep[AuCache_ICNTNR])
+               au_cachep[AuCache_FINFO] = AuCacheCtor(au_finfo,
+                                                      au_fi_init_once);
+       if (au_cachep[AuCache_FINFO])
+               au_cachep[AuCache_VDIR] = AuCache(au_vdir);
+       if (au_cachep[AuCache_VDIR])
+               au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
+       if (au_cachep[AuCache_DEHSTR])
+               return 0;
+
+       /* unwind the caches created so far (cannot call au_cache_fin here,
+        * it is defined below) */
+       for (i = 0; i < AuCache_HNOTIFY; i++)
+               if (au_cachep[i]) {
+                       kmem_cache_destroy(au_cachep[i]);
+                       au_cachep[i] = NULL;
+               }
+       return -ENOMEM;
+}
+
+/* destroy every cache created by au_cache_init() */
+static void au_cache_fin(void)
+{
+       int i;
+
+       /* excluding AuCache_HNOTIFY */
+       BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
+       for (i = 0; i < AuCache_HNOTIFY; i++) {
+               if (!au_cachep[i])
+                       continue;
+               kmem_cache_destroy(au_cachep[i]);
+               au_cachep[i] = NULL;
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* read-only open flags for directories; computed once in aufs_init() */
+int au_dir_roflags;
+
+#ifdef CONFIG_AUFS_SBILIST
+/*
+ * iterate_supers_type() doesn't protect us from
+ * remounting (branch management)
+ */
+struct au_splhead au_sbilist;
+#endif
+
+struct lock_class_key au_lc_key[AuLcKey_Last];
+
+/*
+ * functions for module interface.
+ */
+MODULE_LICENSE("GPL");
+/* MODULE_LICENSE("GPL v2"); */
+MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
+MODULE_DESCRIPTION(AUFS_NAME
+       " -- Advanced multi layered unification filesystem");
+MODULE_VERSION(AUFS_VERSION);
+
+/* this module parameter has no meaning when SYSFS is disabled */
+int sysaufs_brs = 1;
+MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
+module_param_named(brs, sysaufs_brs, int, S_IRUGO);
+
+/* ---------------------------------------------------------------------- */
+
+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
+
+/* print @path into @seq, escaping the characters collected in au_esc_chars */
+int au_seq_path(struct seq_file *seq, struct path *path)
+{
+       return seq_path(seq, path, au_esc_chars);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * module entry: build the escape table for au_seq_path(), initialize each
+ * subsystem in dependency order, then register the filesystem.  each error
+ * label undoes exactly the steps completed before the failure, in reverse.
+ */
+static int __init aufs_init(void)
+{
+       int err, i;
+       char *p;
+
+       /* escape 0x01..0x20, backslash and del in printed paths */
+       p = au_esc_chars;
+       for (i = 1; i <= ' '; i++)
+               *p++ = i;
+       *p++ = '\\';
+       *p++ = '\x7f';
+       *p = 0;
+
+       au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
+
+       /* these four have no failure path and no dedicated undo below */
+       au_sbilist_init();
+       sysaufs_brs_init();
+       au_debug_init();
+       au_dy_init();
+       err = sysaufs_init();
+       if (unlikely(err))
+               goto out;
+       err = au_procfs_init();
+       if (unlikely(err))
+               goto out_sysaufs;
+       err = au_wkq_init();
+       if (unlikely(err))
+               goto out_procfs;
+       err = au_loopback_init();
+       if (unlikely(err))
+               goto out_wkq;
+       err = au_hnotify_init();
+       if (unlikely(err))
+               goto out_loopback;
+       err = au_sysrq_init();
+       if (unlikely(err))
+               goto out_hin;
+       err = au_cache_init();
+       if (unlikely(err))
+               goto out_sysrq;
+       err = register_filesystem(&aufs_fs_type);
+       if (unlikely(err))
+               goto out_cache;
+       /* since we define pr_fmt, call printk directly */
+       printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n");
+       goto out; /* success */
+
+out_cache:
+       au_cache_fin();
+out_sysrq:
+       au_sysrq_fin();
+out_hin:
+       au_hnotify_fin();
+out_loopback:
+       au_loopback_fin();
+out_wkq:
+       au_wkq_fin();
+out_procfs:
+       au_procfs_fin();
+out_sysaufs:
+       sysaufs_fin();
+       /* au_dy_init() is undone together with sysaufs */
+       au_dy_fin();
+out:
+       return err;
+}
+
+/* module exit: tear everything down in the reverse order of aufs_init() */
+static void __exit aufs_exit(void)
+{
+       unregister_filesystem(&aufs_fs_type);
+       au_cache_fin();
+       au_sysrq_fin();
+       au_hnotify_fin();
+       au_loopback_fin();
+       au_wkq_fin();
+       au_procfs_fin();
+       sysaufs_fin();
+       au_dy_fin();
+}
+
+module_init(aufs_init);
+module_exit(aufs_exit);
diff --git a/fs/aufs/module.h b/fs/aufs/module.h
new file mode 100644 (file)
index 0000000..52bf472
--- /dev/null
@@ -0,0 +1,105 @@
/*
 * Copyright (C) 2005-2013 Junjiro R. Okajima
 *
 * This program, aufs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/*
 * module initialization and module-global
 */

#ifndef __AUFS_MODULE_H__
#define __AUFS_MODULE_H__

#ifdef __KERNEL__

#include <linux/slab.h>

struct path;
struct seq_file;

/* module parameters */
extern int sysaufs_brs;

/* ---------------------------------------------------------------------- */

/*
 * read-only open flags for internal directory handles; initialized once
 * in aufs_init() from au_file_roflags(O_DIRECTORY | O_LARGEFILE).
 */
extern int au_dir_roflags;

/*
 * indices into au_lc_key[]: one lockdep class per (file-type, info-level)
 * pair, so lockdep can tell the nested aufs locks apart.
 */
enum {
	AuLcNonDir_FIINFO,
	AuLcNonDir_DIINFO,
	AuLcNonDir_IIINFO,

	AuLcDir_FIINFO,
	AuLcDir_DIINFO,
	AuLcDir_IIINFO,

	AuLcSymlink_DIINFO,
	AuLcSymlink_IIINFO,

	AuLcKey_Last
};
extern struct lock_class_key au_lc_key[AuLcKey_Last];

/*
 * au_kzrealloc: realloc @p from @nused to @new_sz, zero-filling the
 * newly grown tail (see module.c for the definition).
 */
void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp);
int au_seq_path(struct seq_file *seq, struct path *path);

#ifdef CONFIG_PROC_FS
/* procfs.c */
int __init au_procfs_init(void);
void au_procfs_fin(void);
#else
/* no procfs: stub out init/fin so callers need no #ifdef */
AuStubInt0(au_procfs_init, void);
AuStubVoid(au_procfs_fin, void);
#endif

/* ---------------------------------------------------------------------- */

/* kmem cache */
enum {
	AuCache_DINFO,
	AuCache_ICNTNR,
	AuCache_FINFO,
	AuCache_VDIR,
	AuCache_DEHSTR,
	AuCache_HNOTIFY, /* must be last */
	AuCache_Last
};

#define AuCacheFlags		(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
#define AuCache(type)		KMEM_CACHE(type, AuCacheFlags)
#define AuCacheCtor(type, ctor)	\
	kmem_cache_create(#type, sizeof(struct type), \
			  __alignof__(struct type), AuCacheFlags, ctor)

extern struct kmem_cache *au_cachep[];

/*
 * generate au_cache_alloc_<name>()/au_cache_free_<name>() wrappers
 * around the kmem cache selected by AuCache_<index>.
 * GFP_NOFS: these run on filesystem paths, so no fs reclaim recursion.
 */
#define AuCacheFuncs(name, index) \
static inline struct au_##name *au_cache_alloc_##name(void) \
{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \
static inline void au_cache_free_##name(struct au_##name *p) \
{ kmem_cache_free(au_cachep[AuCache_##index], p); }

AuCacheFuncs(dinfo, DINFO);
AuCacheFuncs(icntnr, ICNTNR);
AuCacheFuncs(finfo, FINFO);
AuCacheFuncs(vdir, VDIR);
AuCacheFuncs(vdir_dehstr, DEHSTR);
#ifdef CONFIG_AUFS_HNOTIFY
AuCacheFuncs(hnotify, HNOTIFY);
#endif

#endif /* __KERNEL__ */
#endif /* __AUFS_MODULE_H__ */
diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c
new file mode 100644 (file)
index 0000000..76a6f80
--- /dev/null
@@ -0,0 +1,1704 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * mount options/flags
+ */
+
+#include <linux/namei.h>
+#include <linux/types.h> /* a distribution requires */
+#include <linux/parser.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
/*
 * token values for the mount-option parser below; Opt_tail terminates a
 * parsed au_opt array, Opt_err marks an unrecognized option.
 */
enum {
	Opt_br,
	Opt_add, Opt_del, Opt_mod, Opt_reorder, Opt_append, Opt_prepend,
	Opt_idel, Opt_imod, Opt_ireorder,
	Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash, Opt_rendir,
	Opt_rdblk_def, Opt_rdhash_def,
	Opt_xino, Opt_zxino, Opt_noxino,
	Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
	Opt_trunc_xino_path, Opt_itrunc_xino,
	Opt_trunc_xib, Opt_notrunc_xib,
	Opt_shwh, Opt_noshwh,
	Opt_plink, Opt_noplink, Opt_list_plink,
	Opt_udba,
	Opt_dio, Opt_nodio,
	/* Opt_lock, Opt_unlock, */
	Opt_cmd, Opt_cmd_args,
	Opt_diropq_a, Opt_diropq_w,
	Opt_warn_perm, Opt_nowarn_perm,
	Opt_wbr_copyup, Opt_wbr_create,
	Opt_refrof, Opt_norefrof,
	Opt_verbose, Opt_noverbose,
	Opt_sum, Opt_nosum, Opt_wsum,
	Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
};
+
/*
 * pattern table for match_token(); several spellings may map to the same
 * token (e.g. "br=%s" and "br:%s").  Options that are compiled out or
 * obsolete map to Opt_ignore/Opt_ignore_silent so old fstabs keep working.
 */
static match_table_t options = {
	{Opt_br, "br=%s"},
	{Opt_br, "br:%s"},

	{Opt_add, "add=%d:%s"},
	{Opt_add, "add:%d:%s"},
	{Opt_add, "ins=%d:%s"},
	{Opt_add, "ins:%d:%s"},
	{Opt_append, "append=%s"},
	{Opt_append, "append:%s"},
	{Opt_prepend, "prepend=%s"},
	{Opt_prepend, "prepend:%s"},

	{Opt_del, "del=%s"},
	{Opt_del, "del:%s"},
	/* {Opt_idel, "idel:%d"}, */
	{Opt_mod, "mod=%s"},
	{Opt_mod, "mod:%s"},
	/* {Opt_imod, "imod:%d:%s"}, */

	{Opt_dirwh, "dirwh=%d"},

	{Opt_xino, "xino=%s"},
	{Opt_noxino, "noxino"},
	{Opt_trunc_xino, "trunc_xino"},
	{Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
	{Opt_notrunc_xino, "notrunc_xino"},
	{Opt_trunc_xino_path, "trunc_xino=%s"},
	{Opt_itrunc_xino, "itrunc_xino=%d"},
	/* {Opt_zxino, "zxino=%s"}, */
	{Opt_trunc_xib, "trunc_xib"},
	{Opt_notrunc_xib, "notrunc_xib"},

#ifdef CONFIG_PROC_FS
	{Opt_plink, "plink"},
#else
	/* plink needs procfs; silently accept and ignore it otherwise */
	{Opt_ignore_silent, "plink"},
#endif

	{Opt_noplink, "noplink"},

#ifdef CONFIG_AUFS_DEBUG
	{Opt_list_plink, "list_plink"},
#endif

	{Opt_udba, "udba=%s"},

	{Opt_dio, "dio"},
	{Opt_nodio, "nodio"},

	{Opt_diropq_a, "diropq=always"},
	{Opt_diropq_a, "diropq=a"},
	{Opt_diropq_w, "diropq=whiteouted"},
	{Opt_diropq_w, "diropq=w"},

	{Opt_warn_perm, "warn_perm"},
	{Opt_nowarn_perm, "nowarn_perm"},

	/* keep them temporary */
	{Opt_ignore_silent, "coo=%s"},
	{Opt_ignore_silent, "nodlgt"},
	{Opt_ignore_silent, "nodirperm1"},
	{Opt_ignore_silent, "clean_plink"},

#ifdef CONFIG_AUFS_SHWH
	{Opt_shwh, "shwh"},
#endif
	{Opt_noshwh, "noshwh"},

	{Opt_rendir, "rendir=%d"},

	{Opt_refrof, "refrof"},
	{Opt_norefrof, "norefrof"},

	{Opt_verbose, "verbose"},
	{Opt_verbose, "v"},
	{Opt_noverbose, "noverbose"},
	{Opt_noverbose, "quiet"},
	{Opt_noverbose, "q"},
	{Opt_noverbose, "silent"},

	{Opt_sum, "sum"},
	{Opt_nosum, "nosum"},
	{Opt_wsum, "wsum"},

	{Opt_rdcache, "rdcache=%d"},
	{Opt_rdblk, "rdblk=%d"},
	{Opt_rdblk_def, "rdblk=def"},
	{Opt_rdhash, "rdhash=%d"},
	{Opt_rdhash_def, "rdhash=def"},

	{Opt_wbr_create, "create=%s"},
	{Opt_wbr_create, "create_policy=%s"},
	{Opt_wbr_copyup, "cpup=%s"},
	{Opt_wbr_copyup, "copyup=%s"},
	{Opt_wbr_copyup, "copyup_policy=%s"},

	/* internal use for the scripts */
	{Opt_ignore_silent, "si=%s"},

	{Opt_br, "dirs=%s"},
	{Opt_ignore, "debug=%d"},
	{Opt_ignore, "delete=whiteout"},
	{Opt_ignore, "delete=all"},
	{Opt_ignore, "imap=%s"},

	/* temporary workaround, due to old mount(8)? */
	{Opt_ignore_silent, "relatime"},

	{Opt_err, NULL}
};
+
+/* ---------------------------------------------------------------------- */
+
+static const char *au_parser_pattern(int val, struct match_token *token)
+{
+       while (token->pattern) {
+               if (token->token == val)
+                       return token->pattern;
+               token++;
+       }
+       BUG();
+       return "??";
+}
+
+/* ---------------------------------------------------------------------- */
+
/* base branch permission strings: ro / rr (real-readonly) / rw */
static match_table_t brperm = {
	{AuBrPerm_RO, AUFS_BRPERM_RO},
	{AuBrPerm_RR, AUFS_BRPERM_RR},
	{AuBrPerm_RW, AUFS_BRPERM_RW},
	{0, NULL}
};

/* optional branch attributes, appended with '+' after the permission */
static match_table_t brattr = {
	{AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN},
	{AuBrRAttr_WH, AUFS_BRRATTR_WH},
	{AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH},
	{0, NULL}
};

/*
 * longest possible "perm+attr+attr" string; sizes the stack buffer in
 * au_optstr_br_perm()
 */
#define AuBrStr_LONGEST	AUFS_BRPERM_RW \
	"+" AUFS_BRATTR_UNPIN \
	"+" AUFS_BRWATTR_NLWH
+
+static int br_attr_val(char *str, match_table_t table, substring_t args[])
+{
+       int attr, v;
+       char *p;
+
+       attr = 0;
+       do {
+               p = strchr(str, '+');
+               if (p)
+                       *p = 0;
+               v = match_token(str, table, args);
+               if (v)
+                       attr |= v;
+               else {
+                       if (p)
+                               *p = '+';
+                       pr_warn("ignored branch attribute %s\n", str);
+                       break;
+               }
+               if (p)
+                       str = p + 1;
+       } while (p);
+
+       return attr;
+}
+
/*
 * Convert a branch permission string of the form "perm[+attr[+attr..]]"
 * into its bit value.  Unknown permission falls back to AuBrPerm_RO with
 * a warning; attributes that contradict the permission class (whiteout
 * attr on rw, no-link-whiteout on ro/rr) are warned about and cleared.
 * Mutates @perm temporarily while splitting at '+'.
 */
static int noinline_for_stack br_perm_val(char *perm)
{
	int val;
	char *p, *q;
	substring_t args[MAX_OPT_ARGS];

	p = strchr(perm, '+');
	if (p)
		*p = 0;
	val = match_token(perm, brperm, args);
	if (!val) {
		if (p)
			*p = '+';
		pr_warn("ignored branch permission %s\n", perm);
		val = AuBrPerm_RO;
		goto out;
	}
	if (!p)
		goto out;

	/* parse the '+'-separated attribute suffixes one segment at a time */
	p++;
	while (1) {
		q = strchr(p, '+');
		if (q)
			*q = 0;
		val |= br_attr_val(p, brattr, args);
		if (q) {
			*q = '+';
			p = q + 1;
		} else
			break;
	}
	/* drop attributes that do not apply to the permission class */
	switch (val & AuBrPerm_Mask) {
	case AuBrPerm_RO:
	case AuBrPerm_RR:
		if (unlikely(val & AuBrWAttr_NoLinkWH)) {
			pr_warn("ignored branch attribute %s\n",
				AUFS_BRWATTR_NLWH);
			val &= ~AuBrWAttr_NoLinkWH;
		}
		break;
	case AuBrPerm_RW:
		if (unlikely(val & AuBrRAttr_WH)) {
			pr_warn("ignored branch attribute %s\n",
				AUFS_BRRATTR_WH);
			val &= ~AuBrRAttr_WH;
		}
		break;
	}

out:
	return val;
}
+
/* Caller should free the return value */
/*
 * Build the textual "perm[+attr..]" representation of @brperm.
 * The stack buffer is sized by AuBrStr_LONGEST, the longest combination
 * the two macros below can produce; the result is kstrdup()-ed.
 */
char *au_optstr_br_perm(int brperm)
{
	char *p, a[sizeof(AuBrStr_LONGEST)];
	int sz;

/* copy the base permission string and leave p on its trailing NUL */
#define SetPerm(str) do {			\
		sz = sizeof(str);		\
		memcpy(a, str, sz);		\
		p = a + sz - 1;			\
	} while (0)

/* if @flag is set, append "+str" (including its NUL) and advance p */
#define AppendAttr(flag, str) do {			\
		if (brperm & flag) {		\
			sz = sizeof(str);	\
			*p++ = '+';		\
			memcpy(p, str, sz);	\
			p += sz - 1;		\
		}				\
	} while (0)

	switch (brperm & AuBrPerm_Mask) {
	case AuBrPerm_RO:
		SetPerm(AUFS_BRPERM_RO);
		break;
	case AuBrPerm_RR:
		SetPerm(AUFS_BRPERM_RR);
		break;
	case AuBrPerm_RW:
		SetPerm(AUFS_BRPERM_RW);
		break;
	default:
		AuDebugOn(1);
	}

	AppendAttr(AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN);
	AppendAttr(AuBrRAttr_WH, AUFS_BRRATTR_WH);
	AppendAttr(AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH);

	AuDebugOn(strlen(a) >= sizeof(a));
	return kstrdup(a, GFP_NOFS);
#undef SetPerm
#undef AppendAttr
}
+
+/* ---------------------------------------------------------------------- */
+
/* udba (user's direct branch access) levels; -1 marks an unknown string */
static match_table_t udbalevel = {
	{AuOpt_UDBA_REVAL, "reval"},
	{AuOpt_UDBA_NONE, "none"},
#ifdef CONFIG_AUFS_HNOTIFY
	{AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */
#ifdef CONFIG_AUFS_HFSNOTIFY
	{AuOpt_UDBA_HNOTIFY, "fsnotify"},
#endif
#endif
	{-1, NULL}
};

/* string -> AuOpt_UDBA_* value, or -1 if unrecognized */
static int noinline_for_stack udba_val(char *str)
{
	substring_t args[MAX_OPT_ARGS];

	return match_token(str, udbalevel, args);
}

/* AuOpt_UDBA_* value -> canonical option string (for /proc mounts etc.) */
const char *au_optstr_udba(int udba)
{
	return au_parser_pattern(udba, (void *)udbalevel);
}
+
+/* ---------------------------------------------------------------------- */
+
/*
 * writable-branch create policies; short and long spellings map to the
 * same token, and the ":%d"/":%d:%d" variants carry seconds/watermark
 * arguments parsed in au_wbr_create_val().
 */
static match_table_t au_wbr_create_policy = {
	{AuWbrCreate_TDP, "tdp"},
	{AuWbrCreate_TDP, "top-down-parent"},
	{AuWbrCreate_RR, "rr"},
	{AuWbrCreate_RR, "round-robin"},
	{AuWbrCreate_MFS, "mfs"},
	{AuWbrCreate_MFS, "most-free-space"},
	{AuWbrCreate_MFSV, "mfs:%d"},
	{AuWbrCreate_MFSV, "most-free-space:%d"},

	{AuWbrCreate_MFSRR, "mfsrr:%d"},
	{AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
	{AuWbrCreate_PMFS, "pmfs"},
	{AuWbrCreate_PMFSV, "pmfs:%d"},
	{AuWbrCreate_PMFSRR, "pmfsrr:%d"},
	{AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"},

	{-1, NULL}
};
+
+/*
+ * cf. linux/lib/parser.c and cmdline.c
+ * gave up calling memparse() since it uses simple_strtoull() instead of
+ * kstrto...().
+ */
+static int noinline_for_stack
+au_match_ull(substring_t *s, unsigned long long *result)
+{
+       int err;
+       unsigned int len;
+       char a[32];
+
+       err = -ERANGE;
+       len = s->to - s->from;
+       if (len + 1 <= sizeof(a)) {
+               memcpy(a, s->from, len);
+               a[len] = '\0';
+               err = kstrtoull(a, 0, result);
+       }
+       return err;
+}
+
+static int au_wbr_mfs_wmark(substring_t *arg, char *str,
+                           struct au_opt_wbr_create *create)
+{
+       int err;
+       unsigned long long ull;
+
+       err = 0;
+       if (!au_match_ull(arg, &ull))
+               create->mfsrr_watermark = ull;
+       else {
+               pr_err("bad integer in %s\n", str);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int au_wbr_mfs_sec(substring_t *arg, char *str,
+                         struct au_opt_wbr_create *create)
+{
+       int n, err;
+
+       err = 0;
+       if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC)
+               create->mfs_second = n;
+       else {
+               pr_err("bad integer in %s\n", str);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
/*
 * Parse a create-policy string (cf. au_wbr_create_policy) and fill
 * @create.  Returns the AuWbrCreate_* token (also stored in
 * create->wbr_create) or a negative errno from the argument parsers.
 * Note the deliberate fallthrough: MFSRR/PMFSRR parse a watermark and
 * then fall into the MFS/PMFS case to pick up the default interval.
 */
static int noinline_for_stack
au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
{
	int err, e;
	substring_t args[MAX_OPT_ARGS];

	err = match_token(str, au_wbr_create_policy, args);
	create->wbr_create = err;
	switch (err) {
	case AuWbrCreate_MFSRRV:
	case AuWbrCreate_PMFSRRV:
		/* explicit watermark and interval */
		e = au_wbr_mfs_wmark(&args[0], str, create);
		if (!e)
			e = au_wbr_mfs_sec(&args[1], str, create);
		if (unlikely(e))
			err = e;
		break;
	case AuWbrCreate_MFSRR:
	case AuWbrCreate_PMFSRR:
		/* explicit watermark, default interval (falls through) */
		e = au_wbr_mfs_wmark(&args[0], str, create);
		if (unlikely(e)) {
			err = e;
			break;
		}
		/*FALLTHROUGH*/
	case AuWbrCreate_MFS:
	case AuWbrCreate_PMFS:
		create->mfs_second = AUFS_MFS_DEF_SEC;
		break;
	case AuWbrCreate_MFSV:
	case AuWbrCreate_PMFSV:
		/* explicit interval only */
		e = au_wbr_mfs_sec(&args[0], str, create);
		if (unlikely(e))
			err = e;
		break;
	}

	return err;
}

/* AuWbrCreate_* value -> canonical option string */
const char *au_optstr_wbr_create(int wbr_create)
{
	return au_parser_pattern(wbr_create, (void *)au_wbr_create_policy);
}
+
/* copy-up policies: which branch receives a file copied up for writing */
static match_table_t au_wbr_copyup_policy = {
	{AuWbrCopyup_TDP, "tdp"},
	{AuWbrCopyup_TDP, "top-down-parent"},
	{AuWbrCopyup_BUP, "bup"},
	{AuWbrCopyup_BUP, "bottom-up-parent"},
	{AuWbrCopyup_BU, "bu"},
	{AuWbrCopyup_BU, "bottom-up"},
	{-1, NULL}
};

/* string -> AuWbrCopyup_* value, or -1 if unrecognized */
static int noinline_for_stack au_wbr_copyup_val(char *str)
{
	substring_t args[MAX_OPT_ARGS];

	return match_token(str, au_wbr_copyup_policy, args);
}

/* AuWbrCopyup_* value -> canonical option string */
const char *au_optstr_wbr_copyup(int wbr_copyup)
{
	return au_parser_pattern(wbr_copyup, (void *)au_wbr_copyup_policy);
}
+
+/* ---------------------------------------------------------------------- */
+
/* lookup flags for branch/xino paths: follow symlinks, require a dir */
static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

/*
 * Dump the parsed option array through AuDbg()/AuLabel().
 * Purely diagnostic; compiled to an empty function unless
 * CONFIG_AUFS_DEBUG is set.  An unknown opt->type is a parser bug
 * and hits BUG().
 */
static void dump_opts(struct au_opts *opts)
{
#ifdef CONFIG_AUFS_DEBUG
	/* reduce stack space */
	union {
		struct au_opt_add *add;
		struct au_opt_del *del;
		struct au_opt_mod *mod;
		struct au_opt_xino *xino;
		struct au_opt_xino_itrunc *xino_itrunc;
		struct au_opt_wbr_create *create;
	} u;
	struct au_opt *opt;

	opt = opts->opt;
	while (opt->type != Opt_tail) {
		switch (opt->type) {
		case Opt_add:
			u.add = &opt->add;
			AuDbg("add {b%d, %s, 0x%x, %p}\n",
				  u.add->bindex, u.add->pathname, u.add->perm,
				  u.add->path.dentry);
			break;
		case Opt_del:
		case Opt_idel:
			u.del = &opt->del;
			AuDbg("del {%s, %p}\n",
			      u.del->pathname, u.del->h_path.dentry);
			break;
		case Opt_mod:
		case Opt_imod:
			u.mod = &opt->mod;
			AuDbg("mod {%s, 0x%x, %p}\n",
				  u.mod->path, u.mod->perm, u.mod->h_root);
			break;
		case Opt_append:
			u.add = &opt->add;
			AuDbg("append {b%d, %s, 0x%x, %p}\n",
				  u.add->bindex, u.add->pathname, u.add->perm,
				  u.add->path.dentry);
			break;
		case Opt_prepend:
			u.add = &opt->add;
			AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
				  u.add->bindex, u.add->pathname, u.add->perm,
				  u.add->path.dentry);
			break;
		case Opt_dirwh:
			AuDbg("dirwh %d\n", opt->dirwh);
			break;
		case Opt_rdcache:
			AuDbg("rdcache %d\n", opt->rdcache);
			break;
		case Opt_rdblk:
			AuDbg("rdblk %u\n", opt->rdblk);
			break;
		case Opt_rdblk_def:
			AuDbg("rdblk_def\n");
			break;
		case Opt_rdhash:
			AuDbg("rdhash %u\n", opt->rdhash);
			break;
		case Opt_rdhash_def:
			AuDbg("rdhash_def\n");
			break;
		case Opt_xino:
			u.xino = &opt->xino;
			AuDbg("xino {%s %.*s}\n",
				  u.xino->path,
				  AuDLNPair(u.xino->file->f_dentry));
			break;
		case Opt_trunc_xino:
			AuLabel(trunc_xino);
			break;
		case Opt_notrunc_xino:
			AuLabel(notrunc_xino);
			break;
		case Opt_trunc_xino_path:
		case Opt_itrunc_xino:
			u.xino_itrunc = &opt->xino_itrunc;
			AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
			break;

		case Opt_noxino:
			AuLabel(noxino);
			break;
		case Opt_trunc_xib:
			AuLabel(trunc_xib);
			break;
		case Opt_notrunc_xib:
			AuLabel(notrunc_xib);
			break;
		case Opt_shwh:
			AuLabel(shwh);
			break;
		case Opt_noshwh:
			AuLabel(noshwh);
			break;
		case Opt_plink:
			AuLabel(plink);
			break;
		case Opt_noplink:
			AuLabel(noplink);
			break;
		case Opt_list_plink:
			AuLabel(list_plink);
			break;
		case Opt_udba:
			AuDbg("udba %d, %s\n",
				  opt->udba, au_optstr_udba(opt->udba));
			break;
		case Opt_dio:
			AuLabel(dio);
			break;
		case Opt_nodio:
			AuLabel(nodio);
			break;
		case Opt_diropq_a:
			AuLabel(diropq_a);
			break;
		case Opt_diropq_w:
			AuLabel(diropq_w);
			break;
		case Opt_warn_perm:
			AuLabel(warn_perm);
			break;
		case Opt_nowarn_perm:
			AuLabel(nowarn_perm);
			break;
		case Opt_refrof:
			AuLabel(refrof);
			break;
		case Opt_norefrof:
			AuLabel(norefrof);
			break;
		case Opt_verbose:
			AuLabel(verbose);
			break;
		case Opt_noverbose:
			AuLabel(noverbose);
			break;
		case Opt_sum:
			AuLabel(sum);
			break;
		case Opt_nosum:
			AuLabel(nosum);
			break;
		case Opt_wsum:
			AuLabel(wsum);
			break;
		case Opt_wbr_create:
			u.create = &opt->wbr_create;
			AuDbg("create %d, %s\n", u.create->wbr_create,
				  au_optstr_wbr_create(u.create->wbr_create));
			switch (u.create->wbr_create) {
			case AuWbrCreate_MFSV:
			case AuWbrCreate_PMFSV:
				AuDbg("%d sec\n", u.create->mfs_second);
				break;
			case AuWbrCreate_MFSRR:
				AuDbg("%llu watermark\n",
					  u.create->mfsrr_watermark);
				break;
			case AuWbrCreate_MFSRRV:
			case AuWbrCreate_PMFSRRV:
				AuDbg("%llu watermark, %d sec\n",
					  u.create->mfsrr_watermark,
					  u.create->mfs_second);
				break;
			}
			break;
		case Opt_wbr_copyup:
			AuDbg("copyup %d, %s\n", opt->wbr_copyup,
				  au_optstr_wbr_copyup(opt->wbr_copyup));
			break;
		default:
			BUG();
		}
		opt++;
	}
#endif
}
+
+void au_opts_free(struct au_opts *opts)
+{
+       struct au_opt *opt;
+
+       opt = opts->opt;
+       while (opt->type != Opt_tail) {
+               switch (opt->type) {
+               case Opt_add:
+               case Opt_append:
+               case Opt_prepend:
+                       path_put(&opt->add.path);
+                       break;
+               case Opt_del:
+               case Opt_idel:
+                       path_put(&opt->del.h_path);
+                       break;
+               case Opt_mod:
+               case Opt_imod:
+                       dput(opt->mod.h_root);
+                       break;
+               case Opt_xino:
+                       fput(opt->xino.file);
+                       break;
+               }
+               opt++;
+       }
+}
+
/*
 * Parse one branch specification "path[=perm]" into @opt (type Opt_add)
 * and take a path reference on the branch root.
 * When no permission is given, the default is chosen from context:
 * RR if the fs is recognized as real-readonly-capable, RW for the first
 * branch of a read-write mount, otherwise RO.
 * On success the caller owns add->path (released by au_opts_free()).
 */
static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
		   aufs_bindex_t bindex)
{
	int err;
	struct au_opt_add *add = &opt->add;
	char *p;

	add->bindex = bindex;
	add->perm = AuBrPerm_RO; /* also covers an empty "path=" suffix */
	add->pathname = opt_str;
	p = strchr(opt_str, '=');
	if (p) {
		*p++ = 0;
		if (*p)
			add->perm = br_perm_val(p);
	}

	err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
	if (!err) {
		if (!p) {
			/* no explicit permission: derive the default */
			add->perm = AuBrPerm_RO;
			if (au_test_fs_rr(add->path.dentry->d_sb))
				add->perm = AuBrPerm_RR;
			else if (!bindex && !(sb_flags & MS_RDONLY))
				add->perm = AuBrPerm_RW;
		}
		opt->type = Opt_add;
		goto out;
	}
	pr_err("lookup failed %s (%d)\n", add->pathname, err);
	err = -EINVAL;

out:
	return err;
}
+
/*
 * Parse a "del=path" option: resolve the branch root into del->h_path.
 * On success the caller owns the path reference (released by
 * au_opts_free()).
 */
static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
{
	int err;

	del->pathname = args[0].from;
	AuDbg("del path %s\n", del->pathname);

	err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
	if (unlikely(err))
		pr_err("lookup failed %s (%d)\n", del->pathname, err);

	return err;
}
+
#if 0 /* reserved for future use */
/*
 * Variant of au_opts_parse_del() addressed by branch index instead of
 * path: validates @bindex against the current branch range under the
 * superblock read lock, then grabs references to that branch's dentry
 * and mount.  Disabled (#if 0) until the "idel" option is wired up.
 */
static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
			      struct au_opt_del *del, substring_t args[])
{
	int err;
	struct dentry *root;

	err = -EINVAL;
	root = sb->s_root;
	aufs_read_lock(root, AuLock_FLUSH);
	if (bindex < 0 || au_sbend(sb) < bindex) {
		pr_err("out of bounds, %d\n", bindex);
		goto out;
	}

	err = 0;
	del->h_path.dentry = dget(au_h_dptr(root, bindex));
	del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));

out:
	aufs_read_unlock(root, !AuLock_IR);
	return err;
}
#endif
+
+static int noinline_for_stack
+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
+{
+       int err;
+       struct path path;
+       char *p;
+
+       err = -EINVAL;
+       mod->path = args[0].from;
+       p = strchr(mod->path, '=');
+       if (unlikely(!p)) {
+               pr_err("no permssion %s\n", args[0].from);
+               goto out;
+       }
+
+       *p++ = 0;
+       err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
+       if (unlikely(err)) {
+               pr_err("lookup failed %s (%d)\n", mod->path, err);
+               goto out;
+       }
+
+       mod->perm = br_perm_val(p);
+       AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
+       mod->h_root = dget(path.dentry);
+       path_put(&path);
+
+out:
+       return err;
+}
+
#if 0 /* reserved for future use */
/*
 * Variant of au_opts_parse_mod() addressed by branch index: validates
 * @bindex under the superblock read lock and references the existing
 * branch-root dentry instead of doing a path lookup.  Disabled (#if 0)
 * until the "imod" option is wired up.
 */
static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
			      struct au_opt_mod *mod, substring_t args[])
{
	int err;
	struct dentry *root;

	err = -EINVAL;
	root = sb->s_root;
	aufs_read_lock(root, AuLock_FLUSH);
	if (bindex < 0 || au_sbend(sb) < bindex) {
		pr_err("out of bounds, %d\n", bindex);
		goto out;
	}

	err = 0;
	mod->perm = br_perm_val(args[1].from);
	AuDbg("mod path %s, perm 0x%x, %s\n",
	      mod->path, mod->perm, args[1].from);
	mod->h_root = dget(au_h_dptr(root, bindex));

out:
	aufs_read_unlock(root, !AuLock_IR);
	return err;
}
#endif
+
/*
 * Parse a "xino=path" option: create/open the external inode-number
 * translation file.  The file must not live on this aufs mount itself
 * (checked via f_dentry->d_sb == sb), otherwise it is rejected with
 * -EINVAL.  On success the caller owns xino->file (fput() in
 * au_opts_free()).
 */
static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
			      substring_t args[])
{
	int err;
	struct file *file;

	file = au_xino_create(sb, args[0].from, /*silent*/0);
	err = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	err = -EINVAL;
	if (unlikely(file->f_dentry->d_sb == sb)) {
		fput(file);
		pr_err("%s must be outside\n", args[0].from);
		goto out;
	}

	err = 0;
	xino->file = file;
	xino->path = args[0].from;

out:
	return err;
}
+
/*
 * Parse "trunc_xino=path": resolve @path and translate it into the
 * index of the branch whose root dentry matches, scanning all branches
 * under the superblock read lock.  bindex stays -1 (-> -EINVAL) when
 * the path is not a current branch root.
 */
static int noinline_for_stack
au_opts_parse_xino_itrunc_path(struct super_block *sb,
			       struct au_opt_xino_itrunc *xino_itrunc,
			       substring_t args[])
{
	int err;
	aufs_bindex_t bend, bindex;
	struct path path;
	struct dentry *root;

	err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
	if (unlikely(err)) {
		pr_err("lookup failed %s (%d)\n", args[0].from, err);
		goto out;
	}

	xino_itrunc->bindex = -1;
	root = sb->s_root;
	aufs_read_lock(root, AuLock_FLUSH);
	bend = au_sbend(sb);
	for (bindex = 0; bindex <= bend; bindex++) {
		if (au_h_dptr(root, bindex) == path.dentry) {
			xino_itrunc->bindex = bindex;
			break;
		}
	}
	aufs_read_unlock(root, !AuLock_IR);
	path_put(&path);

	if (unlikely(xino_itrunc->bindex < 0)) {
		pr_err("no such branch %s\n", args[0].from);
		err = -EINVAL;
	}

out:
	return err;
}
+
+/*
+ * Parse the comma-separated mount option string 'str' into the opts->opt
+ * array.  Each recognized token fills one slot and is tagged with its type;
+ * the array is terminated by an Opt_tail entry.  Called without aufs lock.
+ * Returns 0 on success, -E2BIG when opts->opt overflows, -ENOMEM, or
+ * -EINVAL for malformed options.  On error all parsed options are freed.
+ */
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
+{
+       int err, n, token;
+       aufs_bindex_t bindex;
+       unsigned char skipped;
+       struct dentry *root;
+       struct au_opt *opt, *opt_tail;
+       char *opt_str;
+       /* reduce the stack space */
+       union {
+               struct au_opt_xino_itrunc *xino_itrunc;
+               struct au_opt_wbr_create *create;
+       } u;
+       struct {
+               substring_t args[MAX_OPT_ARGS];
+       } *a;
+
+       err = -ENOMEM;
+       a = kmalloc(sizeof(*a), GFP_NOFS);
+       if (unlikely(!a))
+               goto out;
+
+       root = sb->s_root;
+       err = 0;
+       bindex = 0;
+       opt = opts->opt;
+       /* last usable slot; the terminating Opt_tail must always fit */
+       opt_tail = opt + opts->max_opt - 1;
+       opt->type = Opt_tail;
+       while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
+               err = -EINVAL;
+               /* 'skipped' means this token consumed no slot in opts->opt */
+               skipped = 0;
+               token = match_token(opt_str, options, a->args);
+               switch (token) {
+               case Opt_br:
+                       /* "br:/a:/b" style: one opt slot per colon-separated
+                        * branch, advancing 'opt' inside this inner loop */
+                       err = 0;
+                       while (!err && (opt_str = strsep(&a->args[0].from, ":"))
+                              && *opt_str) {
+                               err = opt_add(opt, opt_str, opts->sb_flags,
+                                             bindex++);
+                               if (unlikely(!err && ++opt > opt_tail)) {
+                                       err = -E2BIG;
+                                       break;
+                               }
+                               opt->type = Opt_tail;
+                               skipped = 1;
+                       }
+                       break;
+               case Opt_add:
+                       if (unlikely(match_int(&a->args[0], &n))) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       bindex = n;
+                       err = opt_add(opt, a->args[1].from, opts->sb_flags,
+                                     bindex);
+                       if (!err)
+                               opt->type = token;
+                       break;
+               case Opt_append:
+                       err = opt_add(opt, a->args[0].from, opts->sb_flags,
+                                     /*dummy bindex*/1);
+                       if (!err)
+                               opt->type = token;
+                       break;
+               case Opt_prepend:
+                       err = opt_add(opt, a->args[0].from, opts->sb_flags,
+                                     /*bindex*/0);
+                       if (!err)
+                               opt->type = token;
+                       break;
+               case Opt_del:
+                       err = au_opts_parse_del(&opt->del, a->args);
+                       if (!err)
+                               opt->type = token;
+                       break;
+#if 0 /* reserved for future use */
+               case Opt_idel:
+                       del->pathname = "(indexed)";
+                       if (unlikely(match_int(&args[0], &n))) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       err = au_opts_parse_idel(sb, n, &opt->del, a->args);
+                       if (!err)
+                               opt->type = token;
+                       break;
+#endif
+               case Opt_mod:
+                       err = au_opts_parse_mod(&opt->mod, a->args);
+                       if (!err)
+                               opt->type = token;
+                       break;
+#ifdef IMOD /* reserved for future use */
+               case Opt_imod:
+                       u.mod->path = "(indexed)";
+                       if (unlikely(match_int(&a->args[0], &n))) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
+                       if (!err)
+                               opt->type = token;
+                       break;
+#endif
+               case Opt_xino:
+                       err = au_opts_parse_xino(sb, &opt->xino, a->args);
+                       if (!err)
+                               opt->type = token;
+                       break;
+
+               case Opt_trunc_xino_path:
+                       err = au_opts_parse_xino_itrunc_path
+                               (sb, &opt->xino_itrunc, a->args);
+                       if (!err)
+                               opt->type = token;
+                       break;
+
+               case Opt_itrunc_xino:
+                       /* branch is given by index; validate it against the
+                        * current bottom branch under the read lock */
+                       u.xino_itrunc = &opt->xino_itrunc;
+                       if (unlikely(match_int(&a->args[0], &n))) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       u.xino_itrunc->bindex = n;
+                       aufs_read_lock(root, AuLock_FLUSH);
+                       if (n < 0 || au_sbend(sb) < n) {
+                               pr_err("out of bounds, %d\n", n);
+                               aufs_read_unlock(root, !AuLock_IR);
+                               break;
+                       }
+                       aufs_read_unlock(root, !AuLock_IR);
+                       err = 0;
+                       opt->type = token;
+                       break;
+
+               case Opt_dirwh:
+                       if (unlikely(match_int(&a->args[0], &opt->dirwh)))
+                               break;
+                       err = 0;
+                       opt->type = token;
+                       break;
+
+               case Opt_rdcache:
+                       if (unlikely(match_int(&a->args[0], &n))) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       if (unlikely(n > AUFS_RDCACHE_MAX)) {
+                               pr_err("rdcache must be smaller than %d\n",
+                                      AUFS_RDCACHE_MAX);
+                               break;
+                       }
+                       opt->rdcache = n;
+                       err = 0;
+                       opt->type = token;
+                       break;
+               case Opt_rdblk:
+                       if (unlikely(match_int(&a->args[0], &n)
+                                    || n < 0
+                                    || n > KMALLOC_MAX_SIZE)) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       if (unlikely(n && n < NAME_MAX)) {
+                               pr_err("rdblk must be larger than %d\n",
+                                      NAME_MAX);
+                               break;
+                       }
+                       opt->rdblk = n;
+                       err = 0;
+                       opt->type = token;
+                       break;
+               case Opt_rdhash:
+                       if (unlikely(match_int(&a->args[0], &n)
+                                    || n < 0
+                                    || n * sizeof(struct hlist_head)
+                                    > KMALLOC_MAX_SIZE)) {
+                               pr_err("bad integer in %s\n", opt_str);
+                               break;
+                       }
+                       opt->rdhash = n;
+                       err = 0;
+                       opt->type = token;
+                       break;
+
+               /* simple flag options: the token itself is the payload */
+               case Opt_trunc_xino:
+               case Opt_notrunc_xino:
+               case Opt_noxino:
+               case Opt_trunc_xib:
+               case Opt_notrunc_xib:
+               case Opt_shwh:
+               case Opt_noshwh:
+               case Opt_plink:
+               case Opt_noplink:
+               case Opt_list_plink:
+               case Opt_dio:
+               case Opt_nodio:
+               case Opt_diropq_a:
+               case Opt_diropq_w:
+               case Opt_warn_perm:
+               case Opt_nowarn_perm:
+               case Opt_refrof:
+               case Opt_norefrof:
+               case Opt_verbose:
+               case Opt_noverbose:
+               case Opt_sum:
+               case Opt_nosum:
+               case Opt_wsum:
+               case Opt_rdblk_def:
+               case Opt_rdhash_def:
+                       err = 0;
+                       opt->type = token;
+                       break;
+
+               case Opt_udba:
+                       opt->udba = udba_val(a->args[0].from);
+                       if (opt->udba >= 0) {
+                               err = 0;
+                               opt->type = token;
+                       } else
+                               pr_err("wrong value, %s\n", opt_str);
+                       break;
+
+               case Opt_wbr_create:
+                       u.create = &opt->wbr_create;
+                       u.create->wbr_create
+                               = au_wbr_create_val(a->args[0].from, u.create);
+                       if (u.create->wbr_create >= 0) {
+                               err = 0;
+                               opt->type = token;
+                       } else
+                               pr_err("wrong value, %s\n", opt_str);
+                       break;
+               case Opt_wbr_copyup:
+                       opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
+                       if (opt->wbr_copyup >= 0) {
+                               err = 0;
+                               opt->type = token;
+                       } else
+                               pr_err("wrong value, %s\n", opt_str);
+                       break;
+
+               case Opt_ignore:
+                       pr_warn("ignored %s\n", opt_str);
+                       /*FALLTHROUGH*/
+               case Opt_ignore_silent:
+                       skipped = 1;
+                       err = 0;
+                       break;
+               case Opt_err:
+                       pr_err("unknown option %s\n", opt_str);
+                       break;
+               }
+
+               /* advance to the next free slot and keep the array
+                * terminated; Opt_br and ignored tokens set 'skipped' */
+               if (!err && !skipped) {
+                       if (unlikely(++opt > opt_tail)) {
+                               err = -E2BIG;
+                               opt--;
+                               opt->type = Opt_tail;
+                               break;
+                       }
+                       opt->type = Opt_tail;
+               }
+       }
+
+       kfree(a);
+       dump_opts(opts);
+       if (unlikely(err))
+               au_opts_free(opts);
+
+out:
+       return err;
+}
+
+/*
+ * Switch the active write-branch creation policy of the superblock.
+ * Finalizes the previous policy (->fin), installs the new ops, copies the
+ * policy parameters (watermark / expire seconds) where applicable, then
+ * initializes the new policy (->init, its error is ignored).
+ * Returns 1 ("handled") on success, or a negative error from ->fin.
+ */
+static int au_opt_wbr_create(struct super_block *sb,
+                            struct au_opt_wbr_create *create)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       err = 1; /* handled */
+       sbinfo = au_sbi(sb);
+       if (sbinfo->si_wbr_create_ops->fin) {
+               err = sbinfo->si_wbr_create_ops->fin(sb);
+               if (!err)
+                       err = 1;
+       }
+
+       sbinfo->si_wbr_create = create->wbr_create;
+       sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
+       switch (create->wbr_create) {
+       case AuWbrCreate_MFSRRV:
+       case AuWbrCreate_MFSRR:
+       case AuWbrCreate_PMFSRR:
+       case AuWbrCreate_PMFSRRV:
+               sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
+               /*FALLTHROUGH*/
+       case AuWbrCreate_MFS:
+       case AuWbrCreate_MFSV:
+       case AuWbrCreate_PMFS:
+       case AuWbrCreate_PMFSV:
+               sbinfo->si_wbr_mfs.mfs_expire
+                       = msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC);
+               break;
+       }
+
+       if (sbinfo->si_wbr_create_ops->init)
+               sbinfo->si_wbr_create_ops->init(sb); /* ignore */
+
+       return err;
+}
+
+/*
+ * Apply a single "simple" (non-branch, non-xino-file) option to the
+ * superblock info.
+ * returns,
+ * plus: processed without an error
+ * zero: unprocessed (not a simple option; the caller tries other handlers)
+ * minus: error
+ */
+static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
+                        struct au_opts *opts)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       err = 1; /* handled */
+       sbinfo = au_sbi(sb);
+       switch (opt->type) {
+       case Opt_udba:
+               /* replace the whole UDBA policy field */
+               sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+               sbinfo->si_mntflags |= opt->udba;
+               opts->given_udba |= opt->udba;
+               break;
+
+       case Opt_plink:
+               au_opt_set(sbinfo->si_mntflags, PLINK);
+               break;
+       case Opt_noplink:
+               if (au_opt_test(sbinfo->si_mntflags, PLINK))
+                       au_plink_put(sb, /*verbose*/1);
+               au_opt_clr(sbinfo->si_mntflags, PLINK);
+               break;
+       case Opt_list_plink:
+               if (au_opt_test(sbinfo->si_mntflags, PLINK))
+                       au_plink_list(sb);
+               break;
+
+       case Opt_dio:
+               au_opt_set(sbinfo->si_mntflags, DIO);
+               au_fset_opts(opts->flags, REFRESH_DYAOP);
+               break;
+       case Opt_nodio:
+               au_opt_clr(sbinfo->si_mntflags, DIO);
+               au_fset_opts(opts->flags, REFRESH_DYAOP);
+               break;
+
+       case Opt_diropq_a:
+               au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+               break;
+       case Opt_diropq_w:
+               au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+               break;
+
+       case Opt_warn_perm:
+               au_opt_set(sbinfo->si_mntflags, WARN_PERM);
+               break;
+       case Opt_nowarn_perm:
+               au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
+               break;
+
+       case Opt_refrof:
+               au_opt_set(sbinfo->si_mntflags, REFROF);
+               break;
+       case Opt_norefrof:
+               au_opt_clr(sbinfo->si_mntflags, REFROF);
+               break;
+
+       case Opt_verbose:
+               au_opt_set(sbinfo->si_mntflags, VERBOSE);
+               break;
+       case Opt_noverbose:
+               au_opt_clr(sbinfo->si_mntflags, VERBOSE);
+               break;
+
+       case Opt_sum:
+               au_opt_set(sbinfo->si_mntflags, SUM);
+               break;
+       case Opt_wsum:
+               au_opt_clr(sbinfo->si_mntflags, SUM);
+               au_opt_set(sbinfo->si_mntflags, SUM_W);
+               /* bugfix: without this break the fallthrough into Opt_nosum
+                * immediately cleared SUM_W again, making "wsum" behave
+                * exactly like "nosum" and leaving the set above dead */
+               break;
+       case Opt_nosum:
+               au_opt_clr(sbinfo->si_mntflags, SUM);
+               au_opt_clr(sbinfo->si_mntflags, SUM_W);
+               break;
+
+       case Opt_wbr_create:
+               err = au_opt_wbr_create(sb, &opt->wbr_create);
+               break;
+       case Opt_wbr_copyup:
+               sbinfo->si_wbr_copyup = opt->wbr_copyup;
+               sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
+               break;
+
+       case Opt_dirwh:
+               sbinfo->si_dirwh = opt->dirwh;
+               break;
+
+       case Opt_rdcache:
+               /* rdcache is given in seconds, stored in jiffies */
+               sbinfo->si_rdcache
+                       = msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC);
+               break;
+       case Opt_rdblk:
+               sbinfo->si_rdblk = opt->rdblk;
+               break;
+       case Opt_rdblk_def:
+               sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+               break;
+       case Opt_rdhash:
+               sbinfo->si_rdhash = opt->rdhash;
+               break;
+       case Opt_rdhash_def:
+               sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+               break;
+
+       case Opt_shwh:
+               au_opt_set(sbinfo->si_mntflags, SHWH);
+               break;
+       case Opt_noshwh:
+               au_opt_clr(sbinfo->si_mntflags, SHWH);
+               break;
+
+       case Opt_trunc_xino:
+               au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
+               break;
+       case Opt_notrunc_xino:
+               au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
+               break;
+
+       case Opt_trunc_xino_path:
+       case Opt_itrunc_xino:
+               /* both parse paths stored the branch index in xino_itrunc */
+               err = au_xino_trunc(sb, opt->xino_itrunc.bindex);
+               if (!err)
+                       err = 1;
+               break;
+
+       case Opt_trunc_xib:
+               au_fset_opts(opts->flags, TRUNC_XIB);
+               break;
+       case Opt_notrunc_xib:
+               au_fclr_opts(opts->flags, TRUNC_XIB);
+               break;
+
+       default:
+               err = 0;
+               break;
+       }
+
+       return err;
+}
+
+/*
+ * Apply a single branch-manipulating option (add/append/prepend/del/mod).
+ * returns tri-state.
+ * plus: processed without an error
+ * zero: unprocessed
+ * minus: error
+ */
+static int au_opt_br(struct super_block *sb, struct au_opt *opt,
+                    struct au_opts *opts)
+{
+       int err, do_refresh;
+
+       err = 0;
+       switch (opt->type) {
+       case Opt_append:
+               /* append: place after the current bottom branch */
+               opt->add.bindex = au_sbend(sb) + 1;
+               if (opt->add.bindex < 0)
+                       opt->add.bindex = 0;
+               goto add;
+       case Opt_prepend:
+               opt->add.bindex = 0;
+       add: /* jumped to from Opt_append/Opt_prepend after fixing bindex */
+       case Opt_add:
+               err = au_br_add(sb, &opt->add,
+                               au_ftest_opts(opts->flags, REMOUNT));
+               if (!err) {
+                       err = 1;
+                       au_fset_opts(opts->flags, REFRESH);
+               }
+               break;
+
+       case Opt_del:
+       case Opt_idel:
+               err = au_br_del(sb, &opt->del,
+                               au_ftest_opts(opts->flags, REMOUNT));
+               if (!err) {
+                       err = 1;
+                       au_fset_opts(opts->flags, TRUNC_XIB);
+                       au_fset_opts(opts->flags, REFRESH);
+               }
+               break;
+
+       case Opt_mod:
+       case Opt_imod:
+               /* do_refresh is an output of au_br_mod, valid on success */
+               err = au_br_mod(sb, &opt->mod,
+                               au_ftest_opts(opts->flags, REMOUNT),
+                               &do_refresh);
+               if (!err) {
+                       err = 1;
+                       if (do_refresh)
+                               au_fset_opts(opts->flags, REFRESH);
+               }
+               break;
+       }
+
+       return err;
+}
+
+/*
+ * Apply an xino-file option.  For Opt_xino, installs the new xino file,
+ * records it through *opt_xino, and sets the xino branch id to the branch
+ * whose root hosts the file (or -1 when none matches).  For Opt_noxino,
+ * disables xino and stores the sentinel (void *)-1 in *opt_xino so the
+ * caller can tell "noxino given" apart from "no xino option at all".
+ */
+static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
+                      struct au_opt_xino **opt_xino,
+                      struct au_opts *opts)
+{
+       int err;
+       aufs_bindex_t bend, bindex;
+       struct dentry *root, *parent, *h_root;
+
+       err = 0;
+       switch (opt->type) {
+       case Opt_xino:
+               err = au_xino_set(sb, &opt->xino,
+                                 !!au_ftest_opts(opts->flags, REMOUNT));
+               if (unlikely(err))
+                       break;
+
+               *opt_xino = &opt->xino;
+               au_xino_brid_set(sb, -1);
+
+               /* safe d_parent access */
+               parent = opt->xino.file->f_dentry->d_parent;
+               root = sb->s_root;
+               bend = au_sbend(sb);
+               for (bindex = 0; bindex <= bend; bindex++) {
+                       h_root = au_h_dptr(root, bindex);
+                       if (h_root == parent) {
+                               au_xino_brid_set(sb, au_sbr_id(sb, bindex));
+                               break;
+                       }
+               }
+               break;
+
+       case Opt_noxino:
+               au_xino_clr(sb);
+               au_xino_brid_set(sb, -1);
+               *opt_xino = (void *)-1;
+               break;
+       }
+
+       return err;
+}
+
+/*
+ * Verify the resulting option combination and (re-)initialize the whiteout
+ * base of every branch that needs it, warning about questionable setups
+ * (ro first branch while mounted rw, shwh on rw, hnotify without xino).
+ * 'pending' carries UDBA bits about to be applied but not yet stored.
+ * Returns 0 or the first au_wh_init() error.
+ */
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+                  unsigned int pending)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       unsigned char do_plink, skip, do_free;
+       struct au_branch *br;
+       struct au_wbr *wbr;
+       struct dentry *root;
+       struct inode *dir, *h_dir;
+       struct au_sbinfo *sbinfo;
+       struct au_hinode *hdir;
+
+       SiMustAnyLock(sb);
+
+       sbinfo = au_sbi(sb);
+       AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
+
+       if (!(sb_flags & MS_RDONLY)) {
+               if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
+                       pr_warn("first branch should be rw\n");
+               if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
+                       pr_warn("shwh should be used with ro\n");
+       }
+
+       if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
+           && !au_opt_test(sbinfo->si_mntflags, XINO))
+               pr_warn("udba=*notify requires xino\n");
+
+       err = 0;
+       root = sb->s_root;
+       dir = root->d_inode;
+       do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
+       bend = au_sbend(sb);
+       for (bindex = 0; !err && bindex <= bend; bindex++) {
+               /* decide under the wbr read lock whether this branch's
+                * whiteout state already matches its permission bits */
+               skip = 0;
+               h_dir = au_h_iptr(dir, bindex);
+               br = au_sbr(sb, bindex);
+               do_free = 0;
+
+               wbr = br->br_wbr;
+               if (wbr)
+                       wbr_wh_read_lock(wbr);
+
+               if (!au_br_writable(br->br_perm)) {
+                       /* read-only branch: its wbr, if any, must be empty */
+                       do_free = !!wbr;
+                       skip = (!wbr
+                               || (!wbr->wbr_whbase
+                                   && !wbr->wbr_plink
+                                   && !wbr->wbr_orph));
+               } else if (!au_br_wh_linkable(br->br_perm)) {
+                       /* skip = (!br->br_whbase && !br->br_orph); */
+                       skip = (!wbr || !wbr->wbr_whbase);
+                       if (skip && wbr) {
+                               if (do_plink)
+                                       skip = !!wbr->wbr_plink;
+                               else
+                                       skip = !wbr->wbr_plink;
+                       }
+               } else {
+                       /* skip = (br->br_whbase && br->br_ohph); */
+                       skip = (wbr && wbr->wbr_whbase);
+                       if (skip) {
+                               if (do_plink)
+                                       skip = !!wbr->wbr_plink;
+                               else
+                                       skip = !wbr->wbr_plink;
+                       }
+               }
+               if (wbr)
+                       wbr_wh_read_unlock(wbr);
+
+               if (skip)
+                       continue;
+
+               /* re-init the whiteout base under the branch dir's i_mutex
+                * and the wbr write lock */
+               hdir = au_hi(dir, bindex);
+               au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+               if (wbr)
+                       wbr_wh_write_lock(wbr);
+               err = au_wh_init(br, sb);
+               if (wbr)
+                       wbr_wh_write_unlock(wbr);
+               au_hn_imtx_unlock(hdir);
+
+               if (!err && do_free) {
+                       kfree(wbr);
+                       br->br_wbr = NULL;
+               }
+       }
+
+       return err;
+}
+
+/*
+ * Apply all parsed options at mount time, in passes:
+ * 1. simple options;
+ * 2. branch options, with xino disabled and udba forced to REVAL so branch
+ *    manipulation is not disturbed (the originals are saved in 'tmp');
+ * 3. xino options, then verification;
+ * 4. restore the default xino file if none was given, then restore udba
+ *    and reset hnotify on every branch.
+ */
+int au_opts_mount(struct super_block *sb, struct au_opts *opts)
+{
+       int err;
+       unsigned int tmp;
+       aufs_bindex_t bindex, bend;
+       struct au_opt *opt;
+       struct au_opt_xino *opt_xino, xino;
+       struct au_sbinfo *sbinfo;
+       struct au_branch *br;
+
+       SiMustWriteLock(sb);
+
+       err = 0;
+       opt_xino = NULL;
+       opt = opts->opt;
+       while (err >= 0 && opt->type != Opt_tail)
+               err = au_opt_simple(sb, opt++, opts);
+       if (err > 0)
+               err = 0;
+       else if (unlikely(err < 0))
+               goto out;
+
+       /* disable xino and udba temporary */
+       sbinfo = au_sbi(sb);
+       tmp = sbinfo->si_mntflags;
+       au_opt_clr(sbinfo->si_mntflags, XINO);
+       au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
+
+       opt = opts->opt;
+       while (err >= 0 && opt->type != Opt_tail)
+               err = au_opt_br(sb, opt++, opts);
+       if (err > 0)
+               err = 0;
+       else if (unlikely(err < 0))
+               goto out;
+
+       bend = au_sbend(sb);
+       if (unlikely(bend < 0)) {
+               err = -EINVAL;
+               pr_err("no branches\n");
+               goto out;
+       }
+
+       if (au_opt_test(tmp, XINO))
+               au_opt_set(sbinfo->si_mntflags, XINO);
+       opt = opts->opt;
+       while (!err && opt->type != Opt_tail)
+               err = au_opt_xino(sb, opt++, &opt_xino, opts);
+       if (unlikely(err))
+               goto out;
+
+       err = au_opts_verify(sb, sb->s_flags, tmp);
+       if (unlikely(err))
+               goto out;
+
+       /* restore xino */
+       if (au_opt_test(tmp, XINO) && !opt_xino) {
+               xino.file = au_xino_def(sb);
+               err = PTR_ERR(xino.file);
+               if (IS_ERR(xino.file))
+                       goto out;
+
+               err = au_xino_set(sb, &xino, /*remount*/0);
+               fput(xino.file);
+               if (unlikely(err))
+                       goto out;
+       }
+
+       /* restore udba */
+       tmp &= AuOptMask_UDBA;
+       sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+       sbinfo->si_mntflags |= tmp;
+       bend = au_sbend(sb);
+       for (bindex = 0; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               err = au_hnotify_reset_br(tmp, br, br->br_perm);
+               if (unlikely(err))
+                       AuIOErr("hnotify failed on br %d, %d, ignored\n",
+                               bindex, err);
+               /* go on even if err */
+       }
+       if (au_opt_test(tmp, UDBA_HNOTIFY)) {
+               struct inode *dir = sb->s_root->d_inode;
+               au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
+       }
+
+out:
+       return err;
+}
+
+/*
+ * Apply all parsed options at remount time.  Unlike au_opts_mount(), each
+ * option is pushed through every handler in turn, and processing continues
+ * even after an error (the first error is what gets returned).  Sets
+ * AuOpts_REFRESH in opts->flags when the caller must refresh the tree.
+ */
+int au_opts_remount(struct super_block *sb, struct au_opts *opts)
+{
+       int err, rerr;
+       struct inode *dir;
+       struct au_opt_xino *opt_xino;
+       struct au_opt *opt;
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       dir = sb->s_root->d_inode;
+       sbinfo = au_sbi(sb);
+       err = 0;
+       opt_xino = NULL;
+       opt = opts->opt;
+       while (err >= 0 && opt->type != Opt_tail) {
+               err = au_opt_simple(sb, opt, opts);
+               if (!err)
+                       err = au_opt_br(sb, opt, opts);
+               if (!err)
+                       err = au_opt_xino(sb, opt, &opt_xino, opts);
+               opt++;
+       }
+       if (err > 0)
+               err = 0;
+       AuTraceErr(err);
+       /* go on even err */
+
+       rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
+       if (unlikely(rerr && !err))
+               err = rerr;
+
+       if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
+               rerr = au_xib_trunc(sb);
+               if (unlikely(rerr && !err))
+                       err = rerr;
+       }
+
+       /* will be handled by the caller */
+       if (!au_ftest_opts(opts->flags, REFRESH)
+           && (opts->given_udba || au_opt_test(sbinfo->si_mntflags, XINO)))
+               au_fset_opts(opts->flags, REFRESH);
+
+       AuDbg("status 0x%x\n", opts->flags);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* return only the UDBA policy bits of the superblock's mount flags */
+unsigned int au_opt_udba(struct super_block *sb)
+{
+       unsigned int mntflags;
+
+       mntflags = au_mntflags(sb);
+       return mntflags & AuOptMask_UDBA;
+}
diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h
new file mode 100644 (file)
index 0000000..182a464
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * mount options/flags
+ */
+
+#ifndef __AUFS_OPTS_H__
+#define __AUFS_OPTS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct file;
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/* mount flags */
+#define AuOpt_XINO             1               /* external inode number bitmap
+                                                  and translation table */
+#define AuOpt_TRUNC_XINO       (1 << 1)        /* truncate xino files */
+#define AuOpt_UDBA_NONE                (1 << 2)        /* users direct branch access */
+#define AuOpt_UDBA_REVAL       (1 << 3)
+#define AuOpt_UDBA_HNOTIFY     (1 << 4)
+#define AuOpt_SHWH             (1 << 5)        /* show whiteout */
+#define AuOpt_PLINK            (1 << 6)        /* pseudo-link */
+#define AuOpt_DIRPERM1         (1 << 7)        /* unimplemented */
+#define AuOpt_REFROF           (1 << 8)        /* unimplemented */
+#define AuOpt_ALWAYS_DIROPQ    (1 << 9)        /* policy to creating diropq */
+#define AuOpt_SUM              (1 << 10)       /* summation for statfs(2) */
+#define AuOpt_SUM_W            (1 << 11)       /* unimplemented */
+#define AuOpt_WARN_PERM                (1 << 12)       /* warn when add-branch */
+#define AuOpt_VERBOSE          (1 << 13)       /* busy inode when del-branch */
+#define AuOpt_DIO              (1 << 14)       /* direct io */
+
+/* features compiled out collapse to 0 so au_opt_test() is always false */
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuOpt_UDBA_HNOTIFY
+#define AuOpt_UDBA_HNOTIFY     0
+#endif
+#ifndef CONFIG_AUFS_SHWH
+#undef AuOpt_SHWH
+#define AuOpt_SHWH             0
+#endif
+
+#define AuOpt_Def      (AuOpt_XINO \
+                        | AuOpt_UDBA_REVAL \
+                        | AuOpt_PLINK \
+                        /* | AuOpt_DIRPERM1 */ \
+                        | AuOpt_WARN_PERM)
+#define AuOptMask_UDBA (AuOpt_UDBA_NONE \
+                        | AuOpt_UDBA_REVAL \
+                        | AuOpt_UDBA_HNOTIFY)
+
+#define au_opt_test(flags, name)       (flags & AuOpt_##name)
+/* the BUILD_BUG_ON rejects UDBA bits; those go through au_opt_set_udba()
+ * which replaces the whole UDBA field instead of OR-ing into it */
+#define au_opt_set(flags, name) do { \
+       BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
+       ((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_set_udba(flags, name) do { \
+       (flags) &= ~AuOptMask_UDBA; \
+       ((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_clr(flags, name) do { \
+       ((flags) &= ~AuOpt_##name); \
+} while (0)
+
+static inline unsigned int au_opts_plink(unsigned int mntflags)
+{
+       unsigned int ret = mntflags;
+
+#ifndef CONFIG_PROC_FS
+       /* without procfs support the pseudo-link flag is masked out */
+       ret &= ~AuOpt_PLINK;
+#endif
+       return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies to select one among multiple writable branches */
+enum {
+       AuWbrCreate_TDP,        /* top down parent */
+       AuWbrCreate_RR,         /* round robin */
+       AuWbrCreate_MFS,        /* most free space */
+       AuWbrCreate_MFSV,       /* mfs with seconds */
+       AuWbrCreate_MFSRR,      /* mfs then rr */
+       AuWbrCreate_MFSRRV,     /* mfs then rr with seconds */
+       AuWbrCreate_PMFS,       /* parent and mfs */
+       AuWbrCreate_PMFSV,      /* parent and mfs with seconds */
+       AuWbrCreate_PMFSRR,     /* parent, mfs and round-robin */
+       AuWbrCreate_PMFSRRV,    /* plus seconds */
+
+       AuWbrCreate_Def = AuWbrCreate_TDP
+};
+
+/* policies to select the branch for copy-up (see au_optstr_wbr_copyup) */
+enum {
+       AuWbrCopyup_TDP,        /* top down parent */
+       AuWbrCopyup_BUP,        /* bottom up parent */
+       AuWbrCopyup_BU,         /* bottom up */
+
+       AuWbrCopyup_Def = AuWbrCopyup_TDP
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* payload of the br/add/append/prepend options */
+struct au_opt_add {
+       aufs_bindex_t   bindex;
+       char            *pathname;
+       int             perm;
+       struct path     path;
+};
+
+/* payload of the del option */
+struct au_opt_del {
+       char            *pathname;
+       struct path     h_path;
+};
+
+/* payload of the mod (change branch permission) option */
+struct au_opt_mod {
+       char            *path;
+       int             perm;
+       struct dentry   *h_root;
+};
+
+/* payload of the xino option */
+struct au_opt_xino {
+       char            *path;
+       struct file     *file;
+};
+
+/* payload of the itrunc_xino/trunc_xino_path options */
+struct au_opt_xino_itrunc {
+       aufs_bindex_t   bindex;
+};
+
+/* payload of the create (write-branch policy) option */
+struct au_opt_wbr_create {
+       int                     wbr_create;     /* AuWbrCreate_* */
+       int                     mfs_second;
+       unsigned long long      mfsrr_watermark;
+};
+
+/* one parsed mount option: 'type' is the Opt_* token (Opt_tail terminates
+ * an array of these) and selects the live member of the anonymous union */
+struct au_opt {
+       int type;
+       union {
+               struct au_opt_xino      xino;
+               struct au_opt_xino_itrunc xino_itrunc;
+               struct au_opt_add       add;
+               struct au_opt_del       del;
+               struct au_opt_mod       mod;
+               int                     dirwh;
+               int                     rdcache;
+               unsigned int            rdblk;
+               unsigned int            rdhash;
+               int                     udba;
+               struct au_opt_wbr_create wbr_create;
+               int                     wbr_copyup;
+       };
+};
+
+/* opts flags */
+#define AuOpts_REMOUNT         1
+#define AuOpts_REFRESH         (1 << 1)
+#define AuOpts_TRUNC_XIB       (1 << 2)
+#define AuOpts_REFRESH_DYAOP   (1 << 3)
+#define au_ftest_opts(flags, name)     ((flags) & AuOpts_##name)
+#define au_fset_opts(flags, name) \
+       do { (flags) |= AuOpts_##name; } while (0)
+#define au_fclr_opts(flags, name) \
+       do { (flags) &= ~AuOpts_##name; } while (0)
+
+/* state shared across one parse/apply cycle of the mount options */
+struct au_opts {
+       struct au_opt   *opt;           /* array of max_opt entries */
+       int             max_opt;
+
+       unsigned int    given_udba;     /* udba bits seen while applying */
+       unsigned int    flags;          /* AuOpts_* status bits */
+       unsigned long   sb_flags;       /* MS_* flags from mount(2) */
+};
+
+/* ---------------------------------------------------------------------- */
+
+char *au_optstr_br_perm(int brperm);
+const char *au_optstr_udba(int udba);
+const char *au_optstr_wbr_copyup(int wbr_copyup);
+const char *au_optstr_wbr_create(int wbr_create);
+
+void au_opts_free(struct au_opts *opts);
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+                  unsigned int pending);
+int au_opts_mount(struct super_block *sb, struct au_opts *opts);
+int au_opts_remount(struct super_block *sb, struct au_opts *opts);
+
+unsigned int au_opt_udba(struct super_block *sb);
+
+/* ---------------------------------------------------------------------- */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_OPTS_H__ */
diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c
new file mode 100644 (file)
index 0000000..f88a4c8
--- /dev/null
@@ -0,0 +1,527 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * pseudo-link
+ */
+
+#include "aufs.h"
+
+/*
+ * the pseudo-link maintenance mode.
+ * while a user process maintains the pseudo-links,
+ * prohibit adding a new plink and branch manipulation.
+ *
+ * Flags
+ * NOPLM:
+ *     For entry functions which will handle plink, and i_mutex is already held
+ *     in VFS.
+ *     They cannot wait and should return an error at once.
+ *     Callers have to check the error.
+ * NOPLMW:
+ *     For entry functions which will handle plink, but i_mutex is not held
+ *     in VFS.
+ *     They can wait for the plink maintenance mode to finish.
+ *
+ * They behave like F_SETLK and F_SETLKW.
+ * If the caller never handles plinks, then both flags are unnecessary.
+ */
+
+int au_plink_maint(struct super_block *sb, int flags)
+{
+       int err;
+       pid_t pid, ppid;
+       struct au_sbinfo *sbi;
+
+       SiMustAnyLock(sb);
+
+       err = 0;
+       if (!au_opt_test(au_mntflags(sb), PLINK))
+               goto out;
+
+       sbi = au_sbi(sb);
+       pid = sbi->si_plink_maint_pid;
+       if (!pid || pid == current->pid)
+               goto out;
+
+       /* todo: it highly depends upon /sbin/mount.aufs */
+       rcu_read_lock();
+       ppid = task_pid_vnr(rcu_dereference(current->real_parent));
+       rcu_read_unlock();
+       if (pid == ppid)
+               goto out;
+
+       if (au_ftest_lock(flags, NOPLMW)) {
+               /* if there is no i_mutex lock in VFS, we don't need to wait */
+               /* AuDebugOn(!lockdep_depth(current)); */
+               while (sbi->si_plink_maint_pid) {
+                       si_read_unlock(sb);
+                       /* gave up wake_up_bit() */
+                       wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid);
+
+                       if (au_ftest_lock(flags, FLUSH))
+                               au_nwt_flush(&sbi->si_nowait);
+                       si_noflush_read_lock(sb);
+               }
+       } else if (au_ftest_lock(flags, NOPLM)) {
+               AuDbg("ppid %d, pid %d\n", ppid, pid);
+               err = -EAGAIN;
+       }
+
+out:
+       return err;
+}
+
+void au_plink_maint_leave(struct au_sbinfo *sbinfo)
+{
+       spin_lock(&sbinfo->si_plink_maint_lock);
+       sbinfo->si_plink_maint_pid = 0;
+       spin_unlock(&sbinfo->si_plink_maint_lock);
+       wake_up_all(&sbinfo->si_plink_wq);
+}
+
+int au_plink_maint_enter(struct super_block *sb)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+
+       err = 0;
+       sbinfo = au_sbi(sb);
+       /* make sure i am the only one in this fs */
+       si_write_lock(sb, AuLock_FLUSH);
+       if (au_opt_test(au_mntflags(sb), PLINK)) {
+               spin_lock(&sbinfo->si_plink_maint_lock);
+               if (!sbinfo->si_plink_maint_pid)
+                       sbinfo->si_plink_maint_pid = current->pid;
+               else
+                       err = -EBUSY;
+               spin_unlock(&sbinfo->si_plink_maint_lock);
+       }
+       si_write_unlock(sb);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb)
+{
+       int i;
+       struct au_sbinfo *sbinfo;
+       struct hlist_head *plink_hlist;
+       struct hlist_node *pos;
+       struct pseudo_link *plink;
+
+       SiMustAnyLock(sb);
+
+       sbinfo = au_sbi(sb);
+       AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+       AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+       for (i = 0; i < AuPlink_NHASH; i++) {
+               plink_hlist = &sbinfo->si_plink[i].head;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(plink, pos, plink_hlist, hlist)
+                       AuDbg("%lu\n", plink->inode->i_ino);
+               rcu_read_unlock();
+       }
+}
+#endif
+
+/* is the inode pseudo-linked? */
+int au_plink_test(struct inode *inode)
+{
+       int found, i;
+       struct au_sbinfo *sbinfo;
+       struct hlist_head *plink_hlist;
+       struct hlist_node *pos;
+       struct pseudo_link *plink;
+
+       sbinfo = au_sbi(inode->i_sb);
+       AuRwMustAnyLock(&sbinfo->si_rwsem);
+       AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
+       AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+       found = 0;
+       i = au_plink_hash(inode->i_ino);
+       plink_hlist = &sbinfo->si_plink[i].head;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(plink, pos, plink_hlist, hlist)
+               if (plink->inode == inode) {
+                       found = 1;
+                       break;
+               }
+       rcu_read_unlock();
+       return found;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generate a name for plink.
+ * the file will be stored under AUFS_WH_PLINKDIR.
+ */
+/* 20 is the maximum number of decimal digits in a 64-bit ulong */
+#define PLINK_NAME_LEN ((20 + 1) * 2)
+
+static int plink_name(char *name, int len, struct inode *inode,
+                     aufs_bindex_t bindex)
+{
+       int rlen;
+       struct inode *h_inode;
+
+       h_inode = au_h_iptr(inode, bindex);
+       rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
+       return rlen;
+}
+
+struct au_do_plink_lkup_args {
+       struct dentry **errp;
+       struct qstr *tgtname;
+       struct dentry *h_parent;
+       struct au_branch *br;
+};
+
+static struct dentry *au_do_plink_lkup(struct qstr *tgtname,
+                                      struct dentry *h_parent,
+                                      struct au_branch *br)
+{
+       struct dentry *h_dentry;
+       struct mutex *h_mtx;
+
+       h_mtx = &h_parent->d_inode->i_mutex;
+       mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
+       h_dentry = au_lkup_one(tgtname, h_parent, br, /*nd*/NULL);
+       mutex_unlock(h_mtx);
+       return h_dentry;
+}
+
+static void au_call_do_plink_lkup(void *args)
+{
+       struct au_do_plink_lkup_args *a = args;
+       *a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br);
+}
+
+/* lookup the plink-ed @inode under the branch at @bindex */
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
+{
+       struct dentry *h_dentry, *h_parent;
+       struct au_branch *br;
+       struct inode *h_dir;
+       int wkq_err;
+       char a[PLINK_NAME_LEN];
+       struct qstr tgtname = {
+               .name   = a
+       };
+
+       AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+       br = au_sbr(inode->i_sb, bindex);
+       h_parent = br->br_wbr->wbr_plink;
+       h_dir = h_parent->d_inode;
+       tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+       if (current_fsuid()) {
+               struct au_do_plink_lkup_args args = {
+                       .errp           = &h_dentry,
+                       .tgtname        = &tgtname,
+                       .h_parent       = h_parent,
+                       .br             = br
+               };
+
+               wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args);
+               if (unlikely(wkq_err))
+                       h_dentry = ERR_PTR(wkq_err);
+       } else
+               h_dentry = au_do_plink_lkup(&tgtname, h_parent, br);
+
+       return h_dentry;
+}
+
+/* create a pseudo-link */
+static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
+                     struct dentry *h_dentry, struct au_branch *br)
+{
+       int err;
+       struct path h_path = {
+               .mnt = au_br_mnt(br)
+       };
+       struct inode *h_dir;
+
+       h_dir = h_parent->d_inode;
+       mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2);
+again:
+       h_path.dentry = au_lkup_one(tgt, h_parent, br, /*nd*/NULL);
+       err = PTR_ERR(h_path.dentry);
+       if (IS_ERR(h_path.dentry))
+               goto out;
+
+       err = 0;
+       /* wh.plink dir is not monitored */
+       /* todo: is it really safe? */
+       if (h_path.dentry->d_inode
+           && h_path.dentry->d_inode != h_dentry->d_inode) {
+               err = vfsub_unlink(h_dir, &h_path, /*force*/0);
+               dput(h_path.dentry);
+               h_path.dentry = NULL;
+               if (!err)
+                       goto again;
+       }
+       if (!err && !h_path.dentry->d_inode)
+               err = vfsub_link(h_dentry, h_dir, &h_path);
+       dput(h_path.dentry);
+
+out:
+       mutex_unlock(&h_dir->i_mutex);
+       return err;
+}
+
+struct do_whplink_args {
+       int *errp;
+       struct qstr *tgt;
+       struct dentry *h_parent;
+       struct dentry *h_dentry;
+       struct au_branch *br;
+};
+
+static void call_do_whplink(void *args)
+{
+       struct do_whplink_args *a = args;
+       *a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
+}
+
+static int whplink(struct dentry *h_dentry, struct inode *inode,
+                  aufs_bindex_t bindex, struct au_branch *br)
+{
+       int err, wkq_err;
+       struct au_wbr *wbr;
+       struct dentry *h_parent;
+       struct inode *h_dir;
+       char a[PLINK_NAME_LEN];
+       struct qstr tgtname = {
+               .name = a
+       };
+
+       wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
+       h_parent = wbr->wbr_plink;
+       h_dir = h_parent->d_inode;
+       tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+       /* always superio. */
+       if (current_fsuid()) {
+               struct do_whplink_args args = {
+                       .errp           = &err,
+                       .tgt            = &tgtname,
+                       .h_parent       = h_parent,
+                       .h_dentry       = h_dentry,
+                       .br             = br
+               };
+               wkq_err = au_wkq_wait(call_do_whplink, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       } else
+               err = do_whplink(&tgtname, h_parent, h_dentry, br);
+
+       return err;
+}
+
+/* free a single plink */
+static void do_put_plink(struct pseudo_link *plink, int do_del)
+{
+       if (do_del)
+               hlist_del(&plink->hlist);
+       iput(plink->inode);
+       kfree(plink);
+}
+
+static void do_put_plink_rcu(struct rcu_head *rcu)
+{
+       struct pseudo_link *plink;
+
+       plink = container_of(rcu, struct pseudo_link, rcu);
+       iput(plink->inode);
+       kfree(plink);
+}
+
+/*
+ * create a new pseudo-link for @h_dentry on @bindex.
+ * the linked inode is held in aufs @inode.
+ */
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+                    struct dentry *h_dentry)
+{
+       struct super_block *sb;
+       struct au_sbinfo *sbinfo;
+       struct hlist_head *plink_hlist;
+       struct hlist_node *pos;
+       struct pseudo_link *plink, *tmp;
+       struct au_sphlhead *sphl;
+       int found, err, cnt, i;
+
+       sb = inode->i_sb;
+       sbinfo = au_sbi(sb);
+       AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+       AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+       found = au_plink_test(inode);
+       if (found)
+               return;
+
+       i = au_plink_hash(inode->i_ino);
+       sphl = sbinfo->si_plink + i;
+       plink_hlist = &sphl->head;
+       tmp = kmalloc(sizeof(*plink), GFP_NOFS);
+       if (tmp)
+               tmp->inode = au_igrab(inode);
+       else {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       spin_lock(&sphl->spin);
+       hlist_for_each_entry(plink, pos, plink_hlist, hlist) {
+               if (plink->inode == inode) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (!found)
+               hlist_add_head_rcu(&tmp->hlist, plink_hlist);
+       spin_unlock(&sphl->spin);
+       if (!found) {
+               cnt = au_sphl_count(sphl);
+#define msg "unexpectedly unblanced or too many pseudo-links"
+               if (cnt > AUFS_PLINK_WARN)
+                       AuWarn1(msg ", %d\n", cnt);
+#undef msg
+               err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
+       } else {
+               do_put_plink(tmp, 0);
+               return;
+       }
+
+out:
+       if (unlikely(err)) {
+               pr_warn("err %d, damaged pseudo link.\n", err);
+               if (tmp) {
+                       au_sphl_del_rcu(&tmp->hlist, sphl);
+                       call_rcu(&tmp->rcu, do_put_plink_rcu);
+               }
+       }
+}
+
+/* free all plinks */
+void au_plink_put(struct super_block *sb, int verbose)
+{
+       int i, warned;
+       struct au_sbinfo *sbinfo;
+       struct hlist_head *plink_hlist;
+       struct hlist_node *pos, *tmp;
+       struct pseudo_link *plink;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+       AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+       /* no spin_lock since sbinfo is write-locked */
+       warned = 0;
+       for (i = 0; i < AuPlink_NHASH; i++) {
+               plink_hlist = &sbinfo->si_plink[i].head;
+               if (!warned && verbose && !hlist_empty(plink_hlist)) {
+                       pr_warn("pseudo-link is not flushed");
+                       warned = 1;
+               }
+               hlist_for_each_entry_safe(plink, pos, tmp, plink_hlist, hlist)
+                       do_put_plink(plink, 0);
+               INIT_HLIST_HEAD(plink_hlist);
+       }
+}
+
+void au_plink_clean(struct super_block *sb, int verbose)
+{
+       struct dentry *root;
+
+       root = sb->s_root;
+       aufs_write_lock(root);
+       if (au_opt_test(au_mntflags(sb), PLINK))
+               au_plink_put(sb, verbose);
+       aufs_write_unlock(root);
+}
+
+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id)
+{
+       int do_put;
+       aufs_bindex_t bstart, bend, bindex;
+
+       do_put = 0;
+       bstart = au_ibstart(inode);
+       bend = au_ibend(inode);
+       if (bstart >= 0) {
+               for (bindex = bstart; bindex <= bend; bindex++) {
+                       if (!au_h_iptr(inode, bindex)
+                           || au_ii_br_id(inode, bindex) != br_id)
+                               continue;
+                       au_set_h_iptr(inode, bindex, NULL, 0);
+                       do_put = 1;
+                       break;
+               }
+               if (do_put)
+                       for (bindex = bstart; bindex <= bend; bindex++)
+                               if (au_h_iptr(inode, bindex)) {
+                                       do_put = 0;
+                                       break;
+                               }
+       } else
+               do_put = 1;
+
+       return do_put;
+}
+
+/* free the plinks on a branch specified by @br_id */
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
+{
+       struct au_sbinfo *sbinfo;
+       struct hlist_head *plink_hlist;
+       struct hlist_node *pos, *tmp;
+       struct pseudo_link *plink;
+       struct inode *inode;
+       int i, do_put;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+       AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+       /* no spin_lock since sbinfo is write-locked */
+       for (i = 0; i < AuPlink_NHASH; i++) {
+               plink_hlist = &sbinfo->si_plink[i].head;
+               hlist_for_each_entry_safe(plink, pos, tmp, plink_hlist, hlist) {
+                       inode = au_igrab(plink->inode);
+                       ii_write_lock_child(inode);
+                       do_put = au_plink_do_half_refresh(inode, br_id);
+                       if (do_put)
+                               do_put_plink(plink, 1);
+                       ii_write_unlock(inode);
+                       iput(inode);
+               }
+       }
+}
diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c
new file mode 100644 (file)
index 0000000..aa5e2ae
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * poll operation
+ * There is only one filesystem which implements ->poll operation, currently.
+ */
+
+#include "aufs.h"
+
+unsigned int aufs_poll(struct file *file, poll_table *wait)
+{
+       unsigned int mask;
+       int err;
+       struct file *h_file;
+       struct dentry *dentry;
+       struct super_block *sb;
+
+       /* We should pretend an error happened. */
+       mask = POLLERR /* | POLLIN | POLLOUT */;
+       dentry = file->f_dentry;
+       sb = dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+       err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+       if (unlikely(err))
+               goto out;
+
+       /* it is not an error if h_file has no operation */
+       mask = DEFAULT_POLLMASK;
+       h_file = au_hf_top(file);
+       if (h_file->f_op && h_file->f_op->poll)
+               mask = h_file->f_op->poll(h_file, wait);
+
+       di_read_unlock(dentry, AuLock_IR);
+       fi_read_unlock(file);
+
+out:
+       si_read_unlock(sb);
+       AuTraceErr((int)mask);
+       return mask;
+}
diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c
new file mode 100644 (file)
index 0000000..7201cdf
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * procfs interfaces
+ */
+
+#include <linux/proc_fs.h>
+#include "aufs.h"
+
+static int au_procfs_plm_release(struct inode *inode, struct file *file)
+{
+       struct au_sbinfo *sbinfo;
+
+       sbinfo = file->private_data;
+       if (sbinfo) {
+               au_plink_maint_leave(sbinfo);
+               kobject_put(&sbinfo->si_kobj);
+       }
+
+       return 0;
+}
+
+static void au_procfs_plm_write_clean(struct file *file)
+{
+       struct au_sbinfo *sbinfo;
+
+       sbinfo = file->private_data;
+       if (sbinfo)
+               au_plink_clean(sbinfo->si_sb, /*verbose*/0);
+}
+
+static int au_procfs_plm_write_si(struct file *file, unsigned long id)
+{
+       int err;
+       struct super_block *sb;
+       struct au_sbinfo *sbinfo;
+
+       err = -EBUSY;
+       if (unlikely(file->private_data))
+               goto out;
+
+       sb = NULL;
+       /* don't use au_sbilist_lock() here */
+       spin_lock(&au_sbilist.spin);
+       list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
+               if (id == sysaufs_si_id(sbinfo)) {
+                       kobject_get(&sbinfo->si_kobj);
+                       sb = sbinfo->si_sb;
+                       break;
+               }
+       spin_unlock(&au_sbilist.spin);
+
+       err = -EINVAL;
+       if (unlikely(!sb))
+               goto out;
+
+       err = au_plink_maint_enter(sb);
+       if (!err)
+               /* keep kobject_get() */
+               file->private_data = sbinfo;
+       else
+               kobject_put(&sbinfo->si_kobj);
+out:
+       return err;
+}
+
+/*
+ * Accept a valid "si=xxxx" only.
+ * Once it is accepted successfully, accept "clean" too.
+ */
+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf,
+                                  size_t count, loff_t *ppos)
+{
+       ssize_t err;
+       unsigned long id;
+       /* last newline is allowed */
+       char buf[3 + sizeof(unsigned long) * 2 + 1];
+
+       err = -EACCES;
+       if (unlikely(!capable(CAP_SYS_ADMIN)))
+               goto out;
+
+       err = -EINVAL;
+       if (unlikely(count > sizeof(buf)))
+               goto out;
+
+       err = copy_from_user(buf, ubuf, count);
+       if (unlikely(err)) {
+               err = -EFAULT;
+               goto out;
+       }
+       buf[count] = 0;
+
+       err = -EINVAL;
+       if (!strcmp("clean", buf)) {
+               au_procfs_plm_write_clean(file);
+               goto out_success;
+       } else if (unlikely(strncmp("si=", buf, 3)))
+               goto out;
+
+       err = kstrtoul(buf + 3, 16, &id);
+       if (unlikely(err))
+               goto out;
+
+       err = au_procfs_plm_write_si(file, id);
+       if (unlikely(err))
+               goto out;
+
+out_success:
+       err = count; /* success */
+out:
+       return err;
+}
+
+static const struct file_operations au_procfs_plm_fop = {
+       .write          = au_procfs_plm_write,
+       .release        = au_procfs_plm_release,
+       .owner          = THIS_MODULE
+};
+
+/* ---------------------------------------------------------------------- */
+
+static struct proc_dir_entry *au_procfs_dir;
+
+void au_procfs_fin(void)
+{
+       remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir);
+       remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+}
+
+int __init au_procfs_init(void)
+{
+       int err;
+       struct proc_dir_entry *entry;
+
+       err = -ENOMEM;
+       au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL);
+       if (unlikely(!au_procfs_dir))
+               goto out;
+
+       entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | S_IWUSR,
+                           au_procfs_dir, &au_procfs_plm_fop);
+       if (unlikely(!entry))
+               goto out_dir;
+
+       err = 0;
+       goto out; /* success */
+
+
+out_dir:
+       remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+out:
+       return err;
+}
diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c
new file mode 100644 (file)
index 0000000..9c6046a
--- /dev/null
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * readdir in userspace.
+ */
+
+#include <linux/compat.h>
+#include <linux/fs_stack.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/* bits for struct aufs_rdu.flags */
+#define        AuRdu_CALLED    1
+#define        AuRdu_CONT      (1 << 1)
+#define        AuRdu_FULL      (1 << 2)
+#define au_ftest_rdu(flags, name)      ((flags) & AuRdu_##name)
+#define au_fset_rdu(flags, name) \
+       do { (flags) |= AuRdu_##name; } while (0)
+#define au_fclr_rdu(flags, name) \
+       do { (flags) &= ~AuRdu_##name; } while (0)
+
+struct au_rdu_arg {
+       struct aufs_rdu                 *rdu;
+       union au_rdu_ent_ul             ent;
+       unsigned long                   end;
+
+       struct super_block              *sb;
+       int                             err;
+};
+
+static int au_rdu_fill(void *__arg, const char *name, int nlen,
+                      loff_t offset, u64 h_ino, unsigned int d_type)
+{
+       int err, len;
+       struct au_rdu_arg *arg = __arg;
+       struct aufs_rdu *rdu = arg->rdu;
+       struct au_rdu_ent ent;
+
+       err = 0;
+       arg->err = 0;
+       au_fset_rdu(rdu->cookie.flags, CALLED);
+       len = au_rdu_len(nlen);
+       if (arg->ent.ul + len  < arg->end) {
+               ent.ino = h_ino;
+               ent.bindex = rdu->cookie.bindex;
+               ent.type = d_type;
+               ent.nlen = nlen;
+               if (unlikely(nlen > AUFS_MAX_NAMELEN))
+                       ent.type = DT_UNKNOWN;
+
+               /* unnecessary to support mmap_sem since this is a dir */
+               err = -EFAULT;
+               if (copy_to_user(arg->ent.e, &ent, sizeof(ent)))
+                       goto out;
+               if (copy_to_user(arg->ent.e->name, name, nlen))
+                       goto out;
+               /* the terminating NULL */
+               if (__put_user(0, arg->ent.e->name + nlen))
+                       goto out;
+               err = 0;
+               /* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */
+               arg->ent.ul += len;
+               rdu->rent++;
+       } else {
+               err = -EFAULT;
+               au_fset_rdu(rdu->cookie.flags, FULL);
+               rdu->full = 1;
+               rdu->tail = arg->ent;
+       }
+
+out:
+       /* AuTraceErr(err); */
+       return err;
+}
+
+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg)
+{
+       int err;
+       loff_t offset;
+       struct au_rdu_cookie *cookie = &arg->rdu->cookie;
+
+       offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET);
+       err = offset;
+       if (unlikely(offset != cookie->h_pos))
+               goto out;
+
+       err = 0;
+       do {
+               arg->err = 0;
+               au_fclr_rdu(cookie->flags, CALLED);
+               /* smp_mb(); */
+               err = vfsub_readdir(h_file, au_rdu_fill, arg);
+               if (err >= 0)
+                       err = arg->err;
+       } while (!err
+                && au_ftest_rdu(cookie->flags, CALLED)
+                && !au_ftest_rdu(cookie->flags, FULL));
+       cookie->h_pos = h_file->f_pos;
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+static int au_rdu(struct file *file, struct aufs_rdu *rdu)
+{
+       int err;
+       aufs_bindex_t bend;
+       struct au_rdu_arg arg;
+       struct dentry *dentry;
+       struct inode *inode;
+       struct file *h_file;
+       struct au_rdu_cookie *cookie = &rdu->cookie;
+
+       err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz);
+       if (unlikely(err)) {
+               err = -EFAULT;
+               AuTraceErr(err);
+               goto out;
+       }
+       rdu->rent = 0;
+       rdu->tail = rdu->ent;
+       rdu->full = 0;
+       arg.rdu = rdu;
+       arg.ent = rdu->ent;
+       arg.end = arg.ent.ul;
+       arg.end += rdu->sz;
+
+       err = -ENOTDIR;
+       if (unlikely(!file->f_op || !file->f_op->readdir))
+               goto out;
+
+       err = security_file_permission(file, MAY_READ);
+       AuTraceErr(err);
+       if (unlikely(err))
+               goto out;
+
+       dentry = file->f_dentry;
+       inode = dentry->d_inode;
+#if 1
+       mutex_lock(&inode->i_mutex);
+#else
+       err = mutex_lock_killable(&inode->i_mutex);
+       AuTraceErr(err);
+       if (unlikely(err))
+               goto out;
+#endif
+
+       arg.sb = inode->i_sb;
+       err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM);
+       if (unlikely(err))
+               goto out_mtx;
+       err = au_alive_dir(dentry);
+       if (unlikely(err))
+               goto out_si;
+       /* todo: reval? */
+       fi_read_lock(file);
+
+       err = -EAGAIN;
+       if (unlikely(au_ftest_rdu(cookie->flags, CONT)
+                    && cookie->generation != au_figen(file)))
+               goto out_unlock;
+
+       err = 0;
+       if (!rdu->blk) {
+               rdu->blk = au_sbi(arg.sb)->si_rdblk;
+               if (!rdu->blk)
+                       rdu->blk = au_dir_size(file, /*dentry*/NULL);
+       }
+       bend = au_fbstart(file);
+       if (cookie->bindex < bend)
+               cookie->bindex = bend;
+       bend = au_fbend_dir(file);
+       /* AuDbg("b%d, b%d\n", cookie->bindex, bend); */
+       for (; !err && cookie->bindex <= bend;
+            cookie->bindex++, cookie->h_pos = 0) {
+               h_file = au_hf_dir(file, cookie->bindex);
+               if (!h_file)
+                       continue;
+
+               au_fclr_rdu(cookie->flags, FULL);
+               err = au_rdu_do(h_file, &arg);
+               AuTraceErr(err);
+               if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err))
+                       break;
+       }
+       AuDbg("rent %llu\n", rdu->rent);
+
+       if (!err && !au_ftest_rdu(cookie->flags, CONT)) {
+               rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH);
+               au_fset_rdu(cookie->flags, CONT);
+               cookie->generation = au_figen(file);
+       }
+
+       ii_read_lock_child(inode);
+       fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibstart(inode)));
+       ii_read_unlock(inode);
+
+out_unlock:
+       fi_read_unlock(file);
+out_si:
+       si_read_unlock(arg.sb);
+out_mtx:
+       mutex_unlock(&inode->i_mutex);
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu)
+{
+       int err;
+       ino_t ino;
+       unsigned long long nent;
+       union au_rdu_ent_ul *u;
+       struct au_rdu_ent ent;
+       struct super_block *sb;
+
+       err = 0;
+       nent = rdu->nent;
+       u = &rdu->ent;
+       sb = file->f_dentry->d_sb;
+       si_read_lock(sb, AuLock_FLUSH);
+       while (nent-- > 0) {
+               /* unnecessary to support mmap_sem since this is a dir */
+               err = copy_from_user(&ent, u->e, sizeof(ent));
+               if (!err)
+                       err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino));
+               if (unlikely(err)) {
+                       err = -EFAULT;
+                       AuTraceErr(err);
+                       break;
+               }
+
+               /* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */
+               if (!ent.wh)
+                       err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino);
+               else
+                       err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type,
+                                       &ino);
+               if (unlikely(err)) {
+                       AuTraceErr(err);
+                       break;
+               }
+
+               err = __put_user(ino, &u->e->ino);
+               if (unlikely(err)) {
+                       err = -EFAULT;
+                       AuTraceErr(err);
+                       break;
+               }
+               u->ul += au_rdu_len(ent.nlen);
+       }
+       si_read_unlock(sb);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_rdu_verify(struct aufs_rdu *rdu)
+{
+       AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | "
+             "%llu, b%d, 0x%x, g%u}\n",
+             rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ],
+             rdu->blk,
+             rdu->rent, rdu->shwh, rdu->full,
+             rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags,
+             rdu->cookie.generation);
+
+       if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu))
+               return 0;
+
+       AuDbg("%u:%u\n",
+             rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu));
+       return -EINVAL;
+}
+
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       long err, e;
+       struct aufs_rdu rdu;
+       void __user *p = (void __user *)arg;
+
+       err = copy_from_user(&rdu, p, sizeof(rdu));
+       if (unlikely(err)) {
+               err = -EFAULT;
+               AuTraceErr(err);
+               goto out;
+       }
+       err = au_rdu_verify(&rdu);
+       if (unlikely(err))
+               goto out;
+
+       switch (cmd) {
+       case AUFS_CTL_RDU:
+               err = au_rdu(file, &rdu);
+               if (unlikely(err))
+                       break;
+
+               e = copy_to_user(p, &rdu, sizeof(rdu));
+               if (unlikely(e)) {
+                       err = -EFAULT;
+                       AuTraceErr(err);
+               }
+               break;
+       case AUFS_CTL_RDU_INO:
+               err = au_rdu_ino(file, &rdu);
+               break;
+
+       default:
+               /* err = -ENOTTY; */
+               err = -EINVAL;
+       }
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       long err, e;
+       struct aufs_rdu rdu;
+       void __user *p = compat_ptr(arg);
+
+       /* todo: get_user()? */
+       err = copy_from_user(&rdu, p, sizeof(rdu));
+       if (unlikely(err)) {
+               err = -EFAULT;
+               AuTraceErr(err);
+               goto out;
+       }
+       rdu.ent.e = compat_ptr(rdu.ent.ul);
+       err = au_rdu_verify(&rdu);
+       if (unlikely(err))
+               goto out;
+
+       switch (cmd) {
+       case AUFS_CTL_RDU:
+               err = au_rdu(file, &rdu);
+               if (unlikely(err))
+                       break;
+
+               rdu.ent.ul = ptr_to_compat(rdu.ent.e);
+               rdu.tail.ul = ptr_to_compat(rdu.tail.e);
+               e = copy_to_user(p, &rdu, sizeof(rdu));
+               if (unlikely(e)) {
+                       err = -EFAULT;
+                       AuTraceErr(err);
+               }
+               break;
+       case AUFS_CTL_RDU_INO:
+               err = au_rdu_ino(file, &rdu);
+               break;
+
+       default:
+               /* err = -ENOTTY; */
+               err = -EINVAL;
+       }
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+#endif
diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h
new file mode 100644 (file)
index 0000000..a1eb04b
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * simple read-write semaphore wrappers
+ */
+
+#ifndef __AUFS_RWSEM_H__
+#define __AUFS_RWSEM_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+/*
+ * rw_semaphore wrapper.  The extra atomics exist only in debug builds
+ * and let the AuRwMust*() assertions below check which kind of lock is
+ * (believed to be) held, since rwsem_is_locked() cannot distinguish
+ * readers from writers.
+ */
+struct au_rwsem {
+       struct rw_semaphore     rwsem;
+#ifdef CONFIG_AUFS_DEBUG
+       /* just for debugging, not almighty counter */
+       atomic_t                rcnt, wcnt;
+#endif
+};
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDbgCntInit(rw) do { \
+       atomic_set(&(rw)->rcnt, 0); \
+       atomic_set(&(rw)->wcnt, 0); \
+       smp_mb(); /* atomic set */ \
+} while (0)
+
+#define AuDbgRcntInc(rw)       atomic_inc(&(rw)->rcnt)
+#define AuDbgRcntDec(rw)       WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0)
+#define AuDbgWcntInc(rw)       atomic_inc(&(rw)->wcnt)
+#define AuDbgWcntDec(rw)       WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0)
+#else
+#define AuDbgCntInit(rw)       do {} while (0)
+#define AuDbgRcntInc(rw)       do {} while (0)
+#define AuDbgRcntDec(rw)       do {} while (0)
+#define AuDbgWcntInc(rw)       do {} while (0)
+#define AuDbgWcntDec(rw)       do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* to debug easier, do not make them inlined functions */
+#define AuRwMustNoWaiters(rw)  AuDebugOn(!list_empty(&(rw)->rwsem.wait_list))
+/* rwsem_is_locked() is unusable */
+#define AuRwMustReadLock(rw)   AuDebugOn(atomic_read(&(rw)->rcnt) <= 0)
+#define AuRwMustWriteLock(rw)  AuDebugOn(atomic_read(&(rw)->wcnt) <= 0)
+#define AuRwMustAnyLock(rw)    AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \
+                                       && atomic_read(&(rw)->wcnt) <= 0)
+#define AuRwDestroy(rw)                AuDebugOn(atomic_read(&(rw)->rcnt) \
+                                       || atomic_read(&(rw)->wcnt))
+
+#define au_rw_class(rw, key)   lockdep_set_class(&(rw)->rwsem, key)
+
+/* initialize an unlocked au_rwsem */
+static inline void au_rw_init(struct au_rwsem *rw)
+{
+       AuDbgCntInit(rw);
+       init_rwsem(&rw->rwsem);
+}
+
+/* initialize and immediately take the write lock */
+static inline void au_rw_init_wlock(struct au_rwsem *rw)
+{
+       au_rw_init(rw);
+       down_write(&rw->rwsem);
+       AuDbgWcntInc(rw);
+}
+
+/* as au_rw_init_wlock(), with an explicit lockdep subclass */
+static inline void au_rw_init_wlock_nested(struct au_rwsem *rw,
+                                          unsigned int lsc)
+{
+       au_rw_init(rw);
+       down_write_nested(&rw->rwsem, lsc);
+       AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_read_lock(struct au_rwsem *rw)
+{
+       down_read(&rw->rwsem);
+       AuDbgRcntInc(rw);
+}
+
+static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc)
+{
+       down_read_nested(&rw->rwsem, lsc);
+       AuDbgRcntInc(rw);
+}
+
+static inline void au_rw_read_unlock(struct au_rwsem *rw)
+{
+       AuRwMustReadLock(rw);
+       AuDbgRcntDec(rw);
+       up_read(&rw->rwsem);
+}
+
+/* downgrade a held write lock to a read lock without releasing it */
+static inline void au_rw_dgrade_lock(struct au_rwsem *rw)
+{
+       AuRwMustWriteLock(rw);
+       AuDbgRcntInc(rw);
+       AuDbgWcntDec(rw);
+       downgrade_write(&rw->rwsem);
+}
+
+static inline void au_rw_write_lock(struct au_rwsem *rw)
+{
+       down_write(&rw->rwsem);
+       AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_write_lock_nested(struct au_rwsem *rw,
+                                          unsigned int lsc)
+{
+       down_write_nested(&rw->rwsem, lsc);
+       AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_write_unlock(struct au_rwsem *rw)
+{
+       AuRwMustWriteLock(rw);
+       AuDbgWcntDec(rw);
+       up_write(&rw->rwsem);
+}
+
+/* why is not _nested version defined */
+/* returns non-zero when the read lock was acquired */
+static inline int au_rw_read_trylock(struct au_rwsem *rw)
+{
+       int ret = down_read_trylock(&rw->rwsem);
+       if (ret)
+               AuDbgRcntInc(rw);
+       return ret;
+}
+
+/* returns non-zero when the write lock was acquired */
+static inline int au_rw_write_trylock(struct au_rwsem *rw)
+{
+       int ret = down_write_trylock(&rw->rwsem);
+       if (ret)
+               AuDbgWcntInc(rw);
+       return ret;
+}
+
+#undef AuDbgCntInit
+#undef AuDbgRcntInc
+#undef AuDbgRcntDec
+#undef AuDbgWcntInc
+#undef AuDbgWcntDec
+
+/* generate prefix-named lock wrappers around a given au_rwsem member */
+#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
+static inline void prefix##_read_lock(param) \
+{ au_rw_read_lock(rwsem); } \
+static inline void prefix##_write_lock(param) \
+{ au_rw_write_lock(rwsem); } \
+static inline int prefix##_read_trylock(param) \
+{ return au_rw_read_trylock(rwsem); } \
+static inline int prefix##_write_trylock(param) \
+{ return au_rw_write_trylock(rwsem); }
+/* why is not _nested version defined */
+/* static inline void prefix##_read_trylock_nested(param, lsc)
+{ au_rw_read_trylock_nested(rwsem, lsc)); }
+static inline void prefix##_write_trylock_nestd(param, lsc)
+{ au_rw_write_trylock_nested(rwsem, lsc); } */
+
+/* generate the matching unlock/downgrade wrappers */
+#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \
+static inline void prefix##_read_unlock(param) \
+{ au_rw_read_unlock(rwsem); } \
+static inline void prefix##_write_unlock(param) \
+{ au_rw_write_unlock(rwsem); } \
+static inline void prefix##_downgrade_lock(param) \
+{ au_rw_dgrade_lock(rwsem); }
+
+#define AuSimpleRwsemFuncs(prefix, param, rwsem) \
+       AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
+       AuSimpleUnlockRwsemFuncs(prefix, param, rwsem)
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_RWSEM_H__ */
diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c
new file mode 100644 (file)
index 0000000..5e8713a
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * superblock private data
+ */
+
+#include "aufs.h"
+
+/*
+ * they are necessary regardless sysfs is disabled.
+ */
+/*
+ * Release callback for the sbinfo kobject: tears down and frees the
+ * aufs superblock private data.  Debug builds assert that the
+ * pseudo-link hash lists, the nowait queue and the pid radix-tree are
+ * all empty by the time we get here.
+ */
+void au_si_free(struct kobject *kobj)
+{
+       int i;
+       struct au_sbinfo *sbinfo;
+       char *locked __maybe_unused; /* debug only */
+
+       sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+       for (i = 0; i < AuPlink_NHASH; i++)
+               AuDebugOn(!hlist_empty(&sbinfo->si_plink[i].head));
+       AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));
+
+       /* take the write lock once more so no reader can still be inside */
+       au_rw_write_lock(&sbinfo->si_rwsem);
+       au_br_free(sbinfo);
+       au_rw_write_unlock(&sbinfo->si_rwsem);
+
+       /* debug: no task may still have its pid registered against us */
+       AuDebugOn(radix_tree_gang_lookup
+                 (&sbinfo->au_si_pid.tree, (void **)&locked,
+                  /*first_index*/PID_MAX_DEFAULT - 1,
+                  /*max_items*/sizeof(locked)/sizeof(*locked)));
+
+       kfree(sbinfo->si_branch);
+       kfree(sbinfo->au_si_pid.bitmap);
+       mutex_destroy(&sbinfo->si_xib_mtx);
+       AuRwDestroy(&sbinfo->si_rwsem);
+
+       kfree(sbinfo);
+}
+
+/*
+ * Allocate and initialize the aufs superblock private data for @sb and
+ * publish it via sb->s_fs_info.  Returns 0 on success or -ENOMEM (or a
+ * sysaufs error).  On success the caller holds si_rwsem for writing
+ * (taken by au_rw_init_wlock() below).
+ */
+int au_si_alloc(struct super_block *sb)
+{
+       int err, i;
+       struct au_sbinfo *sbinfo;
+       static struct lock_class_key aufs_si;
+
+       err = -ENOMEM;
+       sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
+       if (unlikely(!sbinfo))
+               goto out;
+
+       BUILD_BUG_ON(sizeof(unsigned long) !=
+                    sizeof(*sbinfo->au_si_pid.bitmap));
+       sbinfo->au_si_pid.bitmap = kcalloc(BITS_TO_LONGS(PID_MAX_DEFAULT),
+                                       sizeof(*sbinfo->au_si_pid.bitmap),
+                                       GFP_NOFS);
+       if (unlikely(!sbinfo->au_si_pid.bitmap))
+               goto out_sbinfo;
+
+       /* will be reallocated separately */
+       sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
+       if (unlikely(!sbinfo->si_branch))
+               goto out_pidmap;
+
+       err = sysaufs_si_init(sbinfo);
+       if (unlikely(err))
+               goto out_br;
+
+       au_nwt_init(&sbinfo->si_nowait);
+       /* returned to the caller write-locked; give it its own lockdep class */
+       au_rw_init_wlock(&sbinfo->si_rwsem);
+       au_rw_class(&sbinfo->si_rwsem, &aufs_si);
+       spin_lock_init(&sbinfo->au_si_pid.tree_lock);
+       INIT_RADIX_TREE(&sbinfo->au_si_pid.tree, GFP_ATOMIC | __GFP_NOFAIL);
+
+       atomic_long_set(&sbinfo->si_ninodes, 0);
+       atomic_long_set(&sbinfo->si_nfiles, 0);
+
+       /* no branches yet */
+       sbinfo->si_bend = -1;
+       sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2;
+
+       /* default writable-branch policies and mount flags */
+       sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
+       sbinfo->si_wbr_create = AuWbrCreate_Def;
+       sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
+       sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;
+
+       sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);
+
+       sbinfo->si_xino_jiffy = jiffies;
+       sbinfo->si_xino_expire
+               = msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC);
+       mutex_init(&sbinfo->si_xib_mtx);
+       sbinfo->si_xino_brid = -1;
+       /* leave si_xib_last_pindex and si_xib_next_bit */
+
+       /* readdir cache / block-size defaults */
+       sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
+       sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+       sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+       sbinfo->si_dirwh = AUFS_DIRWH_DEF;
+
+       for (i = 0; i < AuPlink_NHASH; i++)
+               au_sphl_init(sbinfo->si_plink + i);
+       init_waitqueue_head(&sbinfo->si_plink_wq);
+       spin_lock_init(&sbinfo->si_plink_maint_lock);
+
+       /* leave other members for sysaufs and si_mnt. */
+       sbinfo->si_sb = sb;
+       sb->s_fs_info = sbinfo;
+       si_pid_set(sb);
+       au_debug_sbinfo_init(sbinfo);
+       return 0; /* success */
+
+out_br:
+       kfree(sbinfo->si_branch);
+out_pidmap:
+       kfree(sbinfo->au_si_pid.bitmap);
+out_sbinfo:
+       kfree(sbinfo);
+out:
+       return err;
+}
+
+/*
+ * Grow (or shrink) the branch pointer array to hold @nbr entries,
+ * zero-filling any newly added slots.  Requires si_rwsem held for
+ * writing.  Returns 0 on success, -ENOMEM on allocation failure (the
+ * old array is left intact in that case).
+ */
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
+{
+       int err, sz;
+       struct au_branch **brp;
+
+       AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+       err = -ENOMEM;
+       /* si_bend == -1 (no branches) would give sz == 0; keep one slot */
+       sz = sizeof(*brp) * (sbinfo->si_bend + 1);
+       if (unlikely(!sz))
+               sz = sizeof(*brp);
+       brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS);
+       if (brp) {
+               sbinfo->si_branch = brp;
+               err = 0;
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Bump the superblock generation number (used to detect stale dentries
+ * and inodes after a branch change) and propagate it to the root dentry
+ * and root inode.  Requires si_rwsem held for writing.
+ */
+unsigned int au_sigen_inc(struct super_block *sb)
+{
+       unsigned int gen;
+
+       SiMustWriteLock(sb);
+
+       gen = ++au_sbi(sb)->si_generation;
+       au_update_digen(sb->s_root);
+       au_update_iigen(sb->s_root->d_inode, /*half*/0);
+       sb->s_root->d_inode->i_version++;
+       return gen;
+}
+
+/*
+ * Pick an unused branch id.  Walks at most AUFS_BRANCH_MAX+1 candidates
+ * starting from si_last_br_id, skipping 0 and ids already in use.
+ * Returns -1 when every id is taken.  Requires si_rwsem held for
+ * writing.
+ */
+aufs_bindex_t au_new_br_id(struct super_block *sb)
+{
+       aufs_bindex_t br_id;
+       int i;
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
+               br_id = ++sbinfo->si_last_br_id;
+               AuDebugOn(br_id < 0);
+               /* id 0 is reserved; an id is free if no branch has it */
+               if (br_id && au_br_index(sb, br_id) < 0)
+                       return br_id;
+       }
+
+       return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* it is ok that new 'nwt' tasks are appended while we are sleeping */
+/*
+ * Take the superblock read lock, optionally flushing the pending
+ * nowait tasks first (AuLock_FLUSH).  Returns 0 on success, or the
+ * negative error from the pseudo-link maintenance check, in which case
+ * the lock is released again before returning.
+ */
+int si_read_lock(struct super_block *sb, int flags)
+{
+       int err;
+
+       if (au_ftest_lock(flags, FLUSH))
+               au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+       si_noflush_read_lock(sb);
+       /* 'err = 0' before this point was a dead store; au_plink_maint()
+        * unconditionally provides the result */
+       err = au_plink_maint(sb, flags);
+       if (unlikely(err))
+               si_read_unlock(sb);
+
+       return err;
+}
+
+/*
+ * Write-lock counterpart of si_read_lock(): optionally flush the
+ * nowait tasks, take si_rwsem for writing, then fail (and unlock)
+ * if pseudo-link maintenance forbids entry.
+ */
+int si_write_lock(struct super_block *sb, int flags)
+{
+       int err;
+
+       if (au_ftest_lock(flags, FLUSH))
+               au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+       si_noflush_write_lock(sb);
+       err = au_plink_maint(sb, flags);
+       if (unlikely(err))
+               si_write_unlock(sb);
+
+       return err;
+}
+
+/* dentry and super_block lock. call at entry point */
+/*
+ * Take the superblock lock for reading plus the dentry info lock
+ * (write lock when AuLock_DW is set, read lock otherwise).  When
+ * AuLock_GEN is set, additionally verify the dentry generation and
+ * release everything again on mismatch.  Returns 0 on success.
+ */
+int aufs_read_lock(struct dentry *dentry, int flags)
+{
+       int err;
+       struct super_block *sb;
+
+       sb = dentry->d_sb;
+       err = si_read_lock(sb, flags);
+       if (unlikely(err))
+               goto out;
+
+       if (au_ftest_lock(flags, DW))
+               di_write_lock_child(dentry);
+       else
+               di_read_lock_child(dentry, flags);
+
+       if (au_ftest_lock(flags, GEN)) {
+               err = au_digen_test(dentry, au_sigen(sb));
+               AuDebugOn(!err && au_dbrange_test(dentry));
+               if (unlikely(err))
+                       aufs_read_unlock(dentry, flags);
+       }
+
+out:
+       return err;
+}
+
+/* release the locks taken by aufs_read_lock(), in reverse order */
+void aufs_read_unlock(struct dentry *dentry, int flags)
+{
+       if (au_ftest_lock(flags, DW))
+               di_write_unlock(dentry);
+       else
+               di_read_unlock(dentry, flags);
+       si_read_unlock(dentry->d_sb);
+}
+
+/* superblock write lock plus dentry-info write lock, flushing first */
+void aufs_write_lock(struct dentry *dentry)
+{
+       si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW);
+       di_write_lock_child(dentry);
+}
+
+/* release the locks taken by aufs_write_lock(), in reverse order */
+void aufs_write_unlock(struct dentry *dentry)
+{
+       di_write_unlock(dentry);
+       si_write_unlock(dentry->d_sb);
+}
+
+/*
+ * Lock two dentries (e.g. for rename): superblock read lock plus the
+ * ordered double dentry-info write lock.  With AuLock_GEN set, both
+ * dentry generations are verified and everything is released again on
+ * mismatch.  Both dentries are assumed to share d1's superblock.
+ */
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
+{
+       int err;
+       unsigned int sigen;
+       struct super_block *sb;
+
+       sb = d1->d_sb;
+       err = si_read_lock(sb, flags);
+       if (unlikely(err))
+               goto out;
+
+       di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIR));
+
+       if (au_ftest_lock(flags, GEN)) {
+               sigen = au_sigen(sb);
+               err = au_digen_test(d1, sigen);
+               AuDebugOn(!err && au_dbrange_test(d1));
+               if (!err) {
+                       err = au_digen_test(d2, sigen);
+                       AuDebugOn(!err && au_dbrange_test(d2));
+               }
+               if (unlikely(err))
+                       aufs_read_and_write_unlock2(d1, d2);
+       }
+
+out:
+       return err;
+}
+
+/* release the locks taken by aufs_read_and_write_lock2() */
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+       di_write_unlock2(d1, d2);
+       si_read_unlock(d1->d_sb);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Slow-path test whether the current task already holds this
+ * superblock lock: pids above the bitmap range are tracked in a
+ * radix-tree instead.  Returns non-zero when the pid is registered.
+ */
+int si_pid_test_slow(struct super_block *sb)
+{
+       void *p;
+
+       rcu_read_lock();
+       p = radix_tree_lookup(&au_sbi(sb)->au_si_pid.tree, current->pid);
+       rcu_read_unlock();
+
+       return (long)!!p;
+}
+
+/*
+ * Slow-path registration of current->pid in the per-superblock
+ * radix-tree.  __GFP_NOFAIL preload means the insert cannot fail for
+ * lack of memory; failures would indicate a double registration, which
+ * the debug assertions catch.
+ */
+void si_pid_set_slow(struct super_block *sb)
+{
+       int err;
+       struct au_sbinfo *sbinfo;
+
+       AuDebugOn(si_pid_test_slow(sb));
+
+       sbinfo = au_sbi(sb);
+       err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+       AuDebugOn(err);
+       spin_lock(&sbinfo->au_si_pid.tree_lock);
+       err = radix_tree_insert(&sbinfo->au_si_pid.tree, current->pid,
+                               /*any valid ptr*/sb);
+       spin_unlock(&sbinfo->au_si_pid.tree_lock);
+       AuDebugOn(err);
+       radix_tree_preload_end();
+}
+
+/*
+ * Slow-path removal of current->pid from the radix-tree; the pid must
+ * have been registered by si_pid_set_slow() beforehand.
+ */
+void si_pid_clr_slow(struct super_block *sb)
+{
+       void *p;
+       struct au_sbinfo *sbinfo;
+
+       AuDebugOn(!si_pid_test_slow(sb));
+
+       sbinfo = au_sbi(sb);
+       spin_lock(&sbinfo->au_si_pid.tree_lock);
+       p = radix_tree_delete(&sbinfo->au_si_pid.tree, current->pid);
+       spin_unlock(&sbinfo->au_si_pid.tree_lock);
+}
diff --git a/fs/aufs/spl.h b/fs/aufs/spl.h
new file mode 100644 (file)
index 0000000..2d53e87
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * simple list protected by a spinlock
+ */
+
+#ifndef __AUFS_SPL_H__
+#define __AUFS_SPL_H__
+
+#ifdef __KERNEL__
+
+/* doubly-linked list guarded by its own spinlock */
+struct au_splhead {
+       spinlock_t              spin;
+       struct list_head        head;
+};
+
+static inline void au_spl_init(struct au_splhead *spl)
+{
+       spin_lock_init(&spl->spin);
+       INIT_LIST_HEAD(&spl->head);
+}
+
+/* add @list at the head of @spl under its lock */
+static inline void au_spl_add(struct list_head *list, struct au_splhead *spl)
+{
+       spin_lock(&spl->spin);
+       list_add(list, &spl->head);
+       spin_unlock(&spl->spin);
+}
+
+static inline void au_spl_del(struct list_head *list, struct au_splhead *spl)
+{
+       spin_lock(&spl->spin);
+       list_del(list);
+       spin_unlock(&spl->spin);
+}
+
+/* RCU-safe removal: readers traversing under rcu_read_lock stay valid */
+static inline void au_spl_del_rcu(struct list_head *list,
+                                 struct au_splhead *spl)
+{
+       spin_lock(&spl->spin);
+       list_del_rcu(list);
+       spin_unlock(&spl->spin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* hlist variant of the same idea */
+struct au_sphlhead {
+       spinlock_t              spin;
+       struct hlist_head       head;
+};
+
+static inline void au_sphl_init(struct au_sphlhead *sphl)
+{
+       spin_lock_init(&sphl->spin);
+       INIT_HLIST_HEAD(&sphl->head);
+}
+
+static inline void au_sphl_add(struct hlist_node *hlist,
+                              struct au_sphlhead *sphl)
+{
+       spin_lock(&sphl->spin);
+       hlist_add_head(hlist, &sphl->head);
+       spin_unlock(&sphl->spin);
+}
+
+static inline void au_sphl_del(struct hlist_node *hlist,
+                              struct au_sphlhead *sphl)
+{
+       spin_lock(&sphl->spin);
+       hlist_del(hlist);
+       spin_unlock(&sphl->spin);
+}
+
+/* RCU-safe removal from the hlist */
+static inline void au_sphl_del_rcu(struct hlist_node *hlist,
+                                  struct au_sphlhead *sphl)
+{
+       spin_lock(&sphl->spin);
+       hlist_del_rcu(hlist);
+       spin_unlock(&sphl->spin);
+}
+
+/* count the entries; O(n) walk under the lock */
+static inline unsigned long au_sphl_count(struct au_sphlhead *sphl)
+{
+       unsigned long cnt;
+       struct hlist_node *pos;
+
+       cnt = 0;
+       spin_lock(&sphl->spin);
+       hlist_for_each(pos, &sphl->head)
+               cnt++;
+       spin_unlock(&sphl->spin);
+       return cnt;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SPL_H__ */
diff --git a/fs/aufs/super.c b/fs/aufs/super.c
new file mode 100644 (file)
index 0000000..d105672
--- /dev/null
@@ -0,0 +1,1009 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * mount and super_block operations
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/*
+ * super_operations
+ */
+/*
+ * super_operations.alloc_inode: allocate an inode container from the
+ * aufs slab cache.  Returns NULL on allocation failure (the VFS maps
+ * that to -ENOMEM).
+ */
+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
+{
+       struct au_icntnr *c;
+
+       c = au_cache_alloc_icntnr();
+       if (c) {
+               au_icntnr_init(c);
+               c->vfs_inode.i_version = 1; /* sigen(sb); */
+               c->iinfo.ii_hinode = NULL;
+               return &c->vfs_inode;
+       }
+       return NULL;
+}
+
+/* RCU callback: actually free the inode container after a grace period */
+static void aufs_destroy_inode_cb(struct rcu_head *head)
+{
+       struct inode *inode = container_of(head, struct inode, i_rcu);
+
+       INIT_LIST_HEAD(&inode->i_dentry);
+       au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
+}
+
+/* super_operations.destroy_inode: tear down iinfo, then free via RCU */
+static void aufs_destroy_inode(struct inode *inode)
+{
+       au_iinfo_fin(inode);
+       call_rcu(&inode->i_rcu, aufs_destroy_inode_cb);
+}
+
+/*
+ * iget_locked() wrapper that also initializes the aufs-specific parts
+ * (xigen, iinfo) for freshly created inodes.  Never returns NULL;
+ * failures come back as ERR_PTR().  A new inode is returned still
+ * locked (I_NEW), as with plain iget_locked().
+ */
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
+{
+       struct inode *inode;
+       int err;
+
+       inode = iget_locked(sb, ino);
+       if (unlikely(!inode)) {
+               inode = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+       /* an existing, already initialized inode: nothing more to do */
+       if (!(inode->i_state & I_NEW))
+               goto out;
+
+       err = au_xigen_new(inode);
+       if (!err)
+               err = au_iinfo_init(inode);
+       if (!err)
+               inode->i_version++;
+       else {
+               /* marks the inode bad and unlocks it */
+               iget_failed(inode);
+               inode = ERR_PTR(err);
+       }
+
+out:
+       /* never return NULL */
+       AuDebugOn(!inode);
+       AuTraceErrPtr(inode);
+       return inode;
+}
+
+/* lock free root dinfo */
+/*
+ * Print the branch list ("path=perm:path=perm:...") into @seq for
+ * /proc/mounts style output.  Accesses the root dinfo without taking
+ * its lock (see the note above); the superblock lock is expected to be
+ * held by the caller.
+ */
+static int au_show_brs(struct seq_file *seq, struct super_block *sb)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       struct path path;
+       struct au_hdentry *hdp;
+       struct au_branch *br;
+       char *perm;
+
+       err = 0;
+       bend = au_sbend(sb);
+       hdp = au_di(sb->s_root)->di_hdentry;
+       for (bindex = 0; !err && bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               path.mnt = au_br_mnt(br);
+               path.dentry = hdp[bindex].hd_dentry;
+               err = au_seq_path(seq, &path);
+               if (err > 0) {
+                       /* au_optstr_br_perm() allocates; we must free it */
+                       perm = au_optstr_br_perm(br->br_perm);
+                       if (perm) {
+                               err = seq_printf(seq, "=%s", perm);
+                               kfree(perm);
+                               if (err == -1)
+                                       err = -E2BIG;
+                       } else
+                               err = -ENOMEM;
+               }
+               if (!err && bindex != bend)
+                       err = seq_putc(seq, ':');
+       }
+
+       return err;
+}
+
+/*
+ * Emit the ",create=<policy>" mount option for the active
+ * writable-branch creation policy @v.  Policies with parameters
+ * (expiry seconds, watermark bytes) are printed with their current
+ * values rather than the bare policy name.
+ */
+static void au_show_wbr_create(struct seq_file *m, int v,
+                              struct au_sbinfo *sbinfo)
+{
+       const char *pat;
+
+       AuRwMustAnyLock(&sbinfo->si_rwsem);
+
+       seq_puts(m, ",create=");
+       pat = au_optstr_wbr_create(v);
+       switch (v) {
+       case AuWbrCreate_TDP:
+       case AuWbrCreate_RR:
+       case AuWbrCreate_MFS:
+       case AuWbrCreate_PMFS:
+               /* parameterless policies: just the name */
+               seq_puts(m, pat);
+               break;
+       case AuWbrCreate_MFSV:
+               seq_printf(m, /*pat*/"mfs:%lu",
+                          jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+                          / MSEC_PER_SEC);
+               break;
+       case AuWbrCreate_PMFSV:
+               seq_printf(m, /*pat*/"pmfs:%lu",
+                          jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+                          / MSEC_PER_SEC);
+               break;
+       case AuWbrCreate_MFSRR:
+               seq_printf(m, /*pat*/"mfsrr:%llu",
+                          sbinfo->si_wbr_mfs.mfsrr_watermark);
+               break;
+       case AuWbrCreate_MFSRRV:
+               seq_printf(m, /*pat*/"mfsrr:%llu:%lu",
+                          sbinfo->si_wbr_mfs.mfsrr_watermark,
+                          jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+                          / MSEC_PER_SEC);
+               break;
+       case AuWbrCreate_PMFSRR:
+               seq_printf(m, /*pat*/"pmfsrr:%llu",
+                          sbinfo->si_wbr_mfs.mfsrr_watermark);
+               break;
+       case AuWbrCreate_PMFSRRV:
+               seq_printf(m, /*pat*/"pmfsrr:%llu:%lu",
+                          sbinfo->si_wbr_mfs.mfsrr_watermark,
+                          jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+                          / MSEC_PER_SEC);
+               break;
+       }
+}
+
+/*
+ * Emit the ",xino=<path>" mount option when the external inode-number
+ * table is not at its default location.  With CONFIG_SYSFS the xino
+ * path is exported via sysfs instead, so nothing is printed here.
+ */
+static int au_show_xino(struct seq_file *seq, struct vfsmount *mnt)
+{
+#ifdef CONFIG_SYSFS
+       return 0;
+#else
+       int err;
+       const int len = sizeof(AUFS_XINO_FNAME) - 1;
+       aufs_bindex_t bindex, brid;
+       struct super_block *sb;
+       struct qstr *name;
+       struct file *f;
+       struct dentry *d, *h_root;
+       struct au_hdentry *hdp;
+
+       err = 0;
+       sb = mnt->mnt_sb;
+       /*
+        * fix: the original asserted on an undeclared 'sbinfo' variable,
+        * which cannot compile when CONFIG_SYSFS is disabled; assert via
+        * the superblock instead, after 'sb' is set.
+        */
+       AuRwMustAnyLock(&au_sbi(sb)->si_rwsem);
+       f = au_sbi(sb)->si_xib;
+       if (!f)
+               goto out;
+
+       /* stop printing the default xino path on the first writable branch */
+       h_root = NULL;
+       brid = au_xino_brid(sb);
+       if (brid >= 0) {
+               bindex = au_br_index(sb, brid);
+               hdp = au_di(sb->s_root)->di_hdentry;
+               h_root = hdp[0 + bindex].hd_dentry;
+       }
+       d = f->f_dentry;
+       name = &d->d_name;
+       /* safe ->d_parent because the file is unlinked */
+       if (d->d_parent == h_root
+           && name->len == len
+           && !memcmp(name->name, AUFS_XINO_FNAME, len))
+               goto out;
+
+       seq_puts(seq, ",xino=");
+       err = au_xino_path(seq, f);
+
+out:
+       return err;
+#endif
+}
+
+/* seq_file will re-call me in case of too long string */
+/*
+ * super_operations.show_options: print every mount option that differs
+ * from its default.  The helper macros compare against AuOpt_Def /
+ * AUFS_*_DEF so default settings stay out of /proc/mounts.
+ */
+static int aufs_show_options(struct seq_file *m, struct vfsmount *mnt)
+{
+       int err;
+       unsigned int mnt_flags, v;
+       struct super_block *sb;
+       struct au_sbinfo *sbinfo;
+
+#define AuBool(name, str) do { \
+       v = au_opt_test(mnt_flags, name); \
+       if (v != au_opt_test(AuOpt_Def, name)) \
+               seq_printf(m, ",%s" #str, v ? "" : "no"); \
+} while (0)
+
+#define AuStr(name, str) do { \
+       v = mnt_flags & AuOptMask_##name; \
+       if (v != (AuOpt_Def & AuOptMask_##name)) \
+               seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
+} while (0)
+
+#define AuUInt(name, str, val) do { \
+       if (val != AUFS_##name##_DEF) \
+               seq_printf(m, "," #str "=%u", val); \
+} while (0)
+
+       /* lock free root dinfo */
+       sb = mnt->mnt_sb;
+       si_noflush_read_lock(sb);
+       sbinfo = au_sbi(sb);
+       seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
+
+       mnt_flags = au_mntflags(sb);
+       if (au_opt_test(mnt_flags, XINO)) {
+               err = au_show_xino(m, mnt);
+               if (unlikely(err))
+                       goto out;
+       } else
+               seq_puts(m, ",noxino");
+
+       AuBool(TRUNC_XINO, trunc_xino);
+       AuStr(UDBA, udba);
+       AuBool(SHWH, shwh);
+       AuBool(PLINK, plink);
+       AuBool(DIO, dio);
+       /* AuBool(DIRPERM1, dirperm1); */
+       /* AuBool(REFROF, refrof); */
+
+       v = sbinfo->si_wbr_create;
+       if (v != AuWbrCreate_Def)
+               au_show_wbr_create(m, v, sbinfo);
+
+       v = sbinfo->si_wbr_copyup;
+       if (v != AuWbrCopyup_Def)
+               seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
+
+       v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
+       if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
+               seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
+
+       AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
+
+       v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC;
+       AuUInt(RDCACHE, rdcache, v);
+
+       AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
+       AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
+
+       AuBool(SUM, sum);
+       /* AuBool(SUM_W, wsum); */
+       AuBool(WARN_PERM, warn_perm);
+       AuBool(VERBOSE, verbose);
+
+out:
+       /* be sure to print "br:" last */
+       if (!sysaufs_brs) {
+               seq_puts(m, ",br:");
+               au_show_brs(m, sb);
+       }
+       /* NOTE(review): 'err' from au_show_xino() is discarded here and 0 is
+        * returned unconditionally — confirm this is the intended behavior */
+       si_read_unlock(sb);
+       return 0;
+
+#undef AuBool
+#undef AuStr
+#undef AuUInt
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* sum mode which returns the summation for statfs(2) */
+
+/*
+ * Saturating u64 addition: returns a+b, or ULLONG_MAX on overflow.
+ * Unsigned wrap-around on addition always yields a result smaller than
+ * either operand, so 'old <= a' is a reliable overflow test here.
+ */
+static u64 au_add_till_max(u64 a, u64 b)
+{
+       u64 old;
+
+       old = a;
+       a += b;
+       if (old <= a)
+               return a;
+       return ULLONG_MAX;
+}
+
+/*
+ * Saturating u64 multiplication: returns a*mul, or ULLONG_MAX on
+ * (detected) overflow.
+ * NOTE(review): 'old <= a' is only a heuristic for multiplication — a
+ * wrap that lands above 'old' goes undetected, and mul == 0 would be
+ * misreported.  The callers in au_statfs_sum() pass factor >= 1 derived
+ * from block sizes, so this appears acceptable in practice — confirm.
+ */
+static u64 au_mul_till_max(u64 a, long mul)
+{
+       u64 old;
+
+       old = a;
+       a *= mul;
+       if (old <= a)
+               return a;
+       return ULLONG_MAX;
+}
+
+/*
+ * statfs in "sum" mode: accumulate the statistics of every distinct
+ * branch filesystem (branches sharing a super_block are counted once).
+ * Block counts are normalized to the smallest block size seen so far,
+ * using saturating arithmetic so huge totals clamp at ULLONG_MAX
+ * instead of wrapping.
+ */
+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
+{
+       int err;
+       long bsize, factor;
+       u64 blocks, bfree, bavail, files, ffree;
+       aufs_bindex_t bend, bindex, i;
+       unsigned char shared;
+       struct path h_path;
+       struct super_block *h_sb;
+
+       err = 0;
+       bsize = LONG_MAX;
+       files = 0;
+       ffree = 0;
+       blocks = 0;
+       bfree = 0;
+       bavail = 0;
+       bend = au_sbend(sb);
+       for (bindex = 0; bindex <= bend; bindex++) {
+               h_path.mnt = au_sbr_mnt(sb, bindex);
+               h_sb = h_path.mnt->mnt_sb;
+               /* count each underlying filesystem only once */
+               shared = 0;
+               for (i = 0; !shared && i < bindex; i++)
+                       shared = (au_sbr_sb(sb, i) == h_sb);
+               if (shared)
+                       continue;
+
+               /* sb->s_root for NFS is unreliable */
+               h_path.dentry = h_path.mnt->mnt_root;
+               err = vfs_statfs(&h_path, buf);
+               if (unlikely(err))
+                       goto out;
+
+               if (bsize > buf->f_bsize) {
+                       /*
+                        * we will reduce bsize, so we have to expand blocks
+                        * etc. to match them again
+                        */
+                       factor = (bsize / buf->f_bsize);
+                       blocks = au_mul_till_max(blocks, factor);
+                       bfree = au_mul_till_max(bfree, factor);
+                       bavail = au_mul_till_max(bavail, factor);
+                       bsize = buf->f_bsize;
+               }
+
+               /* scale this branch's counts to the common (smallest) bsize */
+               factor = (buf->f_bsize / bsize);
+               blocks = au_add_till_max(blocks,
+                               au_mul_till_max(buf->f_blocks, factor));
+               bfree = au_add_till_max(bfree,
+                               au_mul_till_max(buf->f_bfree, factor));
+               bavail = au_add_till_max(bavail,
+                               au_mul_till_max(buf->f_bavail, factor));
+               files = au_add_till_max(files, buf->f_files);
+               ffree = au_add_till_max(ffree, buf->f_ffree);
+       }
+
+       buf->f_bsize = bsize;
+       buf->f_blocks = blocks;
+       buf->f_bfree = bfree;
+       buf->f_bavail = bavail;
+       buf->f_files = files;
+       buf->f_ffree = ffree;
+       buf->f_frsize = 0;
+
+out:
+       return err;
+}
+
+/*
+ * super_operations.statfs: either forward to the first branch (the
+ * default) or, with the 'sum' option, aggregate over all branches via
+ * au_statfs_sum().  On success the aufs magic and name limits replace
+ * the branch's own values.
+ */
+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+       int err;
+       struct path h_path;
+       struct super_block *sb;
+
+       /* lock free root dinfo */
+       sb = dentry->d_sb;
+       si_noflush_read_lock(sb);
+       if (!au_opt_test(au_mntflags(sb), SUM)) {
+               /* sb->s_root for NFS is unreliable */
+               h_path.mnt = au_sbr_mnt(sb, 0);
+               h_path.dentry = h_path.mnt->mnt_root;
+               err = vfs_statfs(&h_path, buf);
+       } else
+               err = au_statfs_sum(sb, buf);
+       si_read_unlock(sb);
+
+       if (!err) {
+               buf->f_type = AUFS_SUPER_MAGIC;
+               buf->f_namelen = AUFS_MAX_NAMELEN;
+               memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
+       }
+       /* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * super_operations->sync_fs: propagate to ->sync_fs of every writable
+ * branch.  errors are remembered but do not stop the loop; the first
+ * error code seen is returned.
+ */
+static int aufs_sync_fs(struct super_block *sb, int wait)
+{
+       int err, e;
+       aufs_bindex_t bend, bindex;
+       struct au_branch *br;
+       struct super_block *h_sb;
+
+       err = 0;
+       si_noflush_read_lock(sb);
+       bend = au_sbend(sb);
+       for (bindex = 0; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               /* read-only branches have nothing to sync */
+               if (!au_br_writable(br->br_perm))
+                       continue;
+
+               h_sb = au_sbr_sb(sb, bindex);
+               if (h_sb->s_op->sync_fs) {
+                       e = h_sb->s_op->sync_fs(h_sb, wait);
+                       if (unlikely(e && !err))
+                               err = e;
+                       /* go on even if an error happens */
+               }
+       }
+       si_read_unlock(sb);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* final actions when unmounting a file system */
+static void aufs_put_super(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+
+       sbinfo = au_sbi(sb);
+       if (!sbinfo)
+               return;
+
+       /*
+        * tear down the debugfs entries, then drop the last kobject
+        * reference; presumably the kobject release (au_si_free() in
+        * sbinfo.c) frees sbinfo itself -- verify there.
+        */
+       dbgaufs_si_fin(sbinfo);
+       kobject_put(&sbinfo->si_kobj);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* free an array obtained from au_array_alloc() (kmalloc or vmalloc) */
+void au_array_free(void *array)
+{
+       if (!array)
+               return;
+
+       if (is_vmalloc_addr(array))
+               vfree(array);
+       else
+               kfree(array);
+}
+
+/*
+ * allocate an array of *hint pointers (kmalloc, falling back to
+ * vmalloc) and fill it through @cb.
+ * on return *hint is set to the number of entries actually filled.
+ * returns the array, or an ERR_PTR on failure (*hint is then 0).
+ */
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg)
+{
+       void *array;
+       unsigned long long n;
+
+       array = NULL;
+       n = 0;
+       if (!*hint)
+               goto out;
+
+       /*
+        * overflow guard for the multiplications below.
+        * NOTE(review): the check is against ULLONG_MAX although
+        * kmalloc/vmalloc take size_t; on 32bit a *hint below this limit
+        * could in theory still wrap size_t -- confirm the practical
+        * bounds of the callers (inode/file counts).
+        */
+       if (*hint > ULLONG_MAX / sizeof(array)) {
+               array = ERR_PTR(-EMFILE);
+               pr_err("hint %llu\n", *hint);
+               goto out;
+       }
+
+       /* sizeof(array) == sizeof(void *), the element size */
+       array = kmalloc(sizeof(array) * *hint, GFP_NOFS);
+       if (unlikely(!array))
+               array = vmalloc(sizeof(array) * *hint);
+       if (unlikely(!array)) {
+               array = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       n = cb(array, *hint, arg);
+       AuDebugOn(n > *hint);
+
+out:
+       *hint = n;
+       return array;
+}
+
+/*
+ * au_array_alloc() callback: collect the in-use aufs inodes on the
+ * superblock's inode list (@arg) into @a, grabbing a reference to each.
+ * skips bad inodes, inodes without a valid branch (ii_bstart < 0) and
+ * inodes nobody references (i_count == 0).  returns the count.
+ */
+static unsigned long long au_iarray_cb(void *a,
+                                      unsigned long long max __maybe_unused,
+                                      void *arg)
+{
+       unsigned long long n;
+       struct inode **p, *inode;
+       struct list_head *head;
+
+       n = 0;
+       p = a;
+       head = arg;
+       spin_lock(&inode_sb_list_lock);
+       list_for_each_entry(inode, head, i_sb_list) {
+               if (!is_bad_inode(inode)
+                   && au_ii(inode)->ii_bstart >= 0) {
+                       spin_lock(&inode->i_lock);
+                       /* skip inodes which nobody references any more */
+                       if (atomic_read(&inode->i_count)) {
+                               au_igrab(inode);
+                               *p++ = inode;
+                               n++;
+                               AuDebugOn(n > max);
+                       }
+                       spin_unlock(&inode->i_lock);
+               }
+       }
+       spin_unlock(&inode_sb_list_lock);
+
+       return n;
+}
+
+/*
+ * snapshot the in-use inodes of @sb into a new array.
+ * *max is seeded with si_ninodes as the allocation hint and receives
+ * the number of collected entries.  free with au_iarray_free().
+ */
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max)
+{
+       *max = atomic_long_read(&au_sbi(sb)->si_ninodes);
+       return au_array_alloc(max, au_iarray_cb, &sb->s_inodes);
+}
+
+/* put every inode reference taken by au_iarray_cb() and free the array */
+void au_iarray_free(struct inode **a, unsigned long long max)
+{
+       unsigned long long i;
+
+       for (i = 0; i < max; i++)
+               iput(a[i]);
+       au_array_free(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * refresh dentry and inode at remount time.
+ * for a directory (@dir_flags non-zero) the hnotify watch flags are
+ * reset after a successful refresh.
+ */
+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */
+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags,
+                     struct dentry *parent)
+{
+       int err;
+
+       /* lock order: child di-write, then parent di-read (+ inode) */
+       di_write_lock_child(dentry);
+       di_read_lock_parent(parent, AuLock_IR);
+       err = au_refresh_dentry(dentry, parent);
+       if (!err && dir_flags)
+               au_hn_reset(dentry->d_inode, dir_flags);
+       di_read_unlock(parent, AuLock_IR);
+       di_write_unlock(dentry);
+
+       return err;
+}
+
+/*
+ * refresh @dentry when its generation is stale while its parent's is
+ * already current (the tree is processed top-down by au_refresh_d()).
+ * a failed refresh of a directory sets FAILED_REFRESH_DIR on @sbinfo,
+ * so dirs will be refreshed again at access time.
+ */
+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen,
+                          struct au_sbinfo *sbinfo,
+                          const unsigned int dir_flags)
+{
+       int err;
+       struct dentry *parent;
+       struct inode *inode;
+
+       err = 0;
+       parent = dget_parent(dentry);
+       if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) {
+               inode = dentry->d_inode;
+               if (inode) {
+                       if (!S_ISDIR(inode->i_mode))
+                               err = au_do_refresh(dentry, /*dir_flags*/0,
+                                                parent);
+                       else {
+                               err = au_do_refresh(dentry, dir_flags, parent);
+                               if (unlikely(err))
+                                       au_fset_si(sbinfo, FAILED_REFRESH_DIR);
+                       }
+               } else
+                       /* negative dentry: no inode, no hnotify flags */
+                       err = au_do_refresh(dentry, /*dir_flags*/0, parent);
+               AuDbgDentry(dentry);
+       }
+       dput(parent);
+
+       AuTraceErr(err);
+       return err;
+}
+
+/*
+ * walk all cached dentries under the root and refresh the stale ones.
+ * errors are collected but the walk continues; the first error seen is
+ * returned.
+ */
+static int au_refresh_d(struct super_block *sb)
+{
+       int err, i, j, ndentry, e;
+       unsigned int sigen;
+       struct au_dcsub_pages dpages;
+       struct au_dpage *dpage;
+       struct dentry **dentries, *d;
+       struct au_sbinfo *sbinfo;
+       struct dentry *root = sb->s_root;
+       const unsigned int dir_flags = au_hi_flags(root->d_inode, /*isdir*/1);
+
+       err = au_dpages_init(&dpages, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+       /* gather the whole subtree of cached dentries under root */
+       err = au_dcsub_pages(&dpages, root, NULL, NULL);
+       if (unlikely(err))
+               goto out_dpages;
+
+       sigen = au_sigen(sb);
+       sbinfo = au_sbi(sb);
+       for (i = 0; i < dpages.ndpage; i++) {
+               dpage = dpages.dpages + i;
+               dentries = dpage->dentries;
+               ndentry = dpage->ndentry;
+               for (j = 0; j < ndentry; j++) {
+                       d = dentries[j];
+                       e = au_do_refresh_d(d, sigen, sbinfo, dir_flags);
+                       if (unlikely(e && !err))
+                               err = e;
+                       /* go on even err */
+               }
+       }
+
+out_dpages:
+       au_dpages_free(&dpages);
+out:
+       return err;
+}
+
+/*
+ * refresh every in-use inode whose generation is stale.
+ * errors are logged and collected; the walk continues, and the first
+ * error seen is returned.
+ */
+static int au_refresh_i(struct super_block *sb)
+{
+       int err, e;
+       unsigned int sigen;
+       unsigned long long max, ull;
+       struct inode *inode, **array;
+
+       array = au_iarray_alloc(sb, &max);
+       err = PTR_ERR(array);
+       if (IS_ERR(array))
+               goto out;
+
+       err = 0;
+       sigen = au_sigen(sb);
+       for (ull = 0; ull < max; ull++) {
+               inode = array[ull];
+               if (au_iigen(inode, NULL) != sigen) {
+                       ii_write_lock_child(inode);
+                       e = au_refresh_hinode_self(inode);
+                       ii_write_unlock(inode);
+                       if (unlikely(e)) {
+                               pr_err("error %d, i%lu\n", e, inode->i_ino);
+                               if (!err)
+                                       err = e;
+                               /* go on even if err */
+                       }
+               }
+       }
+
+       /* drops the references taken by au_iarray_alloc() */
+       au_iarray_free(array, max);
+
+out:
+       return err;
+}
+
+/*
+ * refresh the whole tree after a remount (also used by aufs_kill_sb()
+ * when disabling hnotify).
+ * called with si write-locked and the root dentry di write-locked; the
+ * root di-lock is dropped while dentries/inodes are refreshed and
+ * re-acquired before returning.  errors are logged and ignored.
+ */
+static void au_remount_refresh(struct super_block *sb)
+{
+       int err, e;
+       unsigned int udba;
+       aufs_bindex_t bindex, bend;
+       struct dentry *root;
+       struct inode *inode;
+       struct au_branch *br;
+
+       /* bump the generation so stale dentries/inodes can be detected */
+       au_sigen_inc(sb);
+       au_fclr_si(au_sbi(sb), FAILED_REFRESH_DIR);
+
+       root = sb->s_root;
+       DiMustNoWaiters(root);
+       inode = root->d_inode;
+       IiMustNoWaiters(inode);
+
+       udba = au_opt_udba(sb);
+       bend = au_sbend(sb);
+       for (bindex = 0; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               err = au_hnotify_reset_br(udba, br, br->br_perm);
+               if (unlikely(err))
+                       AuIOErr("hnotify failed on br %d, %d, ignored\n",
+                               bindex, err);
+               /* go on even if err */
+       }
+       au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1));
+
+       di_write_unlock(root);
+       err = au_refresh_d(sb);
+       e = au_refresh_i(sb);
+       if (unlikely(e && !err))
+               err = e;
+       /* aufs_write_lock() calls ..._child() */
+       di_write_lock_child(root);
+
+       au_cpup_attr_all(inode, /*force*/1);
+
+       if (unlikely(err))
+               AuIOErr("refresh failed, ignored, %d\n", err);
+}
+
+/* stop extra interpretation of errno in mount(8), and strange error messages */
+static int cvt_err(int err)
+{
+       AuTraceErr(err);
+
+       if (err == -ENOENT || err == -ENOTDIR || err == -EEXIST
+           || err == -EIO)
+               return -EINVAL;
+       return err;
+}
+
+/*
+ * super_operations->remount_fs.
+ * without new option data, just re-verify the current options against
+ * *flags.  otherwise the options are parsed first (before any aufs
+ * lock), then applied under i_mutex -> si write -> root di write,
+ * optionally refreshing the tree and the dynamic address ops.
+ */
+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+       int err, do_dx;
+       unsigned int mntflags;
+       struct au_opts opts;
+       struct dentry *root;
+       struct inode *inode;
+       struct au_sbinfo *sbinfo;
+
+       err = 0;
+       root = sb->s_root;
+       if (!data || !*data) {
+               err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+               if (!err) {
+                       di_write_lock_child(root);
+                       err = au_opts_verify(sb, *flags, /*pending*/0);
+                       aufs_write_unlock(root);
+               }
+               goto out;
+       }
+
+       err = -ENOMEM;
+       memset(&opts, 0, sizeof(opts));
+       opts.opt = (void *)__get_free_page(GFP_NOFS);
+       if (unlikely(!opts.opt))
+               goto out;
+       opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+       opts.flags = AuOpts_REMOUNT;
+       opts.sb_flags = *flags;
+
+       /* parse it before aufs lock */
+       err = au_opts_parse(sb, data, &opts);
+       if (unlikely(err))
+               goto out_opts;
+
+       sbinfo = au_sbi(sb);
+       inode = root->d_inode;
+       /* lock vfs inode first, then aufs (same order as aufs_fill_super) */
+       mutex_lock(&inode->i_mutex);
+       err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+       if (unlikely(err))
+               goto out_mtx;
+       di_write_lock_child(root);
+
+       /* au_opts_remount() may return an error */
+       err = au_opts_remount(sb, &opts);
+       au_opts_free(&opts);
+
+       /* opts.flags survives au_opts_free(); only opts.opt was freed */
+       if (au_ftest_opts(opts.flags, REFRESH))
+               au_remount_refresh(sb);
+
+       if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) {
+               mntflags = au_mntflags(sb);
+               do_dx = !!au_opt_test(mntflags, DIO);
+               au_dy_arefresh(do_dx);
+       }
+
+       aufs_write_unlock(root);
+
+out_mtx:
+       mutex_unlock(&inode->i_mutex);
+out_opts:
+       free_page((unsigned long)opts.opt);
+out:
+       err = cvt_err(err);
+       AuTraceErr(err);
+       return err;
+}
+
+/* aufs super_operations; see the individual functions above */
+static const struct super_operations aufs_sop = {
+       .alloc_inode    = aufs_alloc_inode,
+       .destroy_inode  = aufs_destroy_inode,
+       /* always deleting, no clearing */
+       .drop_inode     = generic_delete_inode,
+       .show_options   = aufs_show_options,
+       .statfs         = aufs_statfs,
+       .put_super      = aufs_put_super,
+       .sync_fs        = aufs_sync_fs,
+       .remount_fs     = aufs_remount_fs
+};
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create the root inode and dentry for a new aufs superblock.
+ * on success sb->s_root is set and 0 is returned; otherwise a negative
+ * error code is returned and all partial state is released.
+ */
+static int alloc_root(struct super_block *sb)
+{
+       int err;
+       struct inode *inode;
+       struct dentry *root;
+
+       inode = au_iget_locked(sb, AUFS_ROOT_INO);
+       err = PTR_ERR(inode);
+       if (IS_ERR(inode))
+               goto out;
+
+       inode->i_op = &aufs_dir_iop;
+       inode->i_fop = &aufs_dir_fop;
+       inode->i_mode = S_IFDIR;
+       set_nlink(inode, 2);
+       unlock_new_inode(inode);
+
+       /*
+        * d_alloc_root() returns NULL on failure, never an ERR_PTR.
+        * set -ENOMEM explicitly here: previously 'err' still held
+        * PTR_ERR() of the valid inode, ie. a garbage "error" code,
+        * and the IS_ERR(root) test was dead code.
+        */
+       root = d_alloc_root(inode);
+       err = -ENOMEM;
+       if (unlikely(!root))
+               goto out_iput;
+
+       err = au_di_init(root);
+       if (!err) {
+               sb->s_root = root;
+               return 0; /* success */
+       }
+       /* dput() drops the inode reference now owned by the dentry */
+       dput(root);
+       goto out; /* do not iput */
+
+out_iput:
+       iget_failed(inode);
+out:
+       return err;
+
+}
+
+/*
+ * fill_super callback passed to mount_nodev(): allocate sbinfo, set up
+ * the super_operations and root dentry, then parse and apply the mount
+ * options.
+ * NOTE(review): the error path after alloc_root() calls
+ * si_write_unlock(), which implies au_si_alloc() returns with si
+ * write-locked; likewise aufs_write_unlock(root) below implies
+ * au_di_init() leaves the root di write-locked -- confirm in
+ * sbinfo.c/dinfo.c.
+ */
+static int aufs_fill_super(struct super_block *sb, void *raw_data,
+                          int silent __maybe_unused)
+{
+       int err;
+       struct au_opts opts;
+       struct dentry *root;
+       struct inode *inode;
+       char *arg = raw_data;
+
+       /* aufs is useless without at least one branch option */
+       if (unlikely(!arg || !*arg)) {
+               err = -EINVAL;
+               pr_err("no arg\n");
+               goto out;
+       }
+
+       err = -ENOMEM;
+       memset(&opts, 0, sizeof(opts));
+       opts.opt = (void *)__get_free_page(GFP_NOFS);
+       if (unlikely(!opts.opt))
+               goto out;
+       opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+       opts.sb_flags = sb->s_flags;
+
+       err = au_si_alloc(sb);
+       if (unlikely(err))
+               goto out_opts;
+
+       /* all timestamps always follow the ones on the branch */
+       sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
+       sb->s_op = &aufs_sop;
+       sb->s_d_op = &aufs_dop;
+       sb->s_magic = AUFS_SUPER_MAGIC;
+       sb->s_maxbytes = 0;
+       au_export_init(sb);
+
+       err = alloc_root(sb);
+       if (unlikely(err)) {
+               si_write_unlock(sb);
+               goto out_info;
+       }
+       root = sb->s_root;
+       inode = root->d_inode;
+
+       /*
+        * actually we can parse options regardless aufs lock here.
+        * but at remount time, parsing must be done before aufs lock.
+        * so we follow the same rule.
+        */
+       ii_write_lock_parent(inode);
+       aufs_write_unlock(root);
+       err = au_opts_parse(sb, arg, &opts);
+       if (unlikely(err))
+               goto out_root;
+
+       /* lock vfs_inode first, then aufs. */
+       mutex_lock(&inode->i_mutex);
+       aufs_write_lock(root);
+       err = au_opts_mount(sb, &opts);
+       au_opts_free(&opts);
+       aufs_write_unlock(root);
+       mutex_unlock(&inode->i_mutex);
+       if (!err)
+               goto out_opts; /* success */
+
+out_root:
+       dput(root);
+       sb->s_root = NULL;
+out_info:
+       dbgaufs_si_fin(au_sbi(sb));
+       kobject_put(&au_sbi(sb)->si_kobj);
+       sb->s_fs_info = NULL;
+out_opts:
+       free_page((unsigned long)opts.opt);
+out:
+       AuTraceErr(err);
+       err = cvt_err(err);
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * file_system_type->mount for aufs.
+ * after mount_nodev() succeeds, register the branches in sysfs and add
+ * the new superblock to the global sbinfo list.
+ */
+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags,
+                                const char *dev_name __maybe_unused,
+                                void *raw_data)
+{
+       struct dentry *root;
+       struct super_block *sb;
+
+       /* all timestamps always follow the ones on the branch */
+       /* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
+       root = mount_nodev(fs_type, flags, raw_data, aufs_fill_super);
+       if (IS_ERR(root))
+               goto out;
+
+       sb = root->d_sb;
+       si_write_lock(sb, !AuLock_FLUSH);
+       sysaufs_brs_add(sb, 0);
+       si_write_unlock(sb);
+       au_sbilist_add(sb);
+
+out:
+       return root;
+}
+
+/*
+ * file_system_type->kill_sb.
+ * undo mount-time state (wbr create policy, hnotify watches,
+ * pseudo-links, xino files), flush the pending nowait tasks, then let
+ * generic_shutdown_super() perform the common teardown (which ends in
+ * aufs_put_super()).
+ */
+static void aufs_kill_sb(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+
+       sbinfo = au_sbi(sb);
+       if (sbinfo) {
+               au_sbilist_del(sb);
+               aufs_write_lock(sb->s_root);
+               if (sbinfo->si_wbr_create_ops->fin)
+                       sbinfo->si_wbr_create_ops->fin(sb);
+               if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) {
+                       /* disable hnotify before tearing down the tree */
+                       au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE);
+                       au_remount_refresh(sb);
+               }
+               if (au_opt_test(sbinfo->si_mntflags, PLINK))
+                       au_plink_put(sb, /*verbose*/1);
+               au_xino_clr(sb);
+               sbinfo->si_sb = NULL;
+               aufs_write_unlock(sb->s_root);
+               au_nwt_flush(&sbinfo->si_nowait);
+       }
+       generic_shutdown_super(sb);
+}
+
+/* registered from the module init code */
+struct file_system_type aufs_fs_type = {
+       .name           = AUFS_FSTYPE,
+       .fs_flags       =
+               FS_RENAME_DOES_D_MOVE   /* a race between rename and others */
+               | FS_REVAL_DOT,         /* for NFS branch and udba */
+       .mount          = aufs_mount,
+       .kill_sb        = aufs_kill_sb,
+       /* no need to __module_get() and module_put(). */
+       .owner          = THIS_MODULE,
+};
diff --git a/fs/aufs/super.h b/fs/aufs/super.h
new file mode 100644 (file)
index 0000000..14b1c07
--- /dev/null
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * super_block operations
+ */
+
+#ifndef __AUFS_SUPER_H__
+#define __AUFS_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include "rwsem.h"
+#include "spl.h"
+#include "wkq.h"
+
+/* function-pointer types with VFS read/write signatures, for xino I/O */
+typedef ssize_t (*au_readf_t)(struct file *, char __user *, size_t, loff_t *);
+typedef ssize_t (*au_writef_t)(struct file *, const char __user *, size_t,
+                              loff_t *);
+
+/* policies to select one among multiple writable branches */
+struct au_wbr_copyup_operations {
+       int (*copyup)(struct dentry *dentry);
+};
+
+#define AuWbr_DIR      1               /* target is a dir */
+#define AuWbr_PARENT   (1 << 1)        /* always require a parent */
+
+#define au_ftest_wbr(flags, name)      ((flags) & AuWbr_##name)
+#define au_fset_wbr(flags, name)       { (flags) |= AuWbr_##name; }
+#define au_fclr_wbr(flags, name)       { (flags) &= ~AuWbr_##name; }
+
+struct au_wbr_create_operations {
+       int (*create)(struct dentry *dentry, unsigned int flags);
+       int (*init)(struct super_block *sb);
+       int (*fin)(struct super_block *sb);
+};
+
+/* state for the most-free-space (mfs) branch selection policy */
+struct au_wbr_mfs {
+       struct mutex    mfs_lock; /* protect this structure */
+       unsigned long   mfs_jiffy;
+       unsigned long   mfs_expire;
+       aufs_bindex_t   mfs_bindex;
+
+       unsigned long long      mfsrr_bytes;
+       unsigned long long      mfsrr_watermark;
+};
+
+/* a pseudo-link entry: linked into si_plink hash, freed via RCU */
+struct pseudo_link {
+       union {
+               struct hlist_node hlist;
+               struct rcu_head rcu;
+       };
+       struct inode *inode;
+};
+
+#define AuPlink_NHASH 100
+/* bucket index in the si_plink hash table for an inode number */
+static inline int au_plink_hash(ino_t ino)
+{
+       return ino % AuPlink_NHASH;
+}
+
+struct au_branch;
+/* aufs per-superblock private data, stored in sb->s_fs_info */
+struct au_sbinfo {
+       /* nowait tasks in the system-wide workqueue */
+       struct au_nowait_tasks  si_nowait;
+
+       /*
+        * tried sb->s_umount, but failed due to the dependecy between i_mutex.
+        * rwsem for au_sbinfo is necessary.
+        */
+       struct au_rwsem         si_rwsem;
+
+       /* prevent recursive locking in deleting inode */
+       struct {
+               unsigned long           *bitmap;
+               spinlock_t              tree_lock;
+               struct radix_tree_root  tree;
+       } au_si_pid;
+
+       /*
+        * dirty approach to protect sb->sb_inodes and ->s_files from remount.
+        */
+       atomic_long_t           si_ninodes, si_nfiles;
+
+       /* branch management */
+       unsigned int            si_generation;
+
+       /* see above flags */
+       unsigned char           au_si_status;
+
+       /* index of the last (bottom) branch; loops run 0..si_bend */
+       aufs_bindex_t           si_bend;
+
+       /* dirty trick to keep br_id plus */
+       unsigned int            si_last_br_id :
+                               sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1;
+       struct au_branch        **si_branch;
+
+       /* policy to select a writable branch */
+       unsigned char           si_wbr_copyup;
+       unsigned char           si_wbr_create;
+       struct au_wbr_copyup_operations *si_wbr_copyup_ops;
+       struct au_wbr_create_operations *si_wbr_create_ops;
+
+       /* round robin */
+       atomic_t                si_wbr_rr_next;
+
+       /* most free space */
+       struct au_wbr_mfs       si_wbr_mfs;
+
+       /* mount flags */
+       /* include/asm-ia64/siginfo.h defines a macro named si_flags */
+       unsigned int            si_mntflags;
+
+       /* external inode number (bitmap and translation table) */
+       au_readf_t              si_xread;
+       au_writef_t             si_xwrite;
+       struct file             *si_xib;
+       struct mutex            si_xib_mtx; /* protect xib members */
+       unsigned long           *si_xib_buf;
+       unsigned long           si_xib_last_pindex;
+       int                     si_xib_next_bit;
+       aufs_bindex_t           si_xino_brid;
+       unsigned long           si_xino_jiffy;
+       unsigned long           si_xino_expire;
+       /* reserved for future use */
+       /* unsigned long long   si_xib_limit; */        /* Max xib file size */
+
+#ifdef CONFIG_AUFS_EXPORT
+       /* i_generation */
+       struct file             *si_xigen;
+       atomic_t                si_xigen_next;
+#endif
+
+       /* vdir parameters */
+       unsigned long           si_rdcache;     /* max cache time in jiffies */
+       unsigned int            si_rdblk;       /* deblk size */
+       unsigned int            si_rdhash;      /* hash size */
+
+       /*
+        * If the number of whiteouts are larger than si_dirwh, leave all of
+        * them after au_whtmp_ren to reduce the cost of rmdir(2).
+        * future fsck.aufs or kernel thread will remove them later.
+        * Otherwise, remove all whiteouts and the dir in rmdir(2).
+        */
+       unsigned int            si_dirwh;
+
+       /*
+        * rename(2) a directory with all children.
+        */
+       /* reserved for future use */
+       /* int                  si_rendir; */
+
+       /* pseudo_link list */
+       struct au_sphlhead      si_plink[AuPlink_NHASH];
+       wait_queue_head_t       si_plink_wq;
+       spinlock_t              si_plink_maint_lock;
+       pid_t                   si_plink_maint_pid;
+
+       /*
+        * sysfs and lifetime management.
+        * this is not a small structure and it may be a waste of memory in case
+        * of sysfs is disabled, particulary when many aufs-es are mounted.
+        * but using sysfs is majority.
+        */
+       struct kobject          si_kobj;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry            *si_dbgaufs;
+       struct dentry            *si_dbgaufs_plink;
+       struct dentry            *si_dbgaufs_xib;
+#ifdef CONFIG_AUFS_EXPORT
+       struct dentry            *si_dbgaufs_xigen;
+#endif
+#endif
+
+#ifdef CONFIG_AUFS_SBILIST
+       struct list_head        si_list;
+#endif
+
+       /* dirty, necessary for unmounting, sysfs and sysrq */
+       struct super_block      *si_sb;
+};
+
+/* sbinfo status flags */
+/*
+ * set true when refresh_dirs() failed at remount time.
+ * then try refreshing dirs at access time again.
+ * if it is false, refreshing dirs at access time is unnecesary
+ */
+#define AuSi_FAILED_REFRESH_DIR        1
+/* si_rwsem must be held: any lock for test, write lock for set/clr */
+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
+                                          unsigned int flag)
+{
+       AuRwMustAnyLock(&sbi->si_rwsem);
+       return sbi->au_si_status & flag;
+}
+#define au_ftest_si(sbinfo, name)      au_do_ftest_si(sbinfo, AuSi_##name)
+#define au_fset_si(sbinfo, name) do { \
+       AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+       (sbinfo)->au_si_status |= AuSi_##name; \
+} while (0)
+#define au_fclr_si(sbinfo, name) do { \
+       AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+       (sbinfo)->au_si_status &= ~AuSi_##name; \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* policy to select one among writable branches */
+#define AuWbrCopyup(sbinfo, ...) \
+       ((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__))
+#define AuWbrCreate(sbinfo, ...) \
+       ((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__))
+
+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
+#define AuLock_DW              1               /* write-lock dentry */
+#define AuLock_IR              (1 << 1)        /* read-lock inode */
+#define AuLock_IW              (1 << 2)        /* write-lock inode */
+#define AuLock_FLUSH           (1 << 3)        /* wait for 'nowait' tasks */
+#define AuLock_DIR             (1 << 4)        /* target is a dir */
+#define AuLock_NOPLM           (1 << 5)        /* return err in plm mode */
+#define AuLock_NOPLMW          (1 << 6)        /* wait for plm mode ends */
+#define AuLock_GEN             (1 << 7)        /* test digen/iigen */
+#define au_ftest_lock(flags, name)     ((flags) & AuLock_##name)
+#define au_fset_lock(flags, name) \
+       do { (flags) |= AuLock_##name; } while (0)
+#define au_fclr_lock(flags, name) \
+       do { (flags) &= ~AuLock_##name; } while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* super.c */
+extern struct file_system_type aufs_fs_type;
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
+typedef unsigned long long (*au_arraycb_t)(void *array, unsigned long long max,
+                                          void *arg);
+void au_array_free(void *array);
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg);
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max);
+void au_iarray_free(struct inode **a, unsigned long long max);
+
+/* sbinfo.c */
+void au_si_free(struct kobject *kobj);
+int au_si_alloc(struct super_block *sb);
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr);
+
+unsigned int au_sigen_inc(struct super_block *sb);
+aufs_bindex_t au_new_br_id(struct super_block *sb);
+
+int si_read_lock(struct super_block *sb, int flags);
+int si_write_lock(struct super_block *sb, int flags);
+int aufs_read_lock(struct dentry *dentry, int flags);
+void aufs_read_unlock(struct dentry *dentry, int flags);
+void aufs_write_lock(struct dentry *dentry);
+void aufs_write_unlock(struct dentry *dentry);
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags);
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+int si_pid_test_slow(struct super_block *sb);
+void si_pid_set_slow(struct super_block *sb);
+void si_pid_clr_slow(struct super_block *sb);
+
+/* wbr_policy.c */
+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
+extern struct au_wbr_create_operations au_wbr_create_ops[];
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+
+/* ---------------------------------------------------------------------- */
+
+/* get the aufs private info stored in sb->s_fs_info */
+static inline struct au_sbinfo *au_sbi(struct super_block *sb)
+{
+       return sb->s_fs_info;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+/* export.c: NFS-export support; stubbed out when the config is off */
+int au_test_nfsd(void);
+void au_export_init(struct super_block *sb);
+void au_xigen_inc(struct inode *inode);
+int au_xigen_new(struct inode *inode);
+int au_xigen_set(struct super_block *sb, struct file *base);
+void au_xigen_clr(struct super_block *sb);
+
+/* nfsd expects -ESTALE for a stale handle; everyone else gets -EBUSY */
+static inline int au_busy_or_stale(void)
+{
+       if (!au_test_nfsd())
+               return -EBUSY;
+       return -ESTALE;
+}
+#else
+AuStubInt0(au_test_nfsd, void)
+AuStubVoid(au_export_init, struct super_block *sb)
+AuStubVoid(au_xigen_inc, struct inode *inode)
+AuStubInt0(au_xigen_new, struct inode *inode)
+AuStubInt0(au_xigen_set, struct super_block *sb, struct file *base)
+AuStubVoid(au_xigen_clr, struct super_block *sb)
+static inline int au_busy_or_stale(void)
+{
+       return -EBUSY;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_SBILIST
+/* module.c */
+/* global list of all mounted aufs superblocks (used eg. by sysrq) */
+extern struct au_splhead au_sbilist;
+
+static inline void au_sbilist_init(void)
+{
+       au_spl_init(&au_sbilist);
+}
+
+static inline void au_sbilist_add(struct super_block *sb)
+{
+       au_spl_add(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+static inline void au_sbilist_del(struct super_block *sb)
+{
+       au_spl_del(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+/* sysrq handlers run in atomic context, hence the GFP_ATOMIC below */
+static inline void au_sbilist_lock(void)
+{
+       spin_lock(&au_sbilist.spin);
+}
+
+static inline void au_sbilist_unlock(void)
+{
+       spin_unlock(&au_sbilist.spin);
+}
+#define AuGFP_SBILIST  GFP_ATOMIC
+#else
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST  GFP_NOFS
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+#else
+AuStubVoid(au_sbilist_init, void)
+AuStubVoid(au_sbilist_add, struct super_block*)
+AuStubVoid(au_sbilist_del, struct super_block*)
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST  GFP_NOFS
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* clear all debugfs dentry pointers of a freshly allocated sbinfo */
+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
+{
+       /*
+        * This function is a dynamic '__init' function actually,
+        * so the tiny check for si_rwsem is unnecessary.
+        */
+       /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+#ifdef CONFIG_DEBUG_FS
+       sbinfo->si_dbgaufs = NULL;
+       sbinfo->si_dbgaufs_plink = NULL;
+       sbinfo->si_dbgaufs_xib = NULL;
+#ifdef CONFIG_AUFS_EXPORT
+       sbinfo->si_dbgaufs_xigen = NULL;
+#endif
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * track which processes currently hold si_rwsem (see au_si_pid in
+ * struct au_sbinfo).  pids below PID_MAX_DEFAULT use the bitmap;
+ * larger pids fall back to the radix-tree based *_slow() variants.
+ */
+static inline pid_t si_pid_bit(void)
+{
+       /* the origin of pid is 1, but the bitmap's is 0 */
+       return current->pid - 1;
+}
+
+static inline int si_pid_test(struct super_block *sb)
+{
+       pid_t bit = si_pid_bit();
+       if (bit < PID_MAX_DEFAULT)
+               return test_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
+       else
+               return si_pid_test_slow(sb);
+}
+
+static inline void si_pid_set(struct super_block *sb)
+{
+       pid_t bit = si_pid_bit();
+       if (bit < PID_MAX_DEFAULT) {
+               /* the bit must not be set yet: no recursive locking */
+               AuDebugOn(test_bit(bit, au_sbi(sb)->au_si_pid.bitmap));
+               set_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
+               /* smp_mb(); */
+       } else
+               si_pid_set_slow(sb);
+}
+
+static inline void si_pid_clr(struct super_block *sb)
+{
+       pid_t bit = si_pid_bit();
+       if (bit < PID_MAX_DEFAULT) {
+               /* the bit must have been set by si_pid_set() */
+               AuDebugOn(!test_bit(bit, au_sbi(sb)->au_si_pid.bitmap));
+               clear_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
+               /* smp_mb(); */
+       } else
+               si_pid_clr_slow(sb);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock superblock. mainly for entry point functions */
+/*
+ * __si_read_lock, __si_write_lock,
+ * __si_read_unlock, __si_write_unlock, __si_downgrade_lock
+ */
+AuSimpleRwsemFuncs(__si, struct super_block *sb, &au_sbi(sb)->si_rwsem);
+
+#define SiMustNoWaiters(sb)    AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
+#define SiMustAnyLock(sb)      AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
+#define SiMustWriteLock(sb)    AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
+
+/*
+ * "noflush" variants: take si_rwsem without flushing the nowait queue
+ * first, and record the holder pid via si_pid_set().
+ */
+static inline void si_noflush_read_lock(struct super_block *sb)
+{
+       __si_read_lock(sb);
+       si_pid_set(sb);
+}
+
+/* returns non-zero when the read lock was acquired */
+static inline int si_noflush_read_trylock(struct super_block *sb)
+{
+       int locked = __si_read_trylock(sb);
+       if (locked)
+               si_pid_set(sb);
+       return locked;
+}
+
+static inline void si_noflush_write_lock(struct super_block *sb)
+{
+       __si_write_lock(sb);
+       si_pid_set(sb);
+}
+
+/* returns non-zero when the write lock was acquired */
+static inline int si_noflush_write_trylock(struct super_block *sb)
+{
+       int locked = __si_write_trylock(sb);
+       if (locked)
+               si_pid_set(sb);
+       return locked;
+}
+
+#if 0 /* unused */
+static inline int si_read_trylock(struct super_block *sb, int flags)
+{
+       if (au_ftest_lock(flags, FLUSH))
+               au_nwt_flush(&au_sbi(sb)->si_nowait);
+       return si_noflush_read_trylock(sb);
+}
+#endif
+
+/* drop the pid record, then release si_rwsem for reading */
+static inline void si_read_unlock(struct super_block *sb)
+{
+       si_pid_clr(sb);
+       __si_read_unlock(sb);
+}
+
+#if 0 /* unused */
+static inline int si_write_trylock(struct super_block *sb, int flags)
+{
+       if (au_ftest_lock(flags, FLUSH))
+               au_nwt_flush(&au_sbi(sb)->si_nowait);
+       return si_noflush_write_trylock(sb);
+}
+#endif
+
+/* drop the pid record, then release si_rwsem for writing */
+static inline void si_write_unlock(struct super_block *sb)
+{
+       si_pid_clr(sb);
+       __si_write_unlock(sb);
+}
+
+#if 0 /* unused */
+static inline void si_downgrade_lock(struct super_block *sb)
+{
+       __si_downgrade_lock(sb);
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* index of the last (bottom-end) branch; caller must hold si_rwsem */
+static inline aufs_bindex_t au_sbend(struct super_block *sb)
+{
+       SiMustAnyLock(sb);
+       return au_sbi(sb)->si_bend;
+}
+
+/* aufs mount option flags for this superblock */
+static inline unsigned int au_mntflags(struct super_block *sb)
+{
+       SiMustAnyLock(sb);
+       return au_sbi(sb)->si_mntflags;
+}
+
+/* superblock generation counter */
+static inline unsigned int au_sigen(struct super_block *sb)
+{
+       SiMustAnyLock(sb);
+       return au_sbi(sb)->si_generation;
+}
+
+/* per-sb inode counter maintenance */
+static inline void au_ninodes_inc(struct super_block *sb)
+{
+       atomic_long_inc(&au_sbi(sb)->si_ninodes);
+}
+
+static inline void au_ninodes_dec(struct super_block *sb)
+{
+       AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_ninodes));
+       atomic_long_dec(&au_sbi(sb)->si_ninodes);
+}
+
+/* per-sb file counter maintenance */
+static inline void au_nfiles_inc(struct super_block *sb)
+{
+       atomic_long_inc(&au_sbi(sb)->si_nfiles);
+}
+
+static inline void au_nfiles_dec(struct super_block *sb)
+{
+       AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_nfiles));
+       atomic_long_dec(&au_sbi(sb)->si_nfiles);
+}
+
+/* branch object at the given index; caller must hold si_rwsem */
+static inline struct au_branch *au_sbr(struct super_block *sb,
+                                      aufs_bindex_t bindex)
+{
+       SiMustAnyLock(sb);
+       return au_sbi(sb)->si_branch[0 + bindex];
+}
+
+/* branch id whose xino file is in use; write lock required to change it */
+static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid)
+{
+       SiMustWriteLock(sb);
+       au_sbi(sb)->si_xino_brid = brid;
+}
+
+static inline aufs_bindex_t au_xino_brid(struct super_block *sb)
+{
+       SiMustAnyLock(sb);
+       return au_sbi(sb)->si_xino_brid;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SUPER_H__ */
diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c
new file mode 100644 (file)
index 0000000..f68e844
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sysfs interface and lifetime management
+ * they are necessary regardless of whether sysfs is disabled.
+ */
+
+#include <linux/random.h>
+#include "aufs.h"
+
+unsigned long sysaufs_si_mask;
+struct kset *sysaufs_kset;
+
+#define AuSiAttr(_name) { \
+       .attr   = { .name = __stringify(_name), .mode = 0444 }, \
+       .show   = sysaufs_si_##_name,                           \
+}
+
+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
+struct attribute *sysaufs_si_attrs[] = {
+       &sysaufs_si_attr_xi_path.attr,
+       NULL,
+};
+
+static const struct sysfs_ops au_sbi_ops = {
+       .show   = sysaufs_si_show
+};
+
+static struct kobj_type au_sbi_ktype = {
+       .release        = au_si_free,
+       .sysfs_ops      = &au_sbi_ops,
+       .default_attrs  = sysaufs_si_attrs
+};
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * register this sbinfo under /sys/fs/aufs and create its debugfs entries.
+ * on failure of the debugfs part the kobject reference is dropped, which
+ * triggers au_si_free() via the ktype release.
+ */
+int sysaufs_si_init(struct au_sbinfo *sbinfo)
+{
+       int err;
+
+       sbinfo->si_kobj.kset = sysaufs_kset;
+       /* cf. sysaufs_name() */
+       err = kobject_init_and_add
+               (&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL,
+                SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
+
+       dbgaufs_si_null(sbinfo);
+       if (!err) {
+               err = dbgaufs_si_init(sbinfo);
+               if (unlikely(err))
+                       kobject_put(&sbinfo->si_kobj);
+       }
+       return err;
+}
+
+/* tear down in reverse order of sysaufs_init() */
+void sysaufs_fin(void)
+{
+       dbgaufs_fin();
+       sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+       kset_unregister(sysaufs_kset);
+}
+
+int __init sysaufs_init(void)
+{
+       int err;
+
+       /* a non-zero random mask to obfuscate sbinfo pointers in sysfs names */
+       do {
+               get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
+       } while (!sysaufs_si_mask);
+
+       err = -EINVAL;
+       sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
+       if (unlikely(!sysaufs_kset))
+               goto out;
+       /*
+        * NOTE(review): kset_create_and_add() returns NULL on failure, not an
+        * ERR_PTR, so this IS_ERR() branch looks unreachable — confirm.
+        */
+       err = PTR_ERR(sysaufs_kset);
+       if (IS_ERR(sysaufs_kset))
+               goto out;
+       err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+       if (unlikely(err)) {
+               kset_unregister(sysaufs_kset);
+               goto out;
+       }
+
+       err = dbgaufs_init();
+       if (unlikely(err))
+               sysaufs_fin();
+out:
+       return err;
+}
diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h
new file mode 100644 (file)
index 0000000..2fc17d9
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sysfs interface and mount lifetime management
+ */
+
+#ifndef __SYSAUFS_H__
+#define __SYSAUFS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/sysfs.h>
+#include "module.h"
+
+struct super_block;
+struct au_sbinfo;
+
+struct sysaufs_si_attr {
+       struct attribute attr;
+       int (*show)(struct seq_file *seq, struct super_block *sb);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* sysaufs.c */
+extern unsigned long sysaufs_si_mask;
+extern struct kset *sysaufs_kset;
+extern struct attribute *sysaufs_si_attrs[];
+int sysaufs_si_init(struct au_sbinfo *sbinfo);
+int __init sysaufs_init(void);
+void sysaufs_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+/* some people don't like to show a pointer in the kernel */
+/* obfuscated id for a sbinfo: its pointer XORed with a boot-time random mask */
+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
+{
+       return sysaufs_si_mask ^ (unsigned long)sbinfo;
+}
+
+#define SysaufsSiNamePrefix    "si_"
+#define SysaufsSiNameLen       (sizeof(SysaufsSiNamePrefix) + 16)
+/* format the sysfs directory name, e.g. "si_<hex id>" */
+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
+{
+       snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
+                sysaufs_si_id(sbinfo));
+}
+
+struct au_branch;
+#ifdef CONFIG_SYSFS
+/* sysfs.c */
+extern struct attribute_group *sysaufs_attr_group;
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+                        char *buf);
+
+void sysaufs_br_init(struct au_branch *br);
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+
+#define sysaufs_brs_init()     do {} while (0)
+
+#else
+#define sysaufs_attr_group     NULL
+
+AuStubInt0(sysaufs_si_xi_path, struct seq_file *seq, struct super_block *sb)
+
+static inline
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+                        char *buf)
+{
+       return 0;
+}
+
+AuStubVoid(sysaufs_br_init, struct au_branch *br)
+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+
+static inline void sysaufs_brs_init(void)
+{
+       sysaufs_brs = 0;
+}
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* __KERNEL__ */
+#endif /* __SYSAUFS_H__ */
diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c
new file mode 100644 (file)
index 0000000..722152b
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sysfs interface
+ */
+
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_FS_MODULE
+/* this entry violates the "one line per file" policy of sysfs */
+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr,
+                          char *buf)
+{
+       ssize_t err;
+       static char *conf =
+/* this file is generated at compiling */
+#include "conf.str"
+               ;
+
+       /*
+        * 'conf' is data, not a format string: pass it through "%s" so a
+        * '%' in the generated config text cannot be misinterpreted.
+        */
+       err = snprintf(buf, PAGE_SIZE, "%s", conf);
+       if (unlikely(err >= PAGE_SIZE))
+               err = -EFBIG;
+       return err;
+}
+
+static struct kobj_attribute au_config_attr = __ATTR_RO(config);
+#endif
+
+static struct attribute *au_attr[] = {
+#ifdef CONFIG_AUFS_FS_MODULE
+       &au_config_attr.attr,
+#endif
+       NULL,   /* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group sysaufs_attr_group_body = {
+       .attrs = au_attr
+};
+
+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
+
+/* ---------------------------------------------------------------------- */
+
+/* show the path of the xino (external inode number) file, if enabled */
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
+{
+       int err;
+
+       SiMustAnyLock(sb);
+
+       err = 0;
+       if (au_opt_test(au_mntflags(sb), XINO)) {
+               err = au_xino_path(seq, au_sbi(sb)->si_xib);
+               seq_putc(seq, '\n');
+       }
+       return err;
+}
+
+/*
+ * the lifetime of branch is independent from the entry under sysfs.
+ * sysfs handles the lifetime of the entry, and never call ->show() after it is
+ * unlinked.
+ */
+/*
+ * show one branch entry: either its path and permission string
+ * (AuBrSysfs_BR) or its branch id (AuBrSysfs_BRID).
+ */
+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
+                        aufs_bindex_t bindex, int idx)
+{
+       int err;
+       struct path path;
+       struct dentry *root;
+       struct au_branch *br;
+       char *perm;
+
+       AuDbg("b%d\n", bindex);
+
+       err = 0;
+       root = sb->s_root;
+       di_read_lock_parent(root, !AuLock_IR);
+       br = au_sbr(sb, bindex);
+
+       switch (idx) {
+       case AuBrSysfs_BR:
+               path.mnt = au_br_mnt(br);
+               path.dentry = au_h_dptr(root, bindex);
+               au_seq_path(seq, &path);
+               di_read_unlock(root, !AuLock_IR);
+               perm = au_optstr_br_perm(br->br_perm);
+               if (perm) {
+                       /* seq_printf() returns -1 here when the buffer is full */
+                       err = seq_printf(seq, "=%s\n", perm);
+                       kfree(perm);
+                       if (err == -1)
+                               err = -E2BIG;
+               } else
+                       err = -ENOMEM;
+               break;
+       case AuBrSysfs_BRID:
+               err = seq_printf(seq, "%d\n", br->br_id);
+               di_read_unlock(root, !AuLock_IR);
+               if (err == -1)
+                       err = -E2BIG;
+               break;
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * build a minimal fake seq_file over the caller-supplied buffer so the
+ * regular seq_* helpers can be reused from the sysfs ->show() path.
+ * returns ERR_PTR(-ENOMEM) on allocation failure; caller kfree()s it.
+ */
+static struct seq_file *au_seq(char *p, ssize_t len)
+{
+       struct seq_file *seq;
+
+       seq = kzalloc(sizeof(*seq), GFP_NOFS);
+       if (seq) {
+               /* mutex_init(&seq.lock); */
+               seq->buf = p;
+               seq->size = len;
+               return seq; /* success */
+       }
+
+       seq = ERR_PTR(-ENOMEM);
+       return seq;
+}
+
+#define SysaufsBr_PREFIX       "br"
+#define SysaufsBrid_PREFIX     "brid"
+
+/* todo: file size may exceed PAGE_SIZE */
+/* todo: file size may exceed PAGE_SIZE */
+/*
+ * common ->show() for all per-sb attributes: dispatch by attribute name,
+ * first to the static sysaufs_si_attrs table, then to the dynamic
+ * "br<N>"/"brid<N>" branch entries.
+ */
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+                       char *buf)
+{
+       ssize_t err;
+       int idx;
+       long l;
+       aufs_bindex_t bend;
+       struct au_sbinfo *sbinfo;
+       struct super_block *sb;
+       struct seq_file *seq;
+       char *name;
+       struct attribute **cattr;
+
+       sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+       sb = sbinfo->si_sb;
+
+       /*
+        * prevent a race condition between sysfs and aufs.
+        * for instance, sysfs_file_read() calls sysfs_get_active_two() which
+        * prohibits maintaining the sysfs entries.
+        * here we acquire read lock after sysfs_get_active_two().
+        * on the other hand, the remount process may maintain the sysfs/aufs
+        * entries after acquiring write lock.
+        * it can cause a deadlock.
+        * simply we gave up processing read here.
+        */
+       err = -EBUSY;
+       if (unlikely(!si_noflush_read_trylock(sb)))
+               goto out;
+
+       seq = au_seq(buf, PAGE_SIZE);
+       err = PTR_ERR(seq);
+       if (IS_ERR(seq))
+               goto out_unlock;
+
+       name = (void *)attr->name;
+       cattr = sysaufs_si_attrs;
+       while (*cattr) {
+               if (!strcmp(name, (*cattr)->name)) {
+                       err = container_of(*cattr, struct sysaufs_si_attr, attr)
+                               ->show(seq, sb);
+                       goto out_seq;
+               }
+               cattr++;
+       }
+
+       /* test "brid" before "br": "br" is a prefix of "brid" */
+       if (!strncmp(name, SysaufsBrid_PREFIX,
+                    sizeof(SysaufsBrid_PREFIX) - 1)) {
+               idx = AuBrSysfs_BRID;
+               name += sizeof(SysaufsBrid_PREFIX) - 1;
+       } else if (!strncmp(name, SysaufsBr_PREFIX,
+                           sizeof(SysaufsBr_PREFIX) - 1)) {
+               idx = AuBrSysfs_BR;
+               name += sizeof(SysaufsBr_PREFIX) - 1;
+       } else
+                 BUG();
+
+       err = kstrtol(name, 10, &l);
+       if (!err) {
+               bend = au_sbend(sb);
+               if (l <= bend)
+                       err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx);
+               else
+                       err = -ENOENT;
+       }
+
+out_seq:
+       if (!err) {
+               err = seq->count;
+               /* sysfs limit */
+               if (unlikely(err == PAGE_SIZE))
+                       err = -EFBIG;
+       }
+       kfree(seq);
+out_unlock:
+       si_read_unlock(sb);
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* initialize the per-branch sysfs attribute array (one entry per view) */
+void sysaufs_br_init(struct au_branch *br)
+{
+       int i;
+       struct au_brsysfs *br_sysfs;
+       struct attribute *attr;
+
+       br_sysfs = br->br_sysfs;
+       for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+               attr = &br_sysfs->attr;
+               sysfs_attr_init(attr);
+               attr->name = br_sysfs->name;
+               attr->mode = S_IRUGO;
+               br_sysfs++;
+       }
+}
+
+/* remove the sysfs files for branch 'bindex' and all branches below it */
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+       struct au_branch *br;
+       struct kobject *kobj;
+       struct au_brsysfs *br_sysfs;
+       int i;
+       aufs_bindex_t bend;
+
+       dbgaufs_brs_del(sb, bindex);
+
+       /* controlled by the module parameter; see sysaufs_brs */
+       if (!sysaufs_brs)
+               return;
+
+       kobj = &au_sbi(sb)->si_kobj;
+       bend = au_sbend(sb);
+       for (; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               br_sysfs = br->br_sysfs;
+               for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+                       sysfs_remove_file(kobj, &br_sysfs->attr);
+                       br_sysfs++;
+               }
+       }
+}
+
+/* (re)create the "br<N>"/"brid<N>" sysfs files from 'bindex' downwards */
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+       int err, i;
+       aufs_bindex_t bend;
+       struct kobject *kobj;
+       struct au_branch *br;
+       struct au_brsysfs *br_sysfs;
+
+       dbgaufs_brs_add(sb, bindex);
+
+       if (!sysaufs_brs)
+               return;
+
+       kobj = &au_sbi(sb)->si_kobj;
+       bend = au_sbend(sb);
+       for (; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               br_sysfs = br->br_sysfs;
+               snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name),
+                        SysaufsBr_PREFIX "%d", bindex);
+               snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name),
+                        SysaufsBrid_PREFIX "%d", bindex);
+               for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+                       err = sysfs_create_file(kobj, &br_sysfs->attr);
+                       /* creation failure is logged but not fatal */
+                       if (unlikely(err))
+                               pr_warn("failed %s under sysfs(%d)\n",
+                                       br_sysfs->name, err);
+                       br_sysfs++;
+               }
+       }
+}
diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c
new file mode 100644 (file)
index 0000000..54f3250
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * magic sysrq handler
+ */
+
+/* #include <linux/sysrq.h> */
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * dump the debug state of one aufs superblock to the kernel log.
+ * runs from the sysrq handler, hence the printk-level juggling and
+ * the atomic-context-safe locking below.
+ */
+static void sysrq_sb(struct super_block *sb)
+{
+       char *plevel;
+       struct au_sbinfo *sbinfo;
+       struct file *file;
+
+       /* temporarily raise the aufs debug print level */
+       plevel = au_plevel;
+       au_plevel = KERN_WARNING;
+
+       /* since we define pr_fmt, call printk directly */
+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str)
+
+       sbinfo = au_sbi(sb);
+       printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo));
+       pr("superblock\n");
+       au_dpri_sb(sb);
+
+#if 0
+       pr("root dentry\n");
+       au_dpri_dentry(sb->s_root);
+       pr("root inode\n");
+       au_dpri_inode(sb->s_root->d_inode);
+#endif
+
+#if 0
+       do {
+               int err, i, j, ndentry;
+               struct au_dcsub_pages dpages;
+               struct au_dpage *dpage;
+
+               err = au_dpages_init(&dpages, GFP_ATOMIC);
+               if (unlikely(err))
+                       break;
+               err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
+               if (!err)
+                       for (i = 0; i < dpages.ndpage; i++) {
+                               dpage = dpages.dpages + i;
+                               ndentry = dpage->ndentry;
+                               for (j = 0; j < ndentry; j++)
+                                       au_dpri_dentry(dpage->dentries[j]);
+                       }
+               au_dpages_free(&dpages);
+       } while (0);
+#endif
+
+#if 1
+       {
+               struct inode *i;
+               pr("isolated inode\n");
+               spin_lock(&inode_sb_list_lock);
+               list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
+                       spin_lock(&i->i_lock);
+                       /* "1 ||" dumps every inode, not only dentry-less ones */
+                       if (1 || list_empty(&i->i_dentry))
+                               au_dpri_inode(i);
+                       spin_unlock(&i->i_lock);
+               }
+               spin_unlock(&inode_sb_list_lock);
+       }
+#endif
+       pr("files\n");
+       lg_global_lock(files_lglock);
+       do_file_list_for_each_entry(sb, file) {
+               umode_t mode;
+               mode = file->f_dentry->d_inode->i_mode;
+               if (!special_file(mode) || au_special_file(mode))
+                       au_dpri_file(file);
+       } while_file_list_for_each_entry;
+       lg_global_unlock(files_lglock);
+       pr("done\n");
+
+#undef pr
+       au_plevel = plevel;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module parameter */
+static char *aufs_sysrq_key = "a";
+module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO);
+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
+
+/* sysrq handler: dump every mounted aufs superblock on the global list */
+static void au_sysrq(int key __maybe_unused)
+{
+       struct au_sbinfo *sbinfo;
+
+       /* lockdep is silenced: sysrq context takes locks out of order */
+       lockdep_off();
+       au_sbilist_lock();
+       list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
+               sysrq_sb(sbinfo->si_sb);
+       au_sbilist_unlock();
+       lockdep_on();
+}
+
+static struct sysrq_key_op au_sysrq_op = {
+       .handler        = au_sysrq,
+       .help_msg       = "Aufs",
+       .action_msg     = "Aufs",
+       .enable_mask    = SYSRQ_ENABLE_DUMP
+};
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * register the sysrq key given by the 'sysrq' module parameter.
+ * NOTE(review): returns -1 (not a -Exxx errno) when the key is out of
+ * the 'a'..'z' range — confirm callers only test for non-zero.
+ */
+int __init au_sysrq_init(void)
+{
+       int err;
+       char key;
+
+       err = -1;
+       key = *aufs_sysrq_key;
+       if ('a' <= key && key <= 'z')
+               err = register_sysrq_key(key, &au_sysrq_op);
+       if (unlikely(err))
+               pr_err("err %d, sysrq=%c\n", err, key);
+       return err;
+}
+
+/* unregister the sysrq key; failure is only logged */
+void au_sysrq_fin(void)
+{
+       int err;
+       err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
+       if (unlikely(err))
+               pr_err("err %d (ignored)\n", err);
+}
diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c
new file mode 100644 (file)
index 0000000..fe4db05
--- /dev/null
@@ -0,0 +1,885 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * virtual or vertical directory
+ */
+
+#include "aufs.h"
+
+/* size of a dir-entry record with an nlen-byte name, ino_t-aligned */
+static unsigned int calc_size(int nlen)
+{
+       return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
+}
+
+/* mark the end of a deblk by a zero-length entry, if there is room */
+static int set_deblk_end(union au_vdir_deblk_p *p,
+                        union au_vdir_deblk_p *deblk_end)
+{
+       if (calc_size(0) <= deblk_end->deblk - p->deblk) {
+               p->de->de_str.len = 0;
+               /* smp_mb(); */
+               return 0;
+       }
+       return -1; /* error */
+}
+
+/* returns true or false */
+static int is_deblk_end(union au_vdir_deblk_p *p,
+                       union au_vdir_deblk_p *deblk_end)
+{
+       /* a zero-length entry terminates the deblk; no room counts as end */
+       if (calc_size(0) <= deblk_end->deblk - p->deblk)
+               return !p->de->de_str.len;
+       return 1;
+}
+
+/* the last (currently filling) deblk of the vdir */
+static unsigned char *last_deblk(struct au_vdir *vdir)
+{
+       return vdir->vd_deblk[vdir->vd_nblk - 1];
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* estimate the appropriate size for the name hash table */
+/* scale the table roughly with dir size (sz>>10), clamped below by the default */
+unsigned int au_rdhash_est(loff_t sz)
+{
+       unsigned int n;
+
+       n = UINT_MAX;
+       sz >>= 10;
+       if (sz < n)
+               n = sz;
+       if (sz < AUFS_RDHASH_DEF)
+               n = AUFS_RDHASH_DEF;
+       /* pr_info("n %u\n", n); */
+       return n;
+}
+
+/*
+ * the allocated memory has to be freed by
+ * au_nhash_wh_free() or au_nhash_de_free().
+ */
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
+{
+       struct hlist_head *head;
+       unsigned int u;
+
+       /*
+        * num_hash may come from the rdhash mount option; kcalloc checks
+        * the num_hash * sizeof(head) multiplication for overflow, which a
+        * plain kmalloc of the product would not.
+        */
+       head = kcalloc(num_hash, sizeof(*nhash->nh_head), gfp);
+       if (head) {
+               nhash->nh_num = num_hash;
+               nhash->nh_head = head;
+               for (u = 0; u < num_hash; u++)
+                       INIT_HLIST_HEAD(head++);
+               return 0; /* success */
+       }
+
+       return -ENOMEM;
+}
+
+/* debug helper: count entries on one hash chain (compiled out) */
+static void nhash_count(struct hlist_head *head)
+{
+#if 0
+       unsigned long n;
+       struct hlist_node *pos;
+
+       n = 0;
+       hlist_for_each(pos, head)
+               n++;
+       pr_info("%lu\n", n);
+#endif
+}
+
+/* free every whiteout entry on one chain; unlinking is unnecessary */
+static void au_nhash_wh_do_free(struct hlist_head *head)
+{
+       struct au_vdir_wh *tpos;
+       struct hlist_node *pos, *node;
+
+       hlist_for_each_entry_safe(tpos, pos, node, head, wh_hash) {
+               /* hlist_del(pos); */
+               kfree(tpos);
+       }
+}
+
+/* free every dir-entry string holder on one chain */
+static void au_nhash_de_do_free(struct hlist_head *head)
+{
+       struct au_vdir_dehstr *tpos;
+       struct hlist_node *pos, *node;
+
+       hlist_for_each_entry_safe(tpos, pos, node, head, hash) {
+               /* hlist_del(pos); */
+               au_cache_free_vdir_dehstr(tpos);
+       }
+}
+
+/* free all chains with the given per-chain callback, then the head array */
+static void au_nhash_do_free(struct au_nhash *nhash,
+                            void (*free)(struct hlist_head *head))
+{
+       unsigned int n;
+       struct hlist_head *head;
+
+       n = nhash->nh_num;
+       if (!n)
+               return;
+
+       head = nhash->nh_head;
+       while (n-- > 0) {
+               nhash_count(head);
+               free(head++);
+       }
+       kfree(nhash->nh_head);
+}
+
+void au_nhash_wh_free(struct au_nhash *whlist)
+{
+       au_nhash_do_free(whlist, au_nhash_wh_do_free);
+}
+
+static void au_nhash_de_free(struct au_nhash *delist)
+{
+       au_nhash_do_free(delist, au_nhash_de_do_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * returns true when more than 'limit' whiteouts on the target branch
+ * exist in the whole hash table; stops counting as soon as it knows.
+ */
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+                           int limit)
+{
+       int num;
+       unsigned int u, n;
+       struct hlist_head *head;
+       struct au_vdir_wh *tpos;
+       struct hlist_node *pos;
+
+       num = 0;
+       n = whlist->nh_num;
+       head = whlist->nh_head;
+       for (u = 0; u < n; u++, head++)
+               hlist_for_each_entry(tpos, pos, head, wh_hash)
+                       if (tpos->wh_bindex == btgt && ++num > limit)
+                               return 1;
+       return 0;
+}
+
+/* pick the hash chain for a name: simple byte-sum modulo table size */
+static struct hlist_head *au_name_hash(struct au_nhash *nhash,
+                                      unsigned char *name,
+                                      unsigned int len)
+{
+       unsigned int v;
+       /* const unsigned int magic_bit = 12; */
+
+       AuDebugOn(!nhash->nh_num || !nhash->nh_head);
+
+       v = 0;
+       while (len--)
+               v += *name++;
+       /* v = hash_long(v, magic_bit); */
+       v %= nhash->nh_num;
+       return nhash->nh_head + v;
+}
+
+/* exact name match: same length and same bytes */
+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
+                             int nlen)
+{
+       return str->len == nlen && !memcmp(str->name, name, nlen);
+}
+
+/* returns found or not */
+/* returns found or not */
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
+{
+       struct hlist_head *head;
+       struct au_vdir_wh *tpos;
+       struct hlist_node *pos;
+       struct au_vdir_destr *str;
+
+       /* only the chain selected by the name hash needs scanning */
+       head = au_name_hash(whlist, name, nlen);
+       hlist_for_each_entry(tpos, pos, head, wh_hash) {
+               str = &tpos->wh_str;
+               AuDbg("%.*s\n", str->len, str->name);
+               if (au_nhash_test_name(str, name, nlen))
+                       return 1;
+       }
+       return 0;
+}
+
+/* returns found(true) or not; same as above for the dir-entry list */
+static int test_known(struct au_nhash *delist, char *name, int nlen)
+{
+       struct hlist_head *head;
+       struct au_vdir_dehstr *tpos;
+       struct hlist_node *pos;
+       struct au_vdir_destr *str;
+
+       head = au_name_hash(delist, name, nlen);
+       hlist_for_each_entry(tpos, pos, head, hash) {
+               str = tpos->str;
+               AuDbg("%.*s\n", str->len, str->name);
+               if (au_nhash_test_name(str, name, nlen))
+                       return 1;
+       }
+       return 0;
+}
+
+/* record ino/type on a whiteout entry; no-op unless CONFIG_AUFS_SHWH */
+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
+                           unsigned char d_type)
+{
+#ifdef CONFIG_AUFS_SHWH
+       wh->wh_ino = ino;
+       wh->wh_type = d_type;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * allocate a whiteout entry for @name/@nlen and insert it into the name
+ * hash @whlist.  the name is stored inline after the struct
+ * (kmalloc(sizeof(*wh) + nlen)).  when @shwh is set, the inode number and
+ * d_type are recorded too so the whiteout can be re-emitted later.
+ * returns 0 on success, -ENOMEM on allocation failure.
+ */
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+                      unsigned int d_type, aufs_bindex_t bindex,
+                      unsigned char shwh)
+{
+       int err;
+       struct au_vdir_destr *str;
+       struct au_vdir_wh *wh;
+
+       AuDbg("%.*s\n", nlen, name);
+       AuDebugOn(!whlist->nh_num || !whlist->nh_head);
+
+       err = -ENOMEM;
+       wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
+       if (unlikely(!wh))
+               goto out;
+
+       err = 0;
+       wh->wh_bindex = bindex;
+       if (shwh)
+               au_shwh_init_wh(wh, ino, d_type);
+       str = &wh->wh_str;
+       str->len = nlen;
+       memcpy(str->name, name, nlen);
+       hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
+       /* smp_mb(); */
+
+out:
+       return err;
+}
+
+/*
+ * grow @vdir by one delta block: extend the block-pointer array by one
+ * slot, allocate a fresh deblk of vd_deblk_sz bytes, move the write
+ * cursor (vd_last) to its start, and mark the block's end via
+ * set_deblk_end().  returns 0 on success, -ENOMEM on failure.
+ * note: if the second kmalloc fails, the enlarged pointer array is kept;
+ * vd_nblk is untouched, so the extra slot is simply unused.
+ */
+static int append_deblk(struct au_vdir *vdir)
+{
+       int err;
+       unsigned long ul;
+       const unsigned int deblk_sz = vdir->vd_deblk_sz;
+       union au_vdir_deblk_p p, deblk_end;
+       unsigned char **o;
+
+       err = -ENOMEM;
+       o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
+                    GFP_NOFS);
+       if (unlikely(!o))
+               goto out;
+
+       vdir->vd_deblk = o;
+       p.deblk = kmalloc(deblk_sz, GFP_NOFS);
+       if (p.deblk) {
+               ul = vdir->vd_nblk++;
+               vdir->vd_deblk[ul] = p.deblk;
+               vdir->vd_last.ul = ul;
+               vdir->vd_last.p.deblk = p.deblk;
+               deblk_end.deblk = p.deblk + deblk_sz;
+               err = set_deblk_end(&p, &deblk_end);
+       }
+
+out:
+       return err;
+}
+
+/*
+ * append one directory entry (@name/@nlen, @ino, @d_type) to @vdir's
+ * current delta block, allocating a new deblk first when the entry
+ * (calc_size(nlen) bytes) does not fit, and register the name in @delist
+ * for duplicate detection.  the write cursor vd_last.p is advanced past
+ * the new entry and the block is re-terminated with set_deblk_end().
+ * returns 0 on success or -ENOMEM.
+ */
+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
+                    unsigned int d_type, struct au_nhash *delist)
+{
+       int err;
+       unsigned int sz;
+       const unsigned int deblk_sz = vdir->vd_deblk_sz;
+       union au_vdir_deblk_p p, *room, deblk_end;
+       struct au_vdir_dehstr *dehstr;
+
+       p.deblk = last_deblk(vdir);
+       deblk_end.deblk = p.deblk + deblk_sz;
+       room = &vdir->vd_last.p;
+       /* the cursor must lie inside the last deblk and sit on its end mark */
+       AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
+                 || !is_deblk_end(room, &deblk_end));
+
+       sz = calc_size(nlen);
+       if (unlikely(sz > deblk_end.deblk - room->deblk)) {
+               /* no space left in the current deblk; open a fresh one */
+               err = append_deblk(vdir);
+               if (unlikely(err))
+                       goto out;
+
+               p.deblk = last_deblk(vdir);
+               deblk_end.deblk = p.deblk + deblk_sz;
+               /* smp_mb(); */
+               /* append_deblk() moved the cursor to the new block's start */
+               AuDebugOn(room->deblk != p.deblk);
+       }
+
+       err = -ENOMEM;
+       dehstr = au_cache_alloc_vdir_dehstr();
+       if (unlikely(!dehstr))
+               goto out;
+
+       /* hash entry points into the deblk; the fields are filled just below */
+       dehstr->str = &room->de->de_str;
+       hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
+       room->de->de_ino = ino;
+       room->de->de_type = d_type;
+       room->de->de_str.len = nlen;
+       memcpy(room->de->de_str.name, name, nlen);
+
+       err = 0;
+       room->deblk += sz;
+       /* if the end mark no longer fits, roll over to a new deblk now */
+       if (unlikely(set_deblk_end(room, &deblk_end)))
+               err = append_deblk(vdir);
+       /* smp_mb(); */
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * release a virtual directory: free every delta block, the block-pointer
+ * array, and finally the au_vdir object itself (back to its kmem cache).
+ */
+void au_vdir_free(struct au_vdir *vdir)
+{
+       unsigned char **deblk;
+
+       deblk = vdir->vd_deblk;
+       while (vdir->vd_nblk--)
+               kfree(*deblk++);
+       kfree(vdir->vd_deblk);
+       au_cache_free_vdir(vdir);
+}
+
+/*
+ * allocate and initialize an empty au_vdir for @file with a single delta
+ * block.  the deblk size comes from the si_rdblk mount parameter, or is
+ * estimated from the directory size when si_rdblk is 0.
+ * returns the new vdir or ERR_PTR(-ENOMEM).
+ */
+static struct au_vdir *alloc_vdir(struct file *file)
+{
+       struct au_vdir *vdir;
+       struct super_block *sb;
+       int err;
+
+       sb = file->f_dentry->d_sb;
+       SiMustAnyLock(sb);
+
+       err = -ENOMEM;
+       vdir = au_cache_alloc_vdir();
+       if (unlikely(!vdir))
+               goto out;
+
+       /* pointer array sized for one block; append_deblk() fills slot 0 */
+       vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
+       if (unlikely(!vdir->vd_deblk))
+               goto out_free;
+
+       vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
+       if (!vdir->vd_deblk_sz) {
+               /* estimate the apropriate size for deblk */
+               vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL);
+               /* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */
+       }
+       vdir->vd_nblk = 0;
+       vdir->vd_version = 0;
+       vdir->vd_jiffy = 0;
+       err = append_deblk(vdir);
+       if (!err)
+               return vdir; /* success */
+
+       kfree(vdir->vd_deblk);
+
+out_free:
+       au_cache_free_vdir(vdir);
+out:
+       vdir = ERR_PTR(err);
+       return vdir;
+}
+
+/*
+ * reset @vdir to an empty state for re-reading: free all delta blocks
+ * except the first, re-terminate block 0, and rewind the cursor to its
+ * start.  vd_deblk_sz is intentionally preserved.  returns the result of
+ * set_deblk_end() (0 on success).
+ */
+static int reinit_vdir(struct au_vdir *vdir)
+{
+       int err;
+       union au_vdir_deblk_p p, deblk_end;
+
+       while (vdir->vd_nblk > 1) {
+               kfree(vdir->vd_deblk[vdir->vd_nblk - 1]);
+               /* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
+               vdir->vd_nblk--;
+       }
+       p.deblk = vdir->vd_deblk[0];
+       deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
+       err = set_deblk_end(&p, &deblk_end);
+       /* keep vd_dblk_sz */
+       vdir->vd_last.ul = 0;
+       vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+       vdir->vd_version = 0;
+       vdir->vd_jiffy = 0;
+       /* smp_mb(); */
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * per-pass flags for fillvdir(), carried in fillvdir_arg.flags:
+ * CALLED - the readdir callback fired at least once (loop-continuation test)
+ * WHABLE - whiteout names on the current branch should be recorded
+ * SHWH   - "show whiteout" mount option is active (forced to 0 without
+ *          CONFIG_AUFS_SHWH, below)
+ */
+#define AuFillVdir_CALLED      1
+#define AuFillVdir_WHABLE      (1 << 1)
+#define AuFillVdir_SHWH                (1 << 2)
+#define au_ftest_fillvdir(flags, name) ((flags) & AuFillVdir_##name)
+#define au_fset_fillvdir(flags, name) \
+       do { (flags) |= AuFillVdir_##name; } while (0)
+#define au_fclr_fillvdir(flags, name) \
+       do { (flags) &= ~AuFillVdir_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuFillVdir_SHWH
+#define AuFillVdir_SHWH                0
+#endif
+
+/* context passed through vfsub_readdir() to the fillvdir() callback */
+struct fillvdir_arg {
+       struct file             *file;  /* the aufs directory being read */
+       struct au_vdir          *vdir;  /* vdir being filled */
+       struct au_nhash         delist; /* names already appended */
+       struct au_nhash         whlist; /* whiteouted names seen so far */
+       aufs_bindex_t           bindex; /* branch currently being scanned */
+       unsigned int            flags;  /* AuFillVdir_* */
+       int                     err;    /* sticky per-callback error */
+};
+
+/*
+ * readdir callback: merge one lower-branch entry into the vdir.
+ * regular names are appended unless already present or whiteouted;
+ * names carrying the whiteout prefix are stripped and recorded in the
+ * whiteout hash instead (only when the branch is WHABLE).
+ * returns arg->err, which also aborts vfsub_readdir() when non-zero.
+ */
+static int fillvdir(void *__arg, const char *__name, int nlen,
+                   loff_t offset __maybe_unused, u64 h_ino,
+                   unsigned int d_type)
+{
+       struct fillvdir_arg *arg = __arg;
+       char *name = (void *)__name;
+       struct super_block *sb;
+       ino_t ino;
+       const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
+
+       arg->err = 0;
+       sb = arg->file->f_dentry->d_sb;
+       au_fset_fillvdir(arg->flags, CALLED);
+       /* smp_mb(); */
+       if (nlen <= AUFS_WH_PFX_LEN
+           || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+               if (test_known(&arg->delist, name, nlen)
+                   || au_nhash_test_known_wh(&arg->whlist, name, nlen))
+                       goto out; /* already exists or whiteouted */
+
+               /* note: sb was already assigned above; this is redundant */
+               sb = arg->file->f_dentry->d_sb;
+               arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
+               if (!arg->err) {
+                       if (unlikely(nlen > AUFS_MAX_NAMELEN))
+                               d_type = DT_UNKNOWN;
+                       arg->err = append_de(arg->vdir, name, nlen, ino,
+                                            d_type, &arg->delist);
+               }
+       } else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
+               name += AUFS_WH_PFX_LEN;
+               nlen -= AUFS_WH_PFX_LEN;
+               if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
+                       goto out; /* already whiteouted */
+
+               if (shwh)
+                       arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
+                                            &ino);
+               if (!arg->err) {
+                       /*
+                        * NOTE(review): the comparison direction here is the
+                        * opposite of the branch above (which degrades d_type
+                        * only when nlen EXCEEDS the limit); as written,
+                        * d_type is forced to DT_UNKNOWN for names that fit.
+                        * confirm whether ">" was intended.
+                        */
+                       if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN)
+                               d_type = DT_UNKNOWN;
+                       arg->err = au_nhash_append_wh
+                               (&arg->whlist, name, nlen, ino, d_type,
+                                arg->bindex, shwh);
+               }
+       }
+
+out:
+       if (!arg->err)
+               arg->vdir->vd_jiffy = jiffies;
+       /* smp_mb(); */
+       AuTraceErr(arg->err);
+       return arg->err;
+}
+
+/*
+ * "show whiteout" post-pass: re-emit every recorded whiteout as a visible
+ * directory entry by prepending AUFS_WH_PFX to its stripped name and
+ * appending it to @vdir.  a single name buffer from __getname_gfp() is
+ * reused for all entries (prefix written once, names copied after it).
+ * compiled out (returns 0) without CONFIG_AUFS_SHWH.
+ */
+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
+                         struct au_nhash *whlist, struct au_nhash *delist)
+{
+#ifdef CONFIG_AUFS_SHWH
+       int err;
+       unsigned int nh, u;
+       struct hlist_head *head;
+       struct au_vdir_wh *tpos;
+       struct hlist_node *pos, *n;
+       char *p, *o;
+       struct au_vdir_destr *destr;
+
+       AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
+
+       err = -ENOMEM;
+       o = p = __getname_gfp(GFP_NOFS);
+       if (unlikely(!p))
+               goto out;
+
+       err = 0;
+       nh = whlist->nh_num;
+       memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+       p += AUFS_WH_PFX_LEN;
+       for (u = 0; u < nh; u++) {
+               head = whlist->nh_head + u;
+               hlist_for_each_entry_safe(tpos, pos, n, head, wh_hash) {
+                       destr = &tpos->wh_str;
+                       memcpy(p, destr->name, destr->len);
+                       err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
+                                       tpos->wh_ino, tpos->wh_type, delist);
+                       if (unlikely(err))
+                               break;
+               }
+       }
+
+       __putname(o);
+
+out:
+       AuTraceErr(err);
+       return err;
+#else
+       return 0;
+#endif
+}
+
+/*
+ * read every lower branch of the directory and merge the entries into
+ * arg->vdir via fillvdir().  a branch contributes whiteouts when it is
+ * whable (or always under shwh); the last branch's whiteouts are not
+ * recorded since nothing lies below it to hide.  vfsub_readdir() is
+ * re-invoked until a pass makes no callback (CALLED stays clear), which
+ * drains directories larger than one buffer.
+ */
+static int au_do_read_vdir(struct fillvdir_arg *arg)
+{
+       int err;
+       unsigned int rdhash;
+       loff_t offset;
+       aufs_bindex_t bend, bindex, bstart;
+       unsigned char shwh;
+       struct file *hf, *file;
+       struct super_block *sb;
+
+       file = arg->file;
+       sb = file->f_dentry->d_sb;
+       SiMustAnyLock(sb);
+
+       /* hash width from mount option, or estimated from the dir size */
+       rdhash = au_sbi(sb)->si_rdhash;
+       if (!rdhash)
+               rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL));
+       err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+       err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
+       if (unlikely(err))
+               goto out_delist;
+
+       err = 0;
+       arg->flags = 0;
+       shwh = 0;
+       if (au_opt_test(au_mntflags(sb), SHWH)) {
+               shwh = 1;
+               au_fset_fillvdir(arg->flags, SHWH);
+       }
+       bstart = au_fbstart(file);
+       bend = au_fbend_dir(file);
+       for (bindex = bstart; !err && bindex <= bend; bindex++) {
+               hf = au_hf_dir(file, bindex);
+               if (!hf)
+                       continue;
+
+               /* rewind the lower dir; a successful llseek here returns 0,
+                * so the loff_t -> int assignment below cannot truncate */
+               offset = vfsub_llseek(hf, 0, SEEK_SET);
+               err = offset;
+               if (unlikely(offset))
+                       break;
+
+               arg->bindex = bindex;
+               au_fclr_fillvdir(arg->flags, WHABLE);
+               if (shwh
+                   || (bindex != bend
+                       && au_br_whable(au_sbr_perm(sb, bindex))))
+                       au_fset_fillvdir(arg->flags, WHABLE);
+               do {
+                       arg->err = 0;
+                       au_fclr_fillvdir(arg->flags, CALLED);
+                       /* smp_mb(); */
+                       err = vfsub_readdir(hf, fillvdir, arg);
+                       if (err >= 0)
+                               err = arg->err;
+               } while (!err && au_ftest_fillvdir(arg->flags, CALLED));
+       }
+
+       if (!err && shwh)
+               err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
+
+       au_nhash_wh_free(&arg->whlist);
+
+out_delist:
+       au_nhash_de_free(&arg->delist);
+out:
+       return err;
+}
+
+/*
+ * obtain an up-to-date per-inode vdir for @file.  allocates one when the
+ * inode has none yet; when @may_read is set and the cached copy is stale
+ * (i_version changed or the si_rdcache timeout expired) it is reset and
+ * re-read.  on success a freshly allocated vdir is installed on the
+ * inode; on failure it is freed again (an existing cached vdir is left
+ * in place).
+ */
+static int read_vdir(struct file *file, int may_read)
+{
+       int err;
+       unsigned long expire;
+       unsigned char do_read;
+       struct fillvdir_arg arg;
+       struct inode *inode;
+       struct au_vdir *vdir, *allocated;
+
+       err = 0;
+       inode = file->f_dentry->d_inode;
+       IMustLock(inode);
+       SiMustAnyLock(inode->i_sb);
+
+       allocated = NULL;
+       do_read = 0;
+       expire = au_sbi(inode->i_sb)->si_rdcache;
+       vdir = au_ivdir(inode);
+       if (!vdir) {
+               do_read = 1;
+               vdir = alloc_vdir(file);
+               err = PTR_ERR(vdir);
+               if (IS_ERR(vdir))
+                       goto out;
+               err = 0;
+               allocated = vdir;
+       } else if (may_read
+                  && (inode->i_version != vdir->vd_version
+                      || time_after(jiffies, vdir->vd_jiffy + expire))) {
+               /* cached copy is stale; rebuild it in place */
+               do_read = 1;
+               err = reinit_vdir(vdir);
+               if (unlikely(err))
+                       goto out;
+       }
+
+       if (!do_read)
+               return 0; /* success */
+
+       arg.file = file;
+       arg.vdir = vdir;
+       err = au_do_read_vdir(&arg);
+       if (!err) {
+               /* file->f_pos = 0; */
+               vdir->vd_version = inode->i_version;
+               vdir->vd_last.ul = 0;
+               vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+               if (allocated)
+                       au_set_ivdir(inode, allocated);
+       } else if (allocated)
+               au_vdir_free(allocated);
+
+out:
+       return err;
+}
+
+/*
+ * duplicate the per-inode vdir @src into the per-file vdir @tgt,
+ * including the read-cursor position.  @tgt must hold exactly one
+ * (re)initialized deblk on entry.  on any allocation failure @tgt is
+ * reset via reinit_vdir() and the error is returned.
+ */
+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
+{
+       int err, rerr;
+       unsigned long ul, n;
+       const unsigned int deblk_sz = src->vd_deblk_sz;
+
+       AuDebugOn(tgt->vd_nblk != 1);
+
+       err = -ENOMEM;
+       if (tgt->vd_nblk < src->vd_nblk) {
+               unsigned char **p;
+
+               p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
+                            GFP_NOFS);
+               if (unlikely(!p))
+                       goto out;
+               tgt->vd_deblk = p;
+       }
+
+       if (tgt->vd_deblk_sz != deblk_sz) {
+               unsigned char *p;
+
+               tgt->vd_deblk_sz = deblk_sz;
+               p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS);
+               if (unlikely(!p))
+                       goto out;
+               tgt->vd_deblk[0] = p;
+       }
+       memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
+       tgt->vd_version = src->vd_version;
+       tgt->vd_jiffy = src->vd_jiffy;
+
+       n = src->vd_nblk;
+       for (ul = 1; ul < n; ul++) {
+               tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
+                                           GFP_NOFS);
+               if (unlikely(!tgt->vd_deblk[ul]))
+                       goto out;
+               tgt->vd_nblk++;
+       }
+       tgt->vd_nblk = n;
+       /*
+        * bugfix: this was a self-assignment ("tgt->vd_last.ul =
+        * tgt->vd_last.ul"), leaving the cursor on block 0 while the byte
+        * offset below is taken relative to src's CURRENT block.  mirror
+        * src's cursor block index so the two stay consistent.
+        */
+       tgt->vd_last.ul = src->vd_last.ul;
+       tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul];
+       tgt->vd_last.p.deblk += src->vd_last.p.deblk
+               - src->vd_deblk[src->vd_last.ul];
+       /* smp_mb(); */
+       return 0; /* success */
+
+out:
+       rerr = reinit_vdir(tgt);
+       BUG_ON(rerr);
+       return err;
+}
+
+/*
+ * prepare @file's private vdir cache for readdir: refresh the per-inode
+ * vdir (re-reading is allowed only at f_pos 0), then copy it into the
+ * per-file cache when that cache is missing or out of date with
+ * f_version.  returns 0 on success or a negative errno.
+ */
+int au_vdir_init(struct file *file)
+{
+       int err;
+       struct inode *inode;
+       struct au_vdir *vdir_cache, *allocated;
+
+       err = read_vdir(file, !file->f_pos);
+       if (unlikely(err))
+               goto out;
+
+       allocated = NULL;
+       vdir_cache = au_fvdir_cache(file);
+       if (!vdir_cache) {
+               vdir_cache = alloc_vdir(file);
+               err = PTR_ERR(vdir_cache);
+               if (IS_ERR(vdir_cache))
+                       goto out;
+               allocated = vdir_cache;
+       } else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
+               /* rewound and stale: rebuild from the inode copy below */
+               err = reinit_vdir(vdir_cache);
+               if (unlikely(err))
+                       goto out;
+       } else
+               return 0; /* success */
+
+       inode = file->f_dentry->d_inode;
+       err = copy_vdir(vdir_cache, au_ivdir(inode));
+       if (!err) {
+               file->f_version = inode->i_version;
+               if (allocated)
+                       au_set_fvdir_cache(file, allocated);
+       } else if (allocated)
+               au_vdir_free(allocated);
+
+out:
+       return err;
+}
+
+/* convert @vdir's current read cursor (vd_last) into a linear f_pos value */
+static loff_t calc_offset(struct au_vdir *vdir)
+{
+       union au_vdir_deblk_p blk_start;
+       loff_t pos;
+
+       blk_start.deblk = vdir->vd_deblk[vdir->vd_last.ul];
+       /* bytes consumed within the current deblk ... */
+       pos = vdir->vd_last.p.deblk - blk_start.deblk;
+       /* ... plus all the full deblks preceding it */
+       pos += vdir->vd_deblk_sz * vdir->vd_last.ul;
+       return pos;
+}
+
+/*
+ * position the per-file vdir cursor at file->f_pos.  fast path: the
+ * cursor already matches, or f_pos is 0 (rewind).  otherwise walk entry
+ * by entry from the deblk that contains f_pos until the exact offset is
+ * reached.  returns 1 when the cursor now points at a valid entry, 0
+ * when f_pos lies beyond the cached contents.
+ */
+static int seek_vdir(struct file *file)
+{
+       int valid;
+       unsigned int deblk_sz;
+       unsigned long ul, n;
+       loff_t offset;
+       union au_vdir_deblk_p p, deblk_end;
+       struct au_vdir *vdir_cache;
+
+       valid = 1;
+       vdir_cache = au_fvdir_cache(file);
+       offset = calc_offset(vdir_cache);
+       AuDbg("offset %lld\n", offset);
+       if (file->f_pos == offset)
+               goto out;
+
+       /* rewind; f_pos == 0 needs nothing more */
+       vdir_cache->vd_last.ul = 0;
+       vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
+       if (!file->f_pos)
+               goto out;
+
+       valid = 0;
+       deblk_sz = vdir_cache->vd_deblk_sz;
+       ul = div64_u64(file->f_pos, deblk_sz);
+       AuDbg("ul %lu\n", ul);
+       if (ul >= vdir_cache->vd_nblk)
+               goto out;
+
+       n = vdir_cache->vd_nblk;
+       for (; ul < n; ul++) {
+               p.deblk = vdir_cache->vd_deblk[ul];
+               deblk_end.deblk = p.deblk + deblk_sz;
+               offset = ul;
+               offset *= deblk_sz;
+               /* step over whole entries until f_pos or the block end */
+               while (!is_deblk_end(&p, &deblk_end) && offset < file->f_pos) {
+                       unsigned int l;
+
+                       l = calc_size(p.de->de_str.len);
+                       offset += l;
+                       p.deblk += l;
+               }
+               if (!is_deblk_end(&p, &deblk_end)) {
+                       valid = 1;
+                       vdir_cache->vd_last.ul = ul;
+                       vdir_cache->vd_last.p = p;
+                       break;
+               }
+       }
+
+out:
+       /* smp_mb(); */
+       AuTraceErr(!valid);
+       return valid;
+}
+
+/*
+ * feed the cached directory entries to the VFS @filldir callback,
+ * starting from file->f_pos.  f_pos is advanced by calc_size() per entry
+ * so it stays convertible back to a cursor by seek_vdir().  when filldir
+ * signals "buffer full" the error is swallowed and 0 returned, as
+ * readdir convention requires; always returns 0.
+ */
+int au_vdir_fill_de(struct file *file, void *dirent, filldir_t filldir)
+{
+       int err;
+       unsigned int l, deblk_sz;
+       union au_vdir_deblk_p deblk_end;
+       struct au_vdir *vdir_cache;
+       struct au_vdir_de *de;
+
+       vdir_cache = au_fvdir_cache(file);
+       if (!seek_vdir(file))
+               return 0;
+
+       deblk_sz = vdir_cache->vd_deblk_sz;
+       while (1) {
+               deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+               deblk_end.deblk += deblk_sz;
+               while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
+                       de = vdir_cache->vd_last.p.de;
+                       AuDbg("%.*s, off%lld, i%lu, dt%d\n",
+                             de->de_str.len, de->de_str.name, file->f_pos,
+                             (unsigned long)de->de_ino, de->de_type);
+                       err = filldir(dirent, de->de_str.name, de->de_str.len,
+                                     file->f_pos, de->de_ino, de->de_type);
+                       if (unlikely(err)) {
+                               AuTraceErr(err);
+                               /* todo: ignore the error caused by udba? */
+                               /* return err; */
+                               return 0;
+                       }
+
+                       l = calc_size(de->de_str.len);
+                       vdir_cache->vd_last.p.deblk += l;
+                       file->f_pos += l;
+               }
+               /* current deblk exhausted; move on to the next one if any */
+               if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
+                       vdir_cache->vd_last.ul++;
+                       vdir_cache->vd_last.p.deblk
+                               = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+                       file->f_pos = deblk_sz * vdir_cache->vd_last.ul;
+                       continue;
+               }
+               break;
+       }
+
+       /* smp_mb(); */
+       return 0;
+}
diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c
new file mode 100644 (file)
index 0000000..fbb8d4b
--- /dev/null
@@ -0,0 +1,835 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#include <linux/ima.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/splice.h>
+#include "aufs.h"
+
+/*
+ * refresh the lower inode's attributes by issuing vfs_getattr() on
+ * @h_path, but only for local filesystems flagged as needing it.
+ * @did (may be NULL) reports whether getattr was actually called.
+ * the kstat result is discarded; only the getattr side effect on the
+ * lower inode is wanted here.
+ */
+int vfsub_update_h_iattr(struct path *h_path, int *did)
+{
+       int err;
+       struct kstat st;
+       struct super_block *h_sb;
+
+       /* for remote fs, leave work for its getattr or d_revalidate */
+       /* for bad i_attr fs, handle them in aufs_getattr() */
+       /* still some fs may acquire i_mutex. we need to skip them */
+       err = 0;
+       if (!did)
+               did = &err;
+       h_sb = h_path->dentry->d_sb;
+       *did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
+       if (*did)
+               err = vfs_getattr(h_path->mnt, h_path->dentry, &st);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * dentry_open() wrapper.  takes its own path reference (dentry_open
+ * consumes one), and bumps i_readcount for read-only opens to mirror
+ * what the regular open path does.  returns the file or an ERR_PTR.
+ */
+struct file *vfsub_dentry_open(struct path *path, int flags)
+{
+       struct file *file;
+
+       path_get(path);
+       file = dentry_open(path->dentry, path->mnt,
+                          flags /* | __FMODE_NONOTIFY */,
+                          current_cred());
+       if (!IS_ERR_OR_NULL(file)
+           && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+               i_readcount_inc(path->dentry->d_inode);
+
+       return file;
+}
+
+/*
+ * filp_open() wrapper with lockdep silenced around the call (aufs's
+ * nested branch locking otherwise triggers false positives — see the
+ * other lockdep_off/on pairs in this file), followed by an attribute
+ * refresh of the opened file.  returns the file or an ERR_PTR.
+ */
+struct file *vfsub_filp_open(const char *path, int oflags, int mode)
+{
+       struct file *file;
+
+       lockdep_off();
+       file = filp_open(path,
+                        oflags /* | __FMODE_NONOTIFY */,
+                        mode);
+       lockdep_on();
+       if (IS_ERR(file))
+               goto out;
+       vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+out:
+       return file;
+}
+
+/*
+ * kern_path() wrapper: on a successful, positive lookup also refresh the
+ * lower inode attributes.  returns kern_path()'s result.
+ */
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
+{
+       int err;
+
+       err = kern_path(name, flags, path);
+       if (!err && path->dentry->d_inode)
+               vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+       return err;
+}
+
+/*
+ * lookup_one_len() wrapper: requires parent's i_mutex held (asserted),
+ * and refreshes the lower inode attributes on a positive result.
+ * returns the dentry or an ERR_PTR.  path.mnt is NULL here, so the
+ * iattr refresh runs without a vfsmount — matches the other callers.
+ */
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+                                   int len)
+{
+       struct path path = {
+               .mnt = NULL
+       };
+
+       /* VFS checks it too, but by WARN_ON_ONCE() */
+       IMustLock(parent->d_inode);
+
+       path.dentry = lookup_one_len(name, parent, len);
+       if (IS_ERR(path.dentry))
+               goto out;
+       if (path.dentry->d_inode)
+               vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+       AuTraceErrPtr(path.dentry);
+       return path.dentry;
+}
+
+/*
+ * lookup_hash() wrapper: looks up nd's last component under the held
+ * parent i_mutex (asserted) and refreshes the lower inode attributes on
+ * a positive result.  returns the dentry or an ERR_PTR.
+ */
+struct dentry *vfsub_lookup_hash(struct nameidata *nd)
+{
+       struct path path = {
+               .mnt = nd->path.mnt
+       };
+
+       IMustLock(nd->path.dentry->d_inode);
+
+       path.dentry = lookup_hash(nd);
+       if (IS_ERR(path.dentry))
+               goto out;
+       if (path.dentry->d_inode)
+               vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+       AuTraceErrPtr(path.dentry);
+       return path.dentry;
+}
+
+/*
+ * this is "VFS:__lookup_one_len()" which was removed and merged into
+ * VFS:lookup_one_len() by the commit.
+ *     6a96ba5 2011-03-14 kill __lookup_one_len()
+ * this function should always be equivalent to the corresponding part in
+ * VFS:lookup_one_len().
+ */
+/*
+ * fill @this with @name/@len and the VFS name hash, rejecting empty
+ * names and any '/' or NUL byte with -EACCES.  intentionally a verbatim
+ * copy of the validation/hash loop inside VFS:lookup_one_len() (see the
+ * comment above) — do not "improve" it independently of the VFS.
+ */
+int vfsub_name_hash(const char *name, struct qstr *this, int len)
+{
+       unsigned long hash;
+       unsigned int c;
+
+       this->name = name;
+       this->len = len;
+       if (!len)
+               return -EACCES;
+
+       hash = init_name_hash();
+       while (len--) {
+               c = *(const unsigned char *)name++;
+               if (c == '/' || c == '\0')
+                       return -EACCES;
+               hash = partial_name_hash(c, hash);
+       }
+       this->hash = end_name_hash(hash);
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * lock_rename() wrapper that also suspends hinode notification on both
+ * lower directories (once, when they are the same).  must be paired
+ * with vfsub_unlock_rename().  returns lock_rename()'s result (the
+ * common ancestor dentry when the two share one, per VFS semantics).
+ */
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+                                struct dentry *d2, struct au_hinode *hdir2)
+{
+       struct dentry *d;
+
+       lockdep_off();
+       d = lock_rename(d1, d2);
+       lockdep_on();
+       au_hn_suspend(hdir1);
+       if (hdir1 != hdir2)
+               au_hn_suspend(hdir2);
+
+       return d;
+}
+
+/*
+ * counterpart of vfsub_lock_rename(): resume hinode notification first,
+ * then drop the rename locks (reverse order of acquisition).
+ */
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+                        struct dentry *d2, struct au_hinode *hdir2)
+{
+       au_hn_resume(hdir1);
+       if (hdir1 != hdir2)
+               au_hn_resume(hdir2);
+       lockdep_off();
+       unlock_rename(d1, d2);
+       lockdep_on();
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * vfs_create() wrapper for a lower branch.  path->dentry is temporarily
+ * swapped to its parent so security_path_mknod() receives the containing
+ * directory's path, then restored.  for filesystems that require a
+ * nameidata (see au_test_fs_null_nd()), a minimal LOOKUP_CREATE nd with
+ * an O_CREAT read intent is synthesized.  on success, the attributes of
+ * the new file and, if getattr actually ran, its parent are refreshed.
+ */
+int vfsub_create(struct inode *dir, struct path *path, int mode)
+{
+       int err;
+       struct dentry *d;
+
+       IMustLock(dir);
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       err = security_path_mknod(path, d, mode, 0);
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       if (au_test_fs_null_nd(dir->i_sb))
+               err = vfs_create(dir, path->dentry, mode, NULL);
+       else {
+               struct nameidata h_nd;
+
+               memset(&h_nd, 0, sizeof(h_nd));
+               h_nd.flags = LOOKUP_CREATE;
+               h_nd.intent.open.flags = O_CREAT
+                       | vfsub_fmode_to_uint(FMODE_READ);
+               h_nd.intent.open.create_mode = mode;
+               h_nd.path.dentry = path->dentry->d_parent;
+               h_nd.path.mnt = path->mnt;
+               path_get(&h_nd.path);
+               err = vfs_create(dir, path->dentry, mode, &h_nd);
+               path_put(&h_nd.path);
+       }
+
+       if (!err) {
+               struct path tmp = *path;
+               int did;
+
+               vfsub_update_h_iattr(&tmp, &did);
+               if (did) {
+                       tmp.dentry = path->dentry->d_parent;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+               }
+               /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+/*
+ * vfs_symlink() wrapper for a lower branch: run the security hook with
+ * path->dentry swapped to its parent (then restored), create the
+ * symlink, and refresh the new entry's — and on demand its parent's —
+ * attributes.
+ */
+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
+{
+       int err;
+       struct dentry *d;
+
+       IMustLock(dir);
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       err = security_path_symlink(path, d, symname);
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       err = vfs_symlink(dir, path->dentry, symname);
+       if (!err) {
+               struct path tmp = *path;
+               int did;
+
+               vfsub_update_h_iattr(&tmp, &did);
+               if (did) {
+                       tmp.dentry = path->dentry->d_parent;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+               }
+               /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+/*
+ * vfs_mknod() wrapper for a lower branch: security hook with the parent
+ * path (dev encoded via new_encode_dev() as the hook expects), the mknod
+ * itself, then the usual attribute refresh of the new node and possibly
+ * its parent.
+ */
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
+{
+       int err;
+       struct dentry *d;
+
+       IMustLock(dir);
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       err = security_path_mknod(path, d, mode, new_encode_dev(dev));
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       err = vfs_mknod(dir, path->dentry, mode, dev);
+       if (!err) {
+               struct path tmp = *path;
+               int did;
+
+               vfsub_update_h_iattr(&tmp, &did);
+               if (did) {
+                       tmp.dentry = path->dentry->d_parent;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+               }
+               /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+/*
+ * for branch filesystems that impose no hard-link limit of their own
+ * (au_test_fs_no_limit_nlink()), refuse further links once i_nlink
+ * reaches a rough safety margin; filesystems with their own limit
+ * enforce it themselves, so 0 is returned for them unconditionally.
+ */
+static int au_test_nlink(struct inode *inode)
+{
+       const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
+
+       if (!au_test_fs_no_limit_nlink(inode->i_sb)
+           || inode->i_nlink < link_max)
+               return 0;
+       return -EMLINK;
+}
+
+/*
+ * vfs_link() wrapper for a lower branch: enforce the aufs nlink margin,
+ * run the security hook against the parent path, link with lockdep
+ * silenced, and refresh the new link's attributes — plus the parent's
+ * and the source's when getattr actually ran (fuse may keep distinct
+ * in-memory inodes for one inumber, per the comment below).
+ */
+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path)
+{
+       int err;
+       struct dentry *d;
+
+       IMustLock(dir);
+
+       err = au_test_nlink(src_dentry->d_inode);
+       if (unlikely(err))
+               return err;
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       err = security_path_link(src_dentry, path, d);
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       lockdep_off();
+       err = vfs_link(src_dentry, dir, path->dentry);
+       lockdep_on();
+       if (!err) {
+               struct path tmp = *path;
+               int did;
+
+               /* fuse has different memory inode for the same inumber */
+               vfsub_update_h_iattr(&tmp, &did);
+               if (did) {
+                       tmp.dentry = path->dentry->d_parent;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+                       tmp.dentry = src_dentry;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+               }
+               /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+/*
+ * vfs_rename() wrapper for a lower branch: both directory inodes must be
+ * locked; the security hook gets both parents' paths; the rename runs
+ * with lockdep silenced; on success the destination parent's attributes
+ * are refreshed, and the source and its parent too when getattr ran.
+ */
+int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
+                struct inode *dir, struct path *path)
+{
+       int err;
+       struct path tmp = {
+               .mnt    = path->mnt
+       };
+       struct dentry *d;
+
+       IMustLock(dir);
+       IMustLock(src_dir);
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       tmp.dentry = src_dentry->d_parent;
+       err = security_path_rename(&tmp, src_dentry, path, d);
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       lockdep_off();
+       err = vfs_rename(src_dir, src_dentry, dir, path->dentry);
+       lockdep_on();
+       if (!err) {
+               int did;
+
+               tmp.dentry = d->d_parent;
+               vfsub_update_h_iattr(&tmp, &did);
+               if (did) {
+                       tmp.dentry = src_dentry;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+                       tmp.dentry = src_dentry->d_parent;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+               }
+               /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+/*
+ * vfs_mkdir() wrapper for a lower branch: security hook with the parent
+ * path, the mkdir itself, then the usual attribute refresh of the new
+ * directory and possibly its parent.
+ */
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
+{
+       int err;
+       struct dentry *d;
+
+       IMustLock(dir);
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       err = security_path_mkdir(path, d, mode);
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       err = vfs_mkdir(dir, path->dentry, mode);
+       if (!err) {
+               struct path tmp = *path;
+               int did;
+
+               vfsub_update_h_iattr(&tmp, &did);
+               if (did) {
+                       tmp.dentry = path->dentry->d_parent;
+                       vfsub_update_h_iattr(&tmp, /*did*/NULL);
+               }
+               /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+int vfsub_rmdir(struct inode *dir, struct path *path)
+{
+       int err;
+       struct dentry *d;
+
+       IMustLock(dir);
+
+       d = path->dentry;
+       path->dentry = d->d_parent;
+       err = security_path_rmdir(path, d);
+       path->dentry = d;
+       if (unlikely(err))
+               goto out;
+
+       lockdep_off();
+       err = vfs_rmdir(dir, path->dentry);
+       lockdep_on();
+       if (!err) {
+               struct path tmp = {
+                       .dentry = path->dentry->d_parent,
+                       .mnt    = path->mnt
+               };
+
+               vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+       }
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: support mmap_sem? */
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+                    loff_t *ppos)
+{
+       ssize_t err;
+
+       lockdep_off();
+       err = vfs_read(file, ubuf, count, ppos);
+       lockdep_on();
+       if (err >= 0)
+               vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+       return err;
+}
+
+/* todo: kernel_read()? */
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+                    loff_t *ppos)
+{
+       ssize_t err;
+       mm_segment_t oldfs;
+       union {
+               void *k;
+               char __user *u;
+       } buf;
+
+       buf.k = kbuf;
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = vfsub_read_u(file, buf.u, count, ppos);
+       set_fs(oldfs);
+       return err;
+}
+
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+                     loff_t *ppos)
+{
+       ssize_t err;
+
+       lockdep_off();
+       err = vfs_write(file, ubuf, count, ppos);
+       lockdep_on();
+       if (err >= 0)
+               vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+       return err;
+}
+
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
+{
+       ssize_t err;
+       mm_segment_t oldfs;
+       union {
+               void *k;
+               const char __user *u;
+       } buf;
+
+       buf.k = kbuf;
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = vfsub_write_u(file, buf.u, count, ppos);
+       set_fs(oldfs);
+       return err;
+}
+
+int vfsub_flush(struct file *file, fl_owner_t id)
+{
+       int err;
+
+       err = 0;
+       if (file->f_op && file->f_op->flush) {
+               if (!au_test_nfs(file->f_dentry->d_sb))
+                       err = file->f_op->flush(file, id);
+               else {
+                       lockdep_off();
+                       err = file->f_op->flush(file, id);
+                       lockdep_on();
+               }
+               if (!err)
+                       vfsub_update_h_iattr(&file->f_path, /*did*/NULL);
+               /*ignore*/
+       }
+       return err;
+}
+
+int vfsub_readdir(struct file *file, filldir_t filldir, void *arg)
+{
+       int err;
+
+       lockdep_off();
+       err = vfs_readdir(file, filldir, arg);
+       lockdep_on();
+       if (err >= 0)
+               vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+       return err;
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+                    struct pipe_inode_info *pipe, size_t len,
+                    unsigned int flags)
+{
+       long err;
+
+       lockdep_off();
+       err = do_splice_to(in, ppos, pipe, len, flags);
+       lockdep_on();
+       file_accessed(in);
+       if (err >= 0)
+               vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
+       return err;
+}
+
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+                      loff_t *ppos, size_t len, unsigned int flags)
+{
+       long err;
+
+       lockdep_off();
+       err = do_splice_from(pipe, out, ppos, len, flags);
+       lockdep_on();
+       if (err >= 0)
+               vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
+       return err;
+}
+
+int vfsub_fsync(struct file *file, struct path *path, int datasync)
+{
+       int err;
+
+       /* file can be NULL */
+       lockdep_off();
+       err = vfs_fsync(file, datasync);
+       lockdep_on();
+       if (!err) {
+               if (!path) {
+                       AuDebugOn(!file);
+                       path = &file->f_path;
+               }
+               vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+       }
+       return err;
+}
+
+/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+               struct file *h_file)
+{
+       int err;
+       struct inode *h_inode;
+
+       h_inode = h_path->dentry->d_inode;
+       if (!h_file) {
+               err = mnt_want_write(h_path->mnt);
+               if (err)
+                       goto out;
+               err = inode_permission(h_inode, MAY_WRITE);
+               if (err)
+                       goto out_mnt;
+               err = get_write_access(h_inode);
+               if (err)
+                       goto out_mnt;
+               err = break_lease(h_inode, O_WRONLY);
+               if (err)
+                       goto out_inode;
+       }
+
+       err = locks_verify_truncate(h_inode, h_file, length);
+       if (!err)
+               err = security_path_truncate(h_path);
+       if (!err) {
+               lockdep_off();
+               err = do_truncate(h_path->dentry, length, attr, h_file);
+               lockdep_on();
+       }
+
+out_inode:
+       if (!h_file)
+               put_write_access(h_inode);
+out_mnt:
+       if (!h_file)
+               mnt_drop_write(h_path->mnt);
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_vfsub_mkdir_args {
+       int *errp;
+       struct inode *dir;
+       struct path *path;
+       int mode;
+};
+
+static void au_call_vfsub_mkdir(void *args)
+{
+       struct au_vfsub_mkdir_args *a = args;
+       *a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
+}
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
+{
+       int err, do_sio, wkq_err;
+
+       do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+       if (!do_sio)
+               err = vfsub_mkdir(dir, path, mode);
+       else {
+               struct au_vfsub_mkdir_args args = {
+                       .errp   = &err,
+                       .dir    = dir,
+                       .path   = path,
+                       .mode   = mode
+               };
+               wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       return err;
+}
+
+struct au_vfsub_rmdir_args {
+       int *errp;
+       struct inode *dir;
+       struct path *path;
+};
+
+static void au_call_vfsub_rmdir(void *args)
+{
+       struct au_vfsub_rmdir_args *a = args;
+       *a->errp = vfsub_rmdir(a->dir, a->path);
+}
+
+int vfsub_sio_rmdir(struct inode *dir, struct path *path)
+{
+       int err, do_sio, wkq_err;
+
+       do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+       if (!do_sio)
+               err = vfsub_rmdir(dir, path);
+       else {
+               struct au_vfsub_rmdir_args args = {
+                       .errp   = &err,
+                       .dir    = dir,
+                       .path   = path
+               };
+               wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct notify_change_args {
+       int *errp;
+       struct path *path;
+       struct iattr *ia;
+};
+
+static void call_notify_change(void *args)
+{
+       struct notify_change_args *a = args;
+       struct inode *h_inode;
+
+       h_inode = a->path->dentry->d_inode;
+       IMustLock(h_inode);
+
+       *a->errp = -EPERM;
+       if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
+               *a->errp = notify_change(a->path->dentry, a->ia);
+               if (!*a->errp)
+                       vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
+       }
+       AuTraceErr(*a->errp);
+}
+
+int vfsub_notify_change(struct path *path, struct iattr *ia)
+{
+       int err;
+       struct notify_change_args args = {
+               .errp   = &err,
+               .path   = path,
+               .ia     = ia
+       };
+
+       call_notify_change(&args);
+
+       return err;
+}
+
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia)
+{
+       int err, wkq_err;
+       struct notify_change_args args = {
+               .errp   = &err,
+               .path   = path,
+               .ia     = ia
+       };
+
+       wkq_err = au_wkq_wait(call_notify_change, &args);
+       if (unlikely(wkq_err))
+               err = wkq_err;
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct unlink_args {
+       int *errp;
+       struct inode *dir;
+       struct path *path;
+};
+
+static void call_unlink(void *args)
+{
+       struct unlink_args *a = args;
+       struct dentry *d = a->path->dentry;
+       struct inode *h_inode;
+       const int stop_sillyrename = (au_test_nfs(d->d_sb)
+                                     && d->d_count == 1);
+
+       IMustLock(a->dir);
+
+       a->path->dentry = d->d_parent;
+       *a->errp = security_path_unlink(a->path, d);
+       a->path->dentry = d;
+       if (unlikely(*a->errp))
+               return;
+
+       if (!stop_sillyrename)
+               dget(d);
+       h_inode = d->d_inode;
+       if (h_inode)
+               ihold(h_inode);
+
+       lockdep_off();
+       *a->errp = vfs_unlink(a->dir, d);
+       lockdep_on();
+       if (!*a->errp) {
+               struct path tmp = {
+                       .dentry = d->d_parent,
+                       .mnt    = a->path->mnt
+               };
+               vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+       }
+
+       if (!stop_sillyrename)
+               dput(d);
+       if (h_inode)
+               iput(h_inode);
+
+       AuTraceErr(*a->errp);
+}
+
+/*
+ * @dir: must be locked.
+ * @dentry: target dentry.
+ */
+int vfsub_unlink(struct inode *dir, struct path *path, int force)
+{
+       int err;
+       struct unlink_args args = {
+               .errp   = &err,
+               .dir    = dir,
+               .path   = path
+       };
+
+       if (!force)
+               call_unlink(&args);
+       else {
+               int wkq_err;
+
+               wkq_err = au_wkq_wait(call_unlink, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       return err;
+}
diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h
new file mode 100644 (file)
index 0000000..1fa62b4
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#ifndef __AUFS_VFSUB_H__
+#define __AUFS_VFSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/lglock.h>
+#include "debug.h"
+
+/* copied from linux/fs/internal.h */
+/* todo: BAD approach!! */
+DECLARE_BRLOCK(vfsmount_lock);
+extern void file_sb_list_del(struct file *f);
+extern spinlock_t inode_sb_list_lock;
+
+/* copied from linux/fs/file_table.c */
+DECLARE_LGLOCK(files_lglock);
+#ifdef CONFIG_SMP
+/*
+ * These macros iterate all files on all CPUs for a given superblock.
+ * files_lglock must be held globally.
+ */
+#define do_file_list_for_each_entry(__sb, __file)              \
+{                                                              \
+       int i;                                                  \
+       for_each_possible_cpu(i) {                              \
+               struct list_head *list;                         \
+               list = per_cpu_ptr((__sb)->s_files, i);         \
+               list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry                         \
+       }                                                       \
+}
+
+#else
+
+#define do_file_list_for_each_entry(__sb, __file)              \
+{                                                              \
+       struct list_head *list;                                 \
+       list = &(sb)->s_files;                                  \
+       list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry                         \
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for lower inode */
+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
+/* reduce? gave up. */
+enum {
+       AuLsc_I_Begin = I_MUTEX_QUOTA, /* 4 */
+       AuLsc_I_PARENT,         /* lower inode, parent first */
+       AuLsc_I_PARENT2,        /* copyup dirs */
+       AuLsc_I_PARENT3,        /* copyup wh */
+       AuLsc_I_CHILD,
+       AuLsc_I_CHILD2,
+       AuLsc_I_End
+};
+
+/* to debug easier, do not make them inlined functions */
+#define MtxMustLock(mtx)       AuDebugOn(!mutex_is_locked(mtx))
+#define IMustLock(i)           MtxMustLock(&(i)->i_mutex)
+
+/* ---------------------------------------------------------------------- */
+
+static inline void vfsub_drop_nlink(struct inode *inode)
+{
+       AuDebugOn(!inode->i_nlink);
+       drop_nlink(inode);
+}
+
+static inline void vfsub_dead_dir(struct inode *inode)
+{
+       AuDebugOn(!S_ISDIR(inode->i_mode));
+       inode->i_flags |= S_DEAD;
+       clear_nlink(inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did);
+struct file *vfsub_dentry_open(struct path *path, int flags);
+struct file *vfsub_filp_open(const char *path, int oflags, int mode);
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+                                   int len);
+struct dentry *vfsub_lookup_hash(struct nameidata *nd);
+int vfsub_name_hash(const char *name, struct qstr *this, int len);
+
+/* ---------------------------------------------------------------------- */
+
+struct au_hinode;
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+                                struct dentry *d2, struct au_hinode *hdir2);
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+                        struct dentry *d2, struct au_hinode *hdir2);
+
+int vfsub_create(struct inode *dir, struct path *path, int mode);
+int vfsub_symlink(struct inode *dir, struct path *path,
+                 const char *symname);
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
+int vfsub_link(struct dentry *src_dentry, struct inode *dir,
+              struct path *path);
+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
+                struct inode *hdir, struct path *path);
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_rmdir(struct inode *dir, struct path *path);
+
+/* ---------------------------------------------------------------------- */
+
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+                    loff_t *ppos);
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+                       loff_t *ppos);
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+                     loff_t *ppos);
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
+                     loff_t *ppos);
+int vfsub_flush(struct file *file, fl_owner_t id);
+int vfsub_readdir(struct file *file, filldir_t filldir, void *arg);
+
+static inline unsigned int vfsub_file_flags(struct file *file)
+{
+       unsigned int flags;
+
+       spin_lock(&file->f_lock);
+       flags = file->f_flags;
+       spin_unlock(&file->f_lock);
+
+       return flags;
+}
+
+static inline void vfsub_file_accessed(struct file *h_file)
+{
+       file_accessed(h_file);
+       vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
+}
+
+static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
+                                    struct dentry *h_dentry)
+{
+       struct path h_path = {
+               .dentry = h_dentry,
+               .mnt    = h_mnt
+       };
+       touch_atime(h_mnt, h_dentry);
+       vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+                    struct pipe_inode_info *pipe, size_t len,
+                    unsigned int flags);
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+                      loff_t *ppos, size_t len, unsigned int flags);
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+               struct file *h_file);
+int vfsub_fsync(struct file *file, struct path *path, int datasync);
+
+/* ---------------------------------------------------------------------- */
+
+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
+{
+       loff_t err;
+
+       lockdep_off();
+       err = vfs_llseek(file, offset, origin);
+       lockdep_on();
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* dirty workaround for strict type of fmode_t */
+union vfsub_fmu {
+       fmode_t fm;
+       unsigned int ui;
+};
+
+static inline unsigned int vfsub_fmode_to_uint(fmode_t fm)
+{
+       union vfsub_fmu u = {
+               .fm = fm
+       };
+
+       BUILD_BUG_ON(sizeof(u.fm) != sizeof(u.ui));
+
+       return u.ui;
+}
+
+static inline fmode_t vfsub_uint_to_fmode(unsigned int ui)
+{
+       union vfsub_fmu u = {
+               .ui = ui
+       };
+
+       return u.fm;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_sio_rmdir(struct inode *dir, struct path *path);
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia);
+int vfsub_notify_change(struct path *path, struct iattr *ia);
+int vfsub_unlink(struct inode *dir, struct path *path, int force);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_VFSUB_H__ */
diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c
new file mode 100644 (file)
index 0000000..638e860
--- /dev/null
@@ -0,0 +1,756 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * policies for selecting one among multiple writable branches
+ */
+
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* subset of cpup_attr() */
+static noinline_for_stack
+int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
+{
+       int err, sbits;
+       struct iattr ia;
+       struct inode *h_isrc;
+
+       h_isrc = h_src->d_inode;
+       ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
+       ia.ia_mode = h_isrc->i_mode;
+       ia.ia_uid = h_isrc->i_uid;
+       ia.ia_gid = h_isrc->i_gid;
+       sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
+       au_cpup_attr_flags(h_path->dentry->d_inode, h_isrc->i_flags);
+       err = vfsub_sio_notify_change(h_path, &ia);
+
+       /* is this nfs only? */
+       if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
+               ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+               ia.ia_mode = h_isrc->i_mode;
+               err = vfsub_sio_notify_change(h_path, &ia);
+       }
+
+       return err;
+}
+
+#define AuCpdown_PARENT_OPQ    1
+#define AuCpdown_WHED          (1 << 1)
+#define AuCpdown_MADE_DIR      (1 << 2)
+#define AuCpdown_DIROPQ                (1 << 3)
+#define au_ftest_cpdown(flags, name)   ((flags) & AuCpdown_##name)
+#define au_fset_cpdown(flags, name) \
+       do { (flags) |= AuCpdown_##name; } while (0)
+#define au_fclr_cpdown(flags, name) \
+       do { (flags) &= ~AuCpdown_##name; } while (0)
+
+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
+                            unsigned int *flags)
+{
+       int err;
+       struct dentry *opq_dentry;
+
+       opq_dentry = au_diropq_create(dentry, bdst);
+       err = PTR_ERR(opq_dentry);
+       if (IS_ERR(opq_dentry))
+               goto out;
+       dput(opq_dentry);
+       au_fset_cpdown(*flags, DIROPQ);
+
+out:
+       return err;
+}
+
+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
+                           struct inode *dir, aufs_bindex_t bdst)
+{
+       int err;
+       struct path h_path;
+       struct au_branch *br;
+
+       br = au_sbr(dentry->d_sb, bdst);
+       h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+       err = PTR_ERR(h_path.dentry);
+       if (IS_ERR(h_path.dentry))
+               goto out;
+
+       err = 0;
+       if (h_path.dentry->d_inode) {
+               h_path.mnt = au_br_mnt(br);
+               err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
+                                         dentry);
+       }
+       dput(h_path.dentry);
+
+out:
+       return err;
+}
+
+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
+                        struct au_pin *pin,
+                        struct dentry *h_parent, void *arg)
+{
+       int err, rerr;
+       aufs_bindex_t bopq, bstart;
+       struct path h_path;
+       struct dentry *parent;
+       struct inode *h_dir, *h_inode, *inode, *dir;
+       unsigned int *flags = arg;
+
+       bstart = au_dbstart(dentry);
+       /* dentry is di-locked */
+       parent = dget_parent(dentry);
+       dir = parent->d_inode;
+       h_dir = h_parent->d_inode;
+       AuDebugOn(h_dir != au_h_iptr(dir, bdst));
+       IMustLock(h_dir);
+
+       err = au_lkup_neg(dentry, bdst, /*wh*/0);
+       if (unlikely(err < 0))
+               goto out;
+       h_path.dentry = au_h_dptr(dentry, bdst);
+       h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
+       err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path,
+                             S_IRWXU | S_IRUGO | S_IXUGO);
+       if (unlikely(err))
+               goto out_put;
+       au_fset_cpdown(*flags, MADE_DIR);
+
+       bopq = au_dbdiropq(dentry);
+       au_fclr_cpdown(*flags, WHED);
+       au_fclr_cpdown(*flags, DIROPQ);
+       if (au_dbwh(dentry) == bdst)
+               au_fset_cpdown(*flags, WHED);
+       if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst)
+               au_fset_cpdown(*flags, PARENT_OPQ);
+       h_inode = h_path.dentry->d_inode;
+       mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+       if (au_ftest_cpdown(*flags, WHED)) {
+               err = au_cpdown_dir_opq(dentry, bdst, flags);
+               if (unlikely(err)) {
+                       mutex_unlock(&h_inode->i_mutex);
+                       goto out_dir;
+               }
+       }
+
+       err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart));
+       mutex_unlock(&h_inode->i_mutex);
+       if (unlikely(err))
+               goto out_opq;
+
+       if (au_ftest_cpdown(*flags, WHED)) {
+               err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
+               if (unlikely(err))
+                       goto out_opq;
+       }
+
+       inode = dentry->d_inode;
+       if (au_ibend(inode) < bdst)
+               au_set_ibend(inode, bdst);
+       au_set_h_iptr(inode, bdst, au_igrab(h_inode),
+                     au_hi_flags(inode, /*isdir*/1));
+       goto out; /* success */
+
+       /* revert */
+out_opq:
+       if (au_ftest_cpdown(*flags, DIROPQ)) {
+               mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+               rerr = au_diropq_remove(dentry, bdst);
+               mutex_unlock(&h_inode->i_mutex);
+               if (unlikely(rerr)) {
+                       AuIOErr("failed removing diropq for %.*s b%d (%d)\n",
+                               AuDLNPair(dentry), bdst, rerr);
+                       err = -EIO;
+                       goto out;
+               }
+       }
+out_dir:
+       if (au_ftest_cpdown(*flags, MADE_DIR)) {
+               rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
+               if (unlikely(rerr)) {
+                       AuIOErr("failed removing %.*s b%d (%d)\n",
+                               AuDLNPair(dentry), bdst, rerr);
+                       err = -EIO;
+               }
+       }
+out_put:
+       au_set_h_dptr(dentry, bdst, NULL);
+       if (au_dbend(dentry) == bdst)
+               au_update_dbend(dentry);
+out:
+       dput(parent);
+       return err;
+}
+
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+       int err;
+       unsigned int flags;
+
+       flags = 0;
+       err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags);
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for create */
+
+static int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       int err, i, j, ndentry;
+       aufs_bindex_t bopq;
+       struct au_dcsub_pages dpages;
+       struct au_dpage *dpage;
+       struct dentry **dentries, *parent, *d;
+
+       err = au_dpages_init(&dpages, GFP_NOFS);
+       if (unlikely(err))
+               goto out;
+       parent = dget_parent(dentry);
+       err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0);
+       if (unlikely(err))
+               goto out_free;
+
+       err = bindex;
+       for (i = 0; i < dpages.ndpage; i++) {
+               dpage = dpages.dpages + i;
+               dentries = dpage->dentries;
+               ndentry = dpage->ndentry;
+               for (j = 0; j < ndentry; j++) {
+                       d = dentries[j];
+                       di_read_lock_parent2(d, !AuLock_IR);
+                       bopq = au_dbdiropq(d);
+                       di_read_unlock(d, !AuLock_IR);
+                       if (bopq >= 0 && bopq < err)
+                               err = bopq;
+               }
+       }
+
+out_free:
+       dput(parent);
+       au_dpages_free(&dpages);
+out:
+       return err;
+}
+
+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
+{
+       for (; bindex >= 0; bindex--)
+               if (!au_br_rdonly(au_sbr(sb, bindex)))
+                       return bindex;
+       return -EROFS;
+}
+
+/* top down parent */
+static int au_wbr_create_tdp(struct dentry *dentry,
+                            unsigned int flags __maybe_unused)
+{
+       int err;
+       aufs_bindex_t bstart, bindex;
+       struct super_block *sb;
+       struct dentry *parent, *h_parent;
+
+       sb = dentry->d_sb;
+       bstart = au_dbstart(dentry);
+       err = bstart;
+       if (!au_br_rdonly(au_sbr(sb, bstart)))
+               goto out;
+
+       err = -EROFS;
+       parent = dget_parent(dentry);
+       for (bindex = au_dbstart(parent); bindex < bstart; bindex++) {
+               h_parent = au_h_dptr(parent, bindex);
+               if (!h_parent || !h_parent->d_inode)
+                       continue;
+
+               if (!au_br_rdonly(au_sbr(sb, bindex))) {
+                       err = bindex;
+                       break;
+               }
+       }
+       dput(parent);
+
+       /* bottom up here */
+       if (unlikely(err < 0)) {
+               err = au_wbr_bu(sb, bstart - 1);
+               if (err >= 0)
+                       err = au_wbr_nonopq(dentry, err);
+       }
+
+out:
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* an exception for the policy other than tdp */
+static int au_wbr_create_exp(struct dentry *dentry)
+{
+       int err;
+       aufs_bindex_t bwh, bdiropq;
+       struct dentry *parent;
+
+       err = -1;
+       bwh = au_dbwh(dentry);
+       parent = dget_parent(dentry);
+       bdiropq = au_dbdiropq(parent);
+       if (bwh >= 0) {
+               if (bdiropq >= 0)
+                       err = min(bdiropq, bwh);
+               else
+                       err = bwh;
+               AuDbg("%d\n", err);
+       } else if (bdiropq >= 0) {
+               err = bdiropq;
+               AuDbg("%d\n", err);
+       }
+       dput(parent);
+
+       if (err >= 0)
+               err = au_wbr_nonopq(dentry, err);
+
+       if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
+               err = -1;
+
+       AuDbg("%d\n", err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* round robin */
+static int au_wbr_create_init_rr(struct super_block *sb)
+{
+       int err;
+
+       err = au_wbr_bu(sb, au_sbend(sb));
+       atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
+       /* smp_mb(); */
+
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/*
+ * pick a writable branch in round-robin order.
+ * tries at most one full cycle over all branches; returns the branch
+ * index on success, a negative errno (e.g. -EROFS) otherwise.
+ */
+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags)
+{
+       int err, nbr;
+       unsigned int u;
+       aufs_bindex_t bindex, bend;
+       struct super_block *sb;
+       atomic_t *next;
+
+       /* an explicit user-specified branch wins over the policy */
+       err = au_wbr_create_exp(dentry);
+       if (err >= 0)
+               goto out;
+
+       sb = dentry->d_sb;
+       next = &au_sbi(sb)->si_wbr_rr_next;
+       bend = au_sbend(sb);
+       nbr = bend + 1;
+       for (bindex = 0; bindex <= bend; bindex++) {
+               if (!au_ftest_wbr(flags, DIR)) {
+                       err = atomic_dec_return(next) + 1;
+                       /* modulo for 0 is meaningless */
+                       if (unlikely(!err))
+                               err = atomic_dec_return(next) + 1;
+               } else
+                       /* for dirs the counter is only read, not advanced */
+                       err = atomic_read(next);
+               AuDbg("%d\n", err);
+               u = err;
+               err = u % nbr;
+               AuDbg("%d\n", err);
+               if (!au_br_rdonly(au_sbr(sb, err)))
+                       break;
+               err = -EROFS;
+       }
+
+       if (err >= 0)
+               err = au_wbr_nonopq(dentry, err);
+
+out:
+       AuDbg("%d\n", err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space */
+/*
+ * refresh the cached per-branch free-space figures and record the branch
+ * with the most available bytes in mfs->mfs_bindex (-EROFS if none is
+ * writable).  when @parent is given, only branches where the parent dir
+ * exists are considered.  caller must hold mfs->mfs_lock.
+ */
+static void au_mfs(struct dentry *dentry, struct dentry *parent)
+{
+       struct super_block *sb;
+       struct au_branch *br;
+       struct au_wbr_mfs *mfs;
+       struct dentry *h_parent;
+       aufs_bindex_t bindex, bend;
+       int err;
+       unsigned long long b, bavail;
+       struct path h_path;
+       /* reduce the stack usage */
+       struct kstatfs *st;
+
+       st = kmalloc(sizeof(*st), GFP_NOFS);
+       if (unlikely(!st)) {
+               /* best-effort: keep the stale cached selection */
+               AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
+               return;
+       }
+
+       bavail = 0;
+       sb = dentry->d_sb;
+       mfs = &au_sbi(sb)->si_wbr_mfs;
+       MtxMustLock(&mfs->mfs_lock);
+       mfs->mfs_bindex = -EROFS;
+       mfs->mfsrr_bytes = 0;
+       if (!parent) {
+               bindex = 0;
+               bend = au_sbend(sb);
+       } else {
+               bindex = au_dbstart(parent);
+               bend = au_dbtaildir(parent);
+       }
+
+       for (; bindex <= bend; bindex++) {
+               if (parent) {
+                       h_parent = au_h_dptr(parent, bindex);
+                       if (!h_parent || !h_parent->d_inode)
+                               continue;
+               }
+               br = au_sbr(sb, bindex);
+               if (au_br_rdonly(br))
+                       continue;
+
+               /* sb->s_root for NFS is unreliable */
+               h_path.mnt = au_br_mnt(br);
+               h_path.dentry = h_path.mnt->mnt_root;
+               err = vfs_statfs(&h_path, st);
+               if (unlikely(err)) {
+                       AuWarn1("failed statfs, b%d, %d\n", bindex, err);
+                       continue;
+               }
+
+               /* when the available size is equal, select the lower one */
+               BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
+                            || sizeof(b) < sizeof(st->f_bsize));
+               b = st->f_bavail * st->f_bsize;
+               br->br_wbr->wbr_bytes = b;
+               /* ">=" lets a later (lower) branch win ties */
+               if (b >= bavail) {
+                       bavail = b;
+                       mfs->mfs_bindex = bindex;
+                       mfs->mfs_jiffy = jiffies;
+               }
+       }
+
+       mfs->mfsrr_bytes = bavail;
+       AuDbg("b%d\n", mfs->mfs_bindex);
+       kfree(st);
+}
+
+/*
+ * "most free space" create policy.
+ * the cached selection is reused until it expires, goes invalid, or the
+ * selected branch turns read-only; otherwise au_mfs() refreshes it.
+ */
+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags)
+{
+       int err;
+       struct dentry *parent;
+       struct super_block *sb;
+       struct au_wbr_mfs *mfs;
+
+       /* an explicit user-specified branch wins over the policy */
+       err = au_wbr_create_exp(dentry);
+       if (err >= 0)
+               goto out;
+
+       sb = dentry->d_sb;
+       parent = NULL;
+       if (au_ftest_wbr(flags, PARENT))
+               parent = dget_parent(dentry);
+       mfs = &au_sbi(sb)->si_wbr_mfs;
+       mutex_lock(&mfs->mfs_lock);
+       if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+           || mfs->mfs_bindex < 0
+           || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
+               au_mfs(dentry, parent);
+       mutex_unlock(&mfs->mfs_lock);
+       /* NOTE(review): mfs_bindex is read after unlock — racy reads appear
+          tolerated here; confirm against the rest of the mfs users */
+       err = mfs->mfs_bindex;
+       dput(parent);
+
+       if (err >= 0)
+               err = au_wbr_nonopq(dentry, err);
+
+out:
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* init for the mfs create policy: reset the cache, no branch selected yet */
+static int au_wbr_create_init_mfs(struct super_block *sb)
+{
+       struct au_wbr_mfs *mfs = &au_sbi(sb)->si_wbr_mfs;
+
+       mutex_init(&mfs->mfs_lock);
+       mfs->mfs_jiffy = 0;
+       mfs->mfs_bindex = -EROFS;
+
+       return 0;
+}
+
+/* fin for the mfs create policy; @sb is __maybe_unused because
+ * mutex_destroy() can expand to nothing in non-debug builds */
+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
+{
+       mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space and then round robin */
+/*
+ * use mfs; when the best branch's free space is below the configured
+ * watermark, fall back to round-robin instead.
+ */
+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags)
+{
+       int err;
+       struct au_wbr_mfs *mfs;
+
+       err = au_wbr_create_mfs(dentry, flags);
+       if (err >= 0) {
+               mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
+               mutex_lock(&mfs->mfs_lock);
+               if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
+                       err = au_wbr_create_rr(dentry, flags);
+               mutex_unlock(&mfs->mfs_lock);
+       }
+
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* mfsrr needs both the mfs cache and the rr counter initialized */
+static int au_wbr_create_init_mfsrr(struct super_block *sb)
+{
+       au_wbr_create_init_mfs(sb); /* ignore */
+       return au_wbr_create_init_rr(sb);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down parent and most free space */
+/*
+ * start from the top-down-parent choice, then among the branches where
+ * the parent dir exists pick the one with strictly more cached free
+ * bytes ("">"" so the upper branch wins ties).
+ */
+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags)
+{
+       int err, e2;
+       unsigned long long b;
+       aufs_bindex_t bindex, bstart, bend;
+       struct super_block *sb;
+       struct dentry *parent, *h_parent;
+       struct au_branch *br;
+
+       err = au_wbr_create_tdp(dentry, flags);
+       if (unlikely(err < 0))
+               goto out;
+       parent = dget_parent(dentry);
+       bstart = au_dbstart(parent);
+       bend = au_dbtaildir(parent);
+       if (bstart == bend)
+               goto out_parent; /* success */
+
+       /* refresh the per-branch wbr_bytes cache as a side effect */
+       e2 = au_wbr_create_mfs(dentry, flags);
+       if (e2 < 0)
+               goto out_parent; /* success */
+
+       /* when the available size is equal, select upper one */
+       sb = dentry->d_sb;
+       br = au_sbr(sb, err);
+       b = br->br_wbr->wbr_bytes;
+       AuDbg("b%d, %llu\n", err, b);
+
+       for (bindex = bstart; bindex <= bend; bindex++) {
+               h_parent = au_h_dptr(parent, bindex);
+               if (!h_parent || !h_parent->d_inode)
+                       continue;
+
+               br = au_sbr(sb, bindex);
+               if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
+                       b = br->br_wbr->wbr_bytes;
+                       err = bindex;
+                       AuDbg("b%d, %llu\n", err, b);
+               }
+       }
+
+       if (err >= 0)
+               err = au_wbr_nonopq(dentry, err);
+
+out_parent:
+       dput(parent);
+out:
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * - top down parent
+ * - most free space with parent
+ * - most free space round-robin regardless parent
+ */
+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags)
+{
+       int err;
+       unsigned long long watermark;
+       struct super_block *sb;
+       struct au_branch *br;
+       struct au_wbr_mfs *mfs;
+
+       err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT);
+       if (unlikely(err < 0))
+               goto out;
+
+       sb = dentry->d_sb;
+       br = au_sbr(sb, err);
+       mfs = &au_sbi(sb)->si_wbr_mfs;
+       mutex_lock(&mfs->mfs_lock);
+       watermark = mfs->mfsrr_watermark;
+       mutex_unlock(&mfs->mfs_lock);
+       /* the pmfs pick is too full: fall back to mfsrr */
+       if (br->br_wbr->wbr_bytes < watermark)
+               /* regardless the parent dir */
+               err = au_wbr_create_mfsrr(dentry, flags);
+
+out:
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for copyup */
+
+/* top down parent */
+/* copyup policy: reuse the tdp create policy; it ignores its flags */
+static int au_wbr_copyup_tdp(struct dentry *dentry)
+{
+       return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0);
+}
+
+/* bottom up parent */
+/*
+ * walk upward from the dentry's start branch looking for a writable
+ * branch where the parent dir exists; failing that, search bottom-up
+ * above bstart via au_wbr_bu().
+ */
+static int au_wbr_copyup_bup(struct dentry *dentry)
+{
+       int err;
+       aufs_bindex_t bindex, bstart;
+       struct dentry *parent, *h_parent;
+       struct super_block *sb;
+
+       err = -EROFS;
+       sb = dentry->d_sb;
+       parent = dget_parent(dentry);
+       bstart = au_dbstart(parent);
+       for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) {
+               h_parent = au_h_dptr(parent, bindex);
+               if (!h_parent || !h_parent->d_inode)
+                       continue;
+
+               if (!au_br_rdonly(au_sbr(sb, bindex))) {
+                       err = bindex;
+                       break;
+               }
+       }
+       dput(parent);
+
+       /* bottom up here */
+       if (unlikely(err < 0))
+               err = au_wbr_bu(sb, bstart - 1);
+
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* bottom up */
+/* copyup policy: plain bottom-up search from the dentry's start branch */
+static int au_wbr_copyup_bu(struct dentry *dentry)
+{
+       int err;
+       aufs_bindex_t bstart;
+
+       bstart = au_dbstart(dentry);
+       err = au_wbr_bu(dentry->d_sb, bstart);
+       AuDbg("b%d\n", err);
+       /* only when a different (upper) branch was chosen */
+       if (err > bstart)
+               err = au_wbr_nonopq(dentry, err);
+
+       AuDbg("b%d\n", err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* dispatch table: copyup policy id -> handler */
+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
+       [AuWbrCopyup_TDP] = {
+               .copyup = au_wbr_copyup_tdp
+       },
+       [AuWbrCopyup_BUP] = {
+               .copyup = au_wbr_copyup_bup
+       },
+       [AuWbrCopyup_BU] = {
+               .copyup = au_wbr_copyup_bu
+       }
+};
+
+/* dispatch table: create policy id -> create/init/fin handlers.
+ * the *V variants share the same handlers as their base policy;
+ * presumably they differ only in option parsing elsewhere — confirm */
+struct au_wbr_create_operations au_wbr_create_ops[] = {
+       [AuWbrCreate_TDP] = {
+               .create = au_wbr_create_tdp
+       },
+       [AuWbrCreate_RR] = {
+               .create = au_wbr_create_rr,
+               .init   = au_wbr_create_init_rr
+       },
+       [AuWbrCreate_MFS] = {
+               .create = au_wbr_create_mfs,
+               .init   = au_wbr_create_init_mfs,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_MFSV] = {
+               .create = au_wbr_create_mfs,
+               .init   = au_wbr_create_init_mfs,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_MFSRR] = {
+               .create = au_wbr_create_mfsrr,
+               .init   = au_wbr_create_init_mfsrr,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_MFSRRV] = {
+               .create = au_wbr_create_mfsrr,
+               .init   = au_wbr_create_init_mfsrr,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_PMFS] = {
+               .create = au_wbr_create_pmfs,
+               .init   = au_wbr_create_init_mfs,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_PMFSV] = {
+               .create = au_wbr_create_pmfs,
+               .init   = au_wbr_create_init_mfs,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_PMFSRR] = {
+               .create = au_wbr_create_pmfsrr,
+               .init   = au_wbr_create_init_mfsrr,
+               .fin    = au_wbr_create_fin_mfs
+       },
+       [AuWbrCreate_PMFSRRV] = {
+               .create = au_wbr_create_pmfsrr,
+               .init   = au_wbr_create_init_mfsrr,
+               .fin    = au_wbr_create_fin_mfs
+       }
+};
diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c
new file mode 100644 (file)
index 0000000..ca5990a
--- /dev/null
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#include "aufs.h"
+
+#define WH_MASK                        S_IRUGO
+
+/*
+ * If a directory contains this file, then it is opaque.  The name starts
+ * with the .wh. prefix so that plain lookup hides it.
+ */
+static struct qstr diropq_name = {
+       .name = AUFS_WH_DIROPQ,
+       .len = sizeof(AUFS_WH_DIROPQ) - 1
+};
+
+/*
+ * generate the whiteout name for @name; the result is NOT NUL-terminated.
+ * @wh: output qstr; on success wh->name is kmalloc-ed and the caller
+ *      must kfree() it.
+ * @name: the original d_name (name + len).
+ * returns zero on success, -ENAMETOOLONG or -ENOMEM otherwise.
+ */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
+{
+       char *p;
+
+       if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
+               return -ENAMETOOLONG;
+
+       wh->len = name->len + AUFS_WH_PFX_LEN;
+       p = kmalloc(wh->len, GFP_NOFS);
+       wh->name = p;
+       if (p) {
+               /* ".wh." prefix followed by the original name */
+               memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+               memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
+               /* smp_mb(); */
+               return 0;
+       }
+       return -ENOMEM;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if the @wh_name exists under @h_parent.
+ * @try_sio specifies the necessary of super-io.
+ * returns 0 (absent), 1 (present as a regular file), or a negative errno;
+ * any other entry type is treated as corruption (-EIO).
+ */
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name,
+              struct au_branch *br, int try_sio)
+{
+       int err;
+       struct dentry *wh_dentry;
+
+       if (!try_sio)
+               wh_dentry = au_lkup_one(wh_name, h_parent, br, /*nd*/NULL);
+       else
+               wh_dentry = au_sio_lkup_one(wh_name, h_parent, br);
+       err = PTR_ERR(wh_dentry);
+       if (IS_ERR(wh_dentry))
+               goto out;
+
+       err = 0;
+       if (!wh_dentry->d_inode)
+               goto out_wh; /* success */
+
+       err = 1;
+       if (S_ISREG(wh_dentry->d_inode->i_mode))
+               goto out_wh; /* success */
+
+       err = -EIO;
+       AuIOErr("%.*s Invalid whiteout entry type 0%o.\n",
+               AuDLNPair(wh_dentry), wh_dentry->d_inode->i_mode);
+
+out_wh:
+       dput(wh_dentry);
+out:
+       return err;
+}
+
+/*
+ * test if the @h_dentry sets opaque or not,
+ * i.e. whether the diropq whiteout exists inside it.
+ * same tri-state return as au_wh_test().
+ */
+int au_diropq_test(struct dentry *h_dentry, struct au_branch *br)
+{
+       int err;
+       struct inode *h_dir;
+
+       h_dir = h_dentry->d_inode;
+       err = au_wh_test(h_dentry, &diropq_name, br,
+                        au_test_h_perm_sio(h_dir, MAY_EXEC));
+       return err;
+}
+
+/*
+ * returns a negative dentry whose name is unique and temporary.
+ * name format: ".wh..wh.<prefix>.<hex counter>"; up to three counter
+ * values are tried before giving up with -EEXIST.
+ */
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+                            struct qstr *prefix)
+{
+       struct dentry *dentry;
+       int i;
+       char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1],
+               *name, *p;
+       /* strict atomic_t is unnecessary here */
+       static unsigned short cnt;
+       struct qstr qs;
+
+       BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN);
+
+       name = defname;
+       /* assumes this equals pfx*2 + prefix->len + 1 + AUFS_WH_TMP_LEN —
+          the stack buffer sizing depends on the aufs name-length macros;
+          verify against aufs.h before touching */
+       qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1;
+       if (unlikely(prefix->len > DNAME_INLINE_LEN)) {
+               /* too long for the stack buffer: fall back to kmalloc */
+               dentry = ERR_PTR(-ENAMETOOLONG);
+               if (unlikely(qs.len > NAME_MAX))
+                       goto out;
+               dentry = ERR_PTR(-ENOMEM);
+               name = kmalloc(qs.len + 1, GFP_NOFS);
+               if (unlikely(!name))
+                       goto out;
+       }
+
+       /* doubly whiteout-ed */
+       memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
+       p = name + AUFS_WH_PFX_LEN * 2;
+       memcpy(p, prefix->name, prefix->len);
+       p += prefix->len;
+       *p++ = '.';
+       AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN);
+
+       qs.name = name;
+       for (i = 0; i < 3; i++) {
+               sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++);
+               dentry = au_sio_lkup_one(&qs, h_parent, br);
+               if (IS_ERR(dentry) || !dentry->d_inode)
+                       goto out_name;
+               dput(dentry);
+       }
+       /* pr_warn("could not get random name\n"); */
+       dentry = ERR_PTR(-EEXIST);
+       AuDbg("%.*s\n", AuLNPair(&qs));
+       BUG();
+
+out_name:
+       if (name != defname)
+               kfree(name);
+out:
+       AuTraceErrPtr(dentry);
+       return dentry;
+}
+
+/*
+ * rename the @h_dentry on @br to the whiteouted temporary name,
+ * within its own parent dir (so no lock_rename() is needed).
+ * caller must hold the parent dir's inode mutex.
+ */
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
+{
+       int err;
+       struct path h_path = {
+               .mnt = au_br_mnt(br)
+       };
+       struct inode *h_dir;
+       struct dentry *h_parent;
+
+       h_parent = h_dentry->d_parent; /* dir inode is locked */
+       h_dir = h_parent->d_inode;
+       IMustLock(h_dir);
+
+       h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
+       err = PTR_ERR(h_path.dentry);
+       if (IS_ERR(h_path.dentry))
+               goto out;
+
+       /* under the same dir, no need to lock_rename() */
+       err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path);
+       AuTraceErr(err);
+       dput(h_path.dentry);
+
+out:
+       AuTraceErr(err);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * functions for removing a whiteout
+ */
+
+/* unlink a whiteout; force super-io for sticky dirs owned by others */
+static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
+{
+       int force;
+
+       /*
+        * forces superio when the dir has a sticky bit.
+        * this may be a violation of unix fs semantics.
+        */
+       force = (h_dir->i_mode & S_ISVTX)
+               && h_path->dentry->d_inode->i_uid != current_fsuid();
+       return vfsub_unlink(h_dir, h_path, force);
+}
+
+/*
+ * unlink the whiteout at @h_path and, on success, clear the cached
+ * whiteout branch index on @dentry (when one is given).
+ */
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+                       struct dentry *dentry)
+{
+       int err;
+
+       err = do_unlink_wh(h_dir, h_path);
+       if (!err && dentry)
+               au_set_dbwh(dentry, -1);
+
+       return err;
+}
+
+/*
+ * look up @wh under @h_parent and unlink it when it exists as a regular
+ * file; a missing entry is not an error.
+ */
+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
+                         struct au_branch *br)
+{
+       int err;
+       struct path h_path = {
+               .mnt = au_br_mnt(br)
+       };
+
+       err = 0;
+       h_path.dentry = au_lkup_one(wh, h_parent, br, /*nd*/NULL);
+       if (IS_ERR(h_path.dentry))
+               err = PTR_ERR(h_path.dentry);
+       else {
+               if (h_path.dentry->d_inode
+                   && S_ISREG(h_path.dentry->d_inode->i_mode))
+                       err = do_unlink_wh(h_parent->d_inode, &h_path);
+               dput(h_path.dentry);
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * initialize/clean whiteout for a branch
+ */
+
+/*
+ * best-effort removal of a whiteout base file/dir; failures are only
+ * warned about, never propagated.
+ */
+static void au_wh_clean(struct inode *h_dir, struct path *whpath,
+                       const int isdir)
+{
+       int err;
+
+       /* nothing to remove */
+       if (!whpath->dentry->d_inode)
+               return;
+
+       err = mnt_want_write(whpath->mnt);
+       if (!err) {
+               if (isdir)
+                       err = vfsub_rmdir(h_dir, whpath);
+               else
+                       err = vfsub_unlink(h_dir, whpath, /*force*/0);
+               mnt_drop_write(whpath->mnt);
+       }
+       if (unlikely(err))
+               pr_warn("failed removing %.*s (%d), ignored.\n",
+                       AuDLNPair(whpath->dentry), err);
+}
+
+/* whiteouts are hard-linked; reject branch filesystems without link(2) */
+static int test_linkable(struct dentry *h_root)
+{
+       struct inode *h_dir = h_root->d_inode;
+
+       if (h_dir->i_op->link)
+               return 0;
+
+       pr_err("%.*s (%s) doesn't support link(2), use noplink and rw+nolwh\n",
+              AuDLNPair(h_root), au_sbtype(h_root->d_sb));
+       return -ENOSYS;
+}
+
+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
+/*
+ * ensure an aufs work directory exists at @path: create it if absent,
+ * accept an existing dir, reject anything else (-EEXIST).
+ */
+static int au_whdir(struct inode *h_dir, struct path *path)
+{
+       int err;
+
+       err = -EEXIST;
+       if (!path->dentry->d_inode) {
+               int mode = S_IRWXU;
+
+               /* NFS needs the extra exec bits for server-side traversal —
+                  presumably; confirm against the NFS branch handling */
+               if (au_test_nfs(path->dentry->d_sb))
+                       mode |= S_IXUGO;
+               err = mnt_want_write(path->mnt);
+               if (!err) {
+                       err = vfsub_mkdir(h_dir, path, mode);
+                       mnt_drop_write(path->mnt);
+               }
+       } else if (S_ISDIR(path->dentry->d_inode->i_mode))
+               err = 0;
+       else
+               pr_err("unknown %.*s exists\n", AuDLNPair(path->dentry));
+
+       return err;
+}
+
+/* pairs a whiteout base name with its looked-up dentry during init */
+struct au_wh_base {
+       const struct qstr *name;
+       struct dentry *dentry;
+};
+
+/* a read-only branch needs no whiteout infrastructure: remove it all */
+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
+                         struct path *h_path)
+{
+       h_path->dentry = base[AuBrWh_BASE].dentry;
+       au_wh_clean(h_dir, h_path, /*isdir*/0);
+       h_path->dentry = base[AuBrWh_PLINK].dentry;
+       au_wh_clean(h_dir, h_path, /*isdir*/1);
+       h_path->dentry = base[AuBrWh_ORPH].dentry;
+       au_wh_clean(h_dir, h_path, /*isdir*/1);
+}
+
+/*
+ * returns tri-state,
+ * minus: error, caller should print the message
+ * zero: success
+ * plus: error, caller should NOT print the message
+ */
+/*
+ * writable branch without linkable whiteouts: drop the whbase file,
+ * keep/create the plink and orphan dirs as needed.
+ */
+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
+                               int do_plink, struct au_wh_base base[],
+                               struct path *h_path)
+{
+       int err;
+       struct inode *h_dir;
+
+       h_dir = h_root->d_inode;
+       h_path->dentry = base[AuBrWh_BASE].dentry;
+       au_wh_clean(h_dir, h_path, /*isdir*/0);
+       h_path->dentry = base[AuBrWh_PLINK].dentry;
+       if (do_plink) {
+               /* plink still needs link(2) even without linkable whiteouts */
+               err = test_linkable(h_root);
+               if (unlikely(err)) {
+                       err = 1;
+                       goto out;
+               }
+
+               err = au_whdir(h_dir, h_path);
+               if (unlikely(err))
+                       goto out;
+               wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+       } else
+               au_wh_clean(h_dir, h_path, /*isdir*/1);
+       h_path->dentry = base[AuBrWh_ORPH].dentry;
+       err = au_whdir(h_dir, h_path);
+       if (unlikely(err))
+               goto out;
+       wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+       return err;
+}
+
+/*
+ * for the moment, aufs supports the branch filesystem which does not support
+ * link(2). testing on FAT which does not support i_op->setattr() fully either,
+ * copyup failed. finally, such filesystem will not be used as the writable
+ * branch.
+ *
+ * returns tri-state, see above.
+ */
+/*
+ * fully writable branch: create/verify the whbase file, plink dir and
+ * orphan dir, taking references into @wbr.
+ */
+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
+                        int do_plink, struct au_wh_base base[],
+                        struct path *h_path)
+{
+       int err;
+       struct inode *h_dir;
+
+       WbrWhMustWriteLock(wbr);
+
+       err = test_linkable(h_root);
+       if (unlikely(err)) {
+               err = 1;
+               goto out;
+       }
+
+       /*
+        * todo: should this create be done in /sbin/mount.aufs helper?
+        */
+       err = -EEXIST;
+       h_dir = h_root->d_inode;
+       if (!base[AuBrWh_BASE].dentry->d_inode) {
+               err = mnt_want_write(h_path->mnt);
+               if (!err) {
+                       h_path->dentry = base[AuBrWh_BASE].dentry;
+                       err = vfsub_create(h_dir, h_path, WH_MASK);
+                       mnt_drop_write(h_path->mnt);
+               }
+       } else if (S_ISREG(base[AuBrWh_BASE].dentry->d_inode->i_mode))
+               err = 0;
+       else
+               pr_err("unknown %.*s/%.*s exists\n",
+                      AuDLNPair(h_root), AuDLNPair(base[AuBrWh_BASE].dentry));
+       if (unlikely(err))
+               goto out;
+
+       h_path->dentry = base[AuBrWh_PLINK].dentry;
+       if (do_plink) {
+               err = au_whdir(h_dir, h_path);
+               if (unlikely(err))
+                       goto out;
+               wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+       } else
+               au_wh_clean(h_dir, h_path, /*isdir*/1);
+       wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
+
+       h_path->dentry = base[AuBrWh_ORPH].dentry;
+       err = au_whdir(h_dir, h_path);
+       if (unlikely(err))
+               goto out;
+       wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+       return err;
+}
+
+/*
+ * initialize the whiteout base file/dir for @br.
+ * looks up the three base entries, drops any stale references held in
+ * @wbr, then dispatches on the branch permission (ro / rw-nolwh / rw).
+ */
+int au_wh_init(struct au_branch *br, struct super_block *sb)
+{
+       int err, i;
+       const unsigned char do_plink
+               = !!au_opt_test(au_mntflags(sb), PLINK);
+       struct inode *h_dir;
+       struct path path = br->br_path;
+       struct dentry *h_root = path.dentry;
+       struct au_wbr *wbr = br->br_wbr;
+       static const struct qstr base_name[] = {
+               [AuBrWh_BASE] = {
+                       .name   = AUFS_BASE_NAME,
+                       .len    = sizeof(AUFS_BASE_NAME) - 1
+               },
+               [AuBrWh_PLINK] = {
+                       .name   = AUFS_PLINKDIR_NAME,
+                       .len    = sizeof(AUFS_PLINKDIR_NAME) - 1
+               },
+               [AuBrWh_ORPH] = {
+                       .name   = AUFS_ORPHDIR_NAME,
+                       .len    = sizeof(AUFS_ORPHDIR_NAME) - 1
+               }
+       };
+       struct au_wh_base base[] = {
+               [AuBrWh_BASE] = {
+                       .name   = base_name + AuBrWh_BASE,
+                       .dentry = NULL
+               },
+               [AuBrWh_PLINK] = {
+                       .name   = base_name + AuBrWh_PLINK,
+                       .dentry = NULL
+               },
+               [AuBrWh_ORPH] = {
+                       .name   = base_name + AuBrWh_ORPH,
+                       .dentry = NULL
+               }
+       };
+
+       if (wbr)
+               WbrWhMustWriteLock(wbr);
+
+       for (i = 0; i < AuBrWh_Last; i++) {
+               /* doubly whiteouted */
+               struct dentry *d;
+
+               d = au_wh_lkup(h_root, (void *)base[i].name, br);
+               err = PTR_ERR(d);
+               if (IS_ERR(d))
+                       goto out;
+
+               base[i].dentry = d;
+               AuDebugOn(wbr
+                         && wbr->wbr_wh[i]
+                         && wbr->wbr_wh[i] != base[i].dentry);
+       }
+
+       /* release any previously-held base dentries before re-init */
+       if (wbr)
+               for (i = 0; i < AuBrWh_Last; i++) {
+                       dput(wbr->wbr_wh[i]);
+                       wbr->wbr_wh[i] = NULL;
+               }
+
+       err = 0;
+       if (!au_br_writable(br->br_perm)) {
+               h_dir = h_root->d_inode;
+               au_wh_init_ro(h_dir, base, &path);
+       } else if (!au_br_wh_linkable(br->br_perm)) {
+               err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
+               if (err > 0)
+                       goto out;
+               else if (err)
+                       goto out_err;
+       } else {
+               err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
+               if (err > 0)
+                       goto out;
+               else if (err)
+                       goto out_err;
+       }
+       goto out; /* success */
+
+out_err:
+       pr_err("an error(%d) on the writable branch %.*s(%s)\n",
+              err, AuDLNPair(h_root), au_sbtype(h_root->d_sb));
+out:
+       for (i = 0; i < AuBrWh_Last; i++)
+               dput(base[i].dentry);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * whiteouts are all hard-linked usually.
+ * when its link count reaches a ceiling, we create a new whiteout base
+ * asynchronously.
+ */
+
+/* argument bundle for the async whiteout-base re-initialization */
+struct reinit_br_wh {
+       struct super_block *sb;
+       struct au_branch *br;
+};
+
+/*
+ * workqueue body: unlink the exhausted whiteout base and rebuild the
+ * whiteout infrastructure for the branch.  releases the references
+ * taken by kick_reinit_br_wh() and frees @arg.
+ */
+static void reinit_br_wh(void *arg)
+{
+       int err;
+       aufs_bindex_t bindex;
+       struct path h_path;
+       struct reinit_br_wh *a = arg;
+       struct au_wbr *wbr;
+       struct inode *dir;
+       struct dentry *h_root;
+       struct au_hinode *hdir;
+
+       err = 0;
+       wbr = a->br->br_wbr;
+       /* big aufs lock */
+       si_noflush_write_lock(a->sb);
+       if (!au_br_writable(a->br->br_perm))
+               goto out;
+       bindex = au_br_index(a->sb, a->br->br_id);
+       /* the branch may have been removed while the work was queued */
+       if (unlikely(bindex < 0))
+               goto out;
+
+       di_read_lock_parent(a->sb->s_root, AuLock_IR);
+       dir = a->sb->s_root->d_inode;
+       hdir = au_hi(dir, bindex);
+       h_root = au_h_dptr(a->sb->s_root, bindex);
+       AuDebugOn(h_root != au_br_dentry(a->br));
+
+       au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+       wbr_wh_write_lock(wbr);
+       err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
+                         h_root, a->br);
+       if (!err) {
+               h_path.mnt = au_br_mnt(a->br);
+               err = mnt_want_write(h_path.mnt);
+               if (!err) {
+                       h_path.dentry = wbr->wbr_whbase;
+                       err = vfsub_unlink(hdir->hi_inode, &h_path, /*force*/0);
+                       mnt_drop_write(h_path.mnt);
+               }
+       } else {
+               pr_warn("%.*s is moved, ignored\n",
+                       AuDLNPair(wbr->wbr_whbase));
+               err = 0;
+       }
+       dput(wbr->wbr_whbase);
+       wbr->wbr_whbase = NULL;
+       if (!err)
+               err = au_wh_init(a->br, a->sb);
+       wbr_wh_write_unlock(wbr);
+       au_hn_imtx_unlock(hdir);
+       di_read_unlock(a->sb->s_root, AuLock_IR);
+
+out:
+       if (wbr)
+               atomic_dec(&wbr->wbr_wh_running);
+       atomic_dec(&a->br->br_count);
+       si_write_unlock(a->sb);
+       au_nwt_done(&au_sbi(a->sb)->si_nowait);
+       kfree(arg);
+       if (unlikely(err))
+               AuIOErr("err %d\n", err);
+}
+
+/*
+ * schedule reinit_br_wh() unless one is already running for this branch.
+ * allocation failure is silently tolerated (whiteouts will simply be
+ * created instead of linked until the next attempt).
+ */
+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
+{
+       int do_dec, wkq_err;
+       struct reinit_br_wh *arg;
+
+       do_dec = 1;
+       /* wh_running doubles as a "work already queued/running" flag */
+       if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
+               goto out;
+
+       /* ignore ENOMEM */
+       arg = kmalloc(sizeof(*arg), GFP_NOFS);
+       if (arg) {
+               /*
+                * dec(wh_running), kfree(arg) and dec(br_count)
+                * in reinit function
+                */
+               arg->sb = sb;
+               arg->br = br;
+               atomic_inc(&br->br_count);
+               wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0);
+               if (unlikely(wkq_err)) {
+                       /* queueing failed: undo everything here */
+                       atomic_dec(&br->br_wbr->wbr_wh_running);
+                       atomic_dec(&br->br_count);
+                       kfree(arg);
+               }
+               do_dec = 0;
+       }
+
+out:
+       if (do_dec)
+               atomic_dec(&br->br_wbr->wbr_wh_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create the whiteout @wh.
+ * prefer hard-linking to the shared whiteout base; when the base's link
+ * count is exhausted (-EMLINK), kick an async base re-init and fall back
+ * to creating a fresh whiteout file.
+ */
+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
+                            struct dentry *wh)
+{
+       int err;
+       struct path h_path = {
+               .dentry = wh
+       };
+       struct au_branch *br;
+       struct au_wbr *wbr;
+       struct dentry *h_parent;
+       struct inode *h_dir;
+
+       h_parent = wh->d_parent; /* dir inode is locked */
+       h_dir = h_parent->d_inode;
+       IMustLock(h_dir);
+
+       br = au_sbr(sb, bindex);
+       h_path.mnt = au_br_mnt(br);
+       wbr = br->br_wbr;
+       wbr_wh_read_lock(wbr);
+       if (wbr->wbr_whbase) {
+               err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path);
+               /* success or any error other than -EMLINK is final
+                * (the "!err ||" term is redundant but harmless) */
+               if (!err || err != -EMLINK)
+                       goto out;
+
+               /* link count full. re-initialize br_whbase. */
+               kick_reinit_br_wh(sb, br);
+       }
+
+       /* return this error in this context */
+       err = vfsub_create(h_dir, &h_path, WH_MASK);
+
+out:
+       wbr_wh_read_unlock(wbr);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create or remove the diropq (directory-opaque mark).
+ * @flags: AuDiropq_CREATE makes the mark; otherwise it is removed.
+ * Returns the lower diropq dentry on successful creation, or ERR_PTR().
+ * Note: on the remove path, success yields ERR_PTR(0).
+ */
+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
+                               unsigned int flags)
+{
+       struct dentry *opq_dentry, *h_dentry;
+       struct super_block *sb;
+       struct au_branch *br;
+       int err;
+
+       sb = dentry->d_sb;
+       br = au_sbr(sb, bindex);
+       h_dentry = au_h_dptr(dentry, bindex);
+       opq_dentry = au_lkup_one(&diropq_name, h_dentry, br, /*nd*/NULL);
+       if (IS_ERR(opq_dentry))
+               goto out;
+
+       if (au_ftest_diropq(flags, CREATE)) {
+               err = link_or_create_wh(sb, bindex, opq_dentry);
+               if (!err) {
+                       au_set_dbdiropq(dentry, bindex);
+                       goto out; /* success */
+               }
+       } else {
+               struct path tmp = {
+                       .dentry = opq_dentry,
+                       .mnt    = au_br_mnt(br)
+               };
+               err = do_unlink_wh(au_h_iptr(dentry->d_inode, bindex), &tmp);
+               if (!err)
+                       au_set_dbdiropq(dentry, -1);
+       }
+       /* create failed or remove finished: drop the lookup reference */
+       dput(opq_dentry);
+       opq_dentry = ERR_PTR(err);
+
+out:
+       return opq_dentry;
+}
+
+/* argument package to run do_diropq() through the aufs workqueue */
+struct do_diropq_args {
+       struct dentry **errp;
+       struct dentry *dentry;
+       aufs_bindex_t bindex;
+       unsigned int flags;
+};
+
+/* workqueue trampoline for do_diropq() */
+static void call_do_diropq(void *args)
+{
+       struct do_diropq_args *a = args;
+       *a->errp = do_diropq(a->dentry, a->bindex, a->flags);
+}
+
+/*
+ * create or remove the diropq, going through the workqueue (super-io)
+ * when the current task lacks exec/write permission on the lower inode.
+ */
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+                            unsigned int flags)
+{
+       struct dentry *diropq, *h_dentry;
+
+       h_dentry = au_h_dptr(dentry, bindex);
+       if (!au_test_h_perm_sio(h_dentry->d_inode, MAY_EXEC | MAY_WRITE))
+               diropq = do_diropq(dentry, bindex, flags);
+       else {
+               int wkq_err;
+               struct do_diropq_args args = {
+                       .errp           = &diropq,
+                       .dentry         = dentry,
+                       .bindex         = bindex,
+                       .flags          = flags
+               };
+
+               wkq_err = au_wkq_wait(call_do_diropq, &args);
+               if (unlikely(wkq_err))
+                       diropq = ERR_PTR(wkq_err);
+       }
+
+       return diropq;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * lookup whiteout dentry.
+ * @h_parent: lower parent dentry which must exist and be locked
+ * @base_name: name of dentry which will be whiteouted
+ * returns dentry for whiteout (possibly negative), or ERR_PTR().
+ */
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+                         struct au_branch *br)
+{
+       int err;
+       struct qstr wh_name;
+       struct dentry *wh_dentry;
+
+       err = au_wh_name_alloc(&wh_name, base_name);
+       wh_dentry = ERR_PTR(err);
+       if (!err) {
+               wh_dentry = au_lkup_one(&wh_name, h_parent, br, /*nd*/NULL);
+               /* the prefixed name was only needed for the lookup */
+               kfree(wh_name.name);
+       }
+       return wh_dentry;
+}
+
+/*
+ * link/create a whiteout for @dentry on @bindex.
+ * returns the whiteout dentry, or ERR_PTR() on failure. a whiteout
+ * which already exists (positive dentry) is returned untouched.
+ */
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+                           struct dentry *h_parent)
+{
+       struct dentry *wh_dentry;
+       struct super_block *sb;
+       int err;
+
+       sb = dentry->d_sb;
+       wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
+       /* only materialize the whiteout when the lookup was negative */
+       if (!IS_ERR(wh_dentry) && !wh_dentry->d_inode) {
+               err = link_or_create_wh(sb, bindex, wh_dentry);
+               if (!err)
+                       au_set_dbwh(dentry, bindex);
+               else {
+                       dput(wh_dentry);
+                       wh_dentry = ERR_PTR(err);
+               }
+       }
+
+       return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Delete all whiteouts in this directory on branch bindex. */
+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
+                          aufs_bindex_t bindex, struct au_branch *br)
+{
+       int err;
+       unsigned long ul, n;
+       struct qstr wh_name;
+       char *p;
+       struct hlist_head *head;
+       struct au_vdir_wh *tpos;
+       struct hlist_node *pos;
+       struct au_vdir_destr *str;
+
+       err = -ENOMEM;
+       p = __getname_gfp(GFP_NOFS);
+       wh_name.name = p;
+       if (unlikely(!wh_name.name))
+               goto out;
+
+       err = 0;
+       /*
+        * the single name buffer keeps the AUFS_WH_PFX prefix in place;
+        * each child name is copied right after it for every entry.
+        */
+       memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+       p += AUFS_WH_PFX_LEN;
+       n = whlist->nh_num;
+       head = whlist->nh_head;
+       for (ul = 0; !err && ul < n; ul++, head++) {
+               hlist_for_each_entry(tpos, pos, head, wh_hash) {
+                       if (tpos->wh_bindex != bindex)
+                               continue;
+
+                       str = &tpos->wh_str;
+                       if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
+                               memcpy(p, str->name, str->len);
+                               wh_name.len = AUFS_WH_PFX_LEN + str->len;
+                               err = unlink_wh_name(h_dentry, &wh_name, br);
+                               if (!err)
+                                       continue;
+                               break;
+                       }
+                       AuIOErr("whiteout name too long %.*s\n",
+                               str->len, str->name);
+                       err = -EIO;
+                       break;
+               }
+       }
+       __putname(wh_name.name);
+
+out:
+       return err;
+}
+
+/* argument package to run del_wh_children() through the aufs workqueue */
+struct del_wh_children_args {
+       int *errp;
+       struct dentry *h_dentry;
+       struct au_nhash *whlist;
+       aufs_bindex_t bindex;
+       struct au_branch *br;
+};
+
+/* workqueue trampoline for del_wh_children() */
+static void call_del_wh_children(void *args)
+{
+       struct del_wh_children_args *a = args;
+       *a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * allocate and initialize the argument set for the deferred rmdir of a
+ * whiteouted dir. returns the new object or ERR_PTR(-errno).
+ */
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
+{
+       struct au_whtmp_rmdir *whtmp;
+       int err;
+       unsigned int rdhash;
+
+       SiMustAnyLock(sb);
+
+       whtmp = kmalloc(sizeof(*whtmp), gfp);
+       if (unlikely(!whtmp)) {
+               whtmp = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       whtmp->dir = NULL;
+       whtmp->br = NULL;
+       whtmp->wh_dentry = NULL;
+       /* no estimation for dir size */
+       rdhash = au_sbi(sb)->si_rdhash;
+       if (!rdhash)
+               rdhash = AUFS_RDHASH_DEF;
+       err = au_nhash_alloc(&whtmp->whlist, rdhash, gfp);
+       if (unlikely(err)) {
+               kfree(whtmp);
+               whtmp = ERR_PTR(err);
+       }
+
+out:
+       return whtmp;
+}
+
+/* release every reference held by @whtmp (branch, dentry, inode) and free it */
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
+{
+       if (whtmp->br)
+               atomic_dec(&whtmp->br->br_count);
+       dput(whtmp->wh_dentry);
+       iput(whtmp->dir);
+       au_nhash_wh_free(&whtmp->whlist);
+       kfree(whtmp);
+}
+
+/*
+ * rmdir the whiteouted temporary named dir @h_dentry.
+ * @whlist: whiteouted children.
+ * deletes all whiteout children first (via the workqueue when the current
+ * task lacks permission on the lower inode), then removes the dir itself.
+ */
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+                  struct dentry *wh_dentry, struct au_nhash *whlist)
+{
+       int err;
+       struct path h_tmp;
+       struct inode *wh_inode, *h_dir;
+       struct au_branch *br;
+
+       h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */
+       IMustLock(h_dir);
+
+       br = au_sbr(dir->i_sb, bindex);
+       wh_inode = wh_dentry->d_inode;
+       mutex_lock_nested(&wh_inode->i_mutex, AuLsc_I_CHILD);
+
+       /*
+        * someone else might change some whiteouts while we were sleeping.
+        * it means this whlist may have an obsoleted entry.
+        */
+       if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
+               err = del_wh_children(wh_dentry, whlist, bindex, br);
+       else {
+               int wkq_err;
+               struct del_wh_children_args args = {
+                       .errp           = &err,
+                       .h_dentry       = wh_dentry,
+                       .whlist         = whlist,
+                       .bindex         = bindex,
+                       .br             = br
+               };
+
+               wkq_err = au_wkq_wait(call_del_wh_children, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+       mutex_unlock(&wh_inode->i_mutex);
+
+       /* all whiteout children are gone; remove the dir itself */
+       if (!err) {
+               h_tmp.dentry = wh_dentry;
+               h_tmp.mnt = au_br_mnt(br);
+               err = vfsub_rmdir(h_dir, &h_tmp);
+       }
+
+       if (!err) {
+               if (au_ibstart(dir) == bindex) {
+                       /* todo: dir->i_mutex is necessary */
+                       au_cpup_attr_timesizes(dir);
+                       vfsub_drop_nlink(dir);
+               }
+               return 0; /* success */
+       }
+
+       pr_warn("failed removing %.*s(%d), ignored\n",
+               AuDLNPair(wh_dentry), err);
+       return err;
+}
+
+/*
+ * workqueue handler: perform the real rmdir of the whiteouted tmp dir
+ * scheduled by au_whtmp_kick_rmdir(), after re-validating that the
+ * branch is still writable and the lower dentry is still sane.
+ * always releases @args via au_whtmp_rmdir_free().
+ */
+static void call_rmdir_whtmp(void *args)
+{
+       int err;
+       aufs_bindex_t bindex;
+       struct au_whtmp_rmdir *a = args;
+       struct super_block *sb;
+       struct dentry *h_parent;
+       struct inode *h_dir;
+       struct au_hinode *hdir;
+       struct vfsmount *h_mnt;
+
+       /* rmdir by nfsd may cause deadlock with this i_mutex */
+       /* mutex_lock(&a->dir->i_mutex); */
+       err = -EROFS;
+       sb = a->dir->i_sb;
+       si_read_lock(sb, !AuLock_FLUSH);
+       if (!au_br_writable(a->br->br_perm))
+               goto out;
+       /* the branch may have been removed/reordered while we waited */
+       bindex = au_br_index(sb, a->br->br_id);
+       if (unlikely(bindex < 0))
+               goto out;
+
+       err = -EIO;
+       ii_write_lock_parent(a->dir);
+       h_parent = dget_parent(a->wh_dentry);
+       h_dir = h_parent->d_inode;
+       hdir = au_hi(a->dir, bindex);
+       au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+       err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent,
+                         a->br);
+       if (!err) {
+               h_mnt = au_br_mnt(a->br);
+               err = mnt_want_write(h_mnt);
+               if (!err) {
+                       err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry,
+                                            &a->whlist);
+                       mnt_drop_write(h_mnt);
+               }
+       }
+       au_hn_imtx_unlock(hdir);
+       dput(h_parent);
+       ii_write_unlock(a->dir);
+
+out:
+       /* mutex_unlock(&a->dir->i_mutex); */
+       au_whtmp_rmdir_free(a);
+       si_read_unlock(sb);
+       au_nwt_done(&au_sbi(sb)->si_nowait);
+       if (unlikely(err))
+               AuIOErr("err %d\n", err);
+}
+
+/*
+ * schedule the real rmdir of a whiteouted tmp dir asynchronously.
+ * grabs references to @dir, the branch and @wh_dentry; all of them are
+ * released in au_whtmp_rmdir_free(), called by the worker (or here on
+ * queueing failure).
+ */
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+                        struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
+{
+       int wkq_err;
+       struct super_block *sb;
+
+       IMustLock(dir);
+
+       /* all post-process will be done in call_rmdir_whtmp(). */
+       sb = dir->i_sb;
+       args->dir = au_igrab(dir);
+       args->br = au_sbr(sb, bindex);
+       atomic_inc(&args->br->br_count);
+       args->wh_dentry = dget(wh_dentry);
+       wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0);
+       if (unlikely(wkq_err)) {
+               pr_warn("rmdir error %.*s (%d), ignored\n",
+                       AuDLNPair(wh_dentry), wkq_err);
+               au_whtmp_rmdir_free(args);
+       }
+}
diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h
new file mode 100644 (file)
index 0000000..8508560
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#ifndef __AUFS_WHOUT_H__
+#define __AUFS_WHOUT_H__
+
+#ifdef __KERNEL__
+
+#include "dir.h"
+
+/* whout.c */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
+struct au_branch;
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name,
+              struct au_branch *br, int try_sio);
+int au_diropq_test(struct dentry *h_dentry, struct au_branch *br);
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+                            struct qstr *prefix);
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+                       struct dentry *dentry);
+int au_wh_init(struct au_branch *br, struct super_block *sb);
+
+/* diropq flags */
+#define AuDiropq_CREATE        1
+#define au_ftest_diropq(flags, name)   ((flags) & AuDiropq_##name)
+#define au_fset_diropq(flags, name) \
+       do { (flags) |= AuDiropq_##name; } while (0)
+#define au_fclr_diropq(flags, name) \
+       do { (flags) &= ~AuDiropq_##name; } while (0)
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+                            unsigned int flags);
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+                         struct au_branch *br);
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+                           struct dentry *h_parent);
+
+/* real rmdir for the whiteout-ed dir */
+struct au_whtmp_rmdir {
+       struct inode *dir;
+       struct au_branch *br;
+       struct dentry *wh_dentry;
+       struct au_nhash whlist;
+};
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+                  struct dentry *wh_dentry, struct au_nhash *whlist);
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+                        struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
+
+/* ---------------------------------------------------------------------- */
+
+/* create the opaque mark for @dentry on @bindex; see au_diropq_sio() */
+static inline struct dentry *au_diropq_create(struct dentry *dentry,
+                                             aufs_bindex_t bindex)
+{
+       return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
+}
+
+/*
+ * remove the opaque mark. the removal path of au_diropq_sio() returns
+ * ERR_PTR(0) on success, so PTR_ERR() yields 0 or a negative errno here.
+ */
+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
+{
+       return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WHOUT_H__ */
diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c
new file mode 100644 (file)
index 0000000..46c7e02
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new dredential scheme
+ */
+
+#include <linux/module.h>
+#include "aufs.h"
+
+/* internal workqueue named AUFS_WKQ_NAME */
+
+static struct workqueue_struct *au_wkq;
+
+struct au_wkinfo {
+       struct work_struct wk;
+       struct kobject *kobj;
+
+       unsigned int flags; /* see wkq.h */
+
+       au_wkq_func_t func;
+       void *args;
+
+       struct completion *comp;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * common work handler: run the queued function, then either signal the
+ * waiter (WAIT jobs) or tear down the dynamically allocated wkinfo
+ * (nowait jobs, allocated in au_wkq_nowait()).
+ */
+static void wkq_func(struct work_struct *wk)
+{
+       struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
+
+       /* workers are expected to run as root without a file-size limit */
+       AuDebugOn(current_fsuid());
+       AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);
+
+       wkinfo->func(wkinfo->args);
+       if (au_ftest_wkq(wkinfo->flags, WAIT))
+               complete(wkinfo->comp);
+       else {
+               kobject_put(wkinfo->kobj);
+               module_put(THIS_MODULE); /* todo: ?? */
+               kfree(wkinfo);
+       }
+}
+
+/*
+ * Since struct completion is large, try allocating it dynamically.
+ * The disabled #else variant keeps the completion on the caller's stack
+ * instead (no allocation, no failure path).
+ */
+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
+#define AuWkqCompDeclare(name) struct completion *comp = NULL
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+       *comp = kmalloc(sizeof(**comp), GFP_NOFS);
+       if (*comp) {
+               init_completion(*comp);
+               wkinfo->comp = *comp;
+               return 0;
+       }
+       return -ENOMEM;
+}
+
+static void au_wkq_comp_free(struct completion *comp)
+{
+       kfree(comp);
+}
+
+#else
+
+/* no braces */
+#define AuWkqCompDeclare(name) \
+       DECLARE_COMPLETION_ONSTACK(_ ## name); \
+       struct completion *comp = &_ ## name
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+       wkinfo->comp = *comp;
+       return 0;
+}
+
+static void au_wkq_comp_free(struct completion *comp __maybe_unused)
+{
+       /* empty */
+}
+#endif /* 4KSTACKS */
+
+/*
+ * dispatch @wkinfo: WAIT jobs go to the internal aufs workqueue,
+ * nowait jobs are handed to the system-wide workqueue.
+ */
+static void au_wkq_run(struct au_wkinfo *wkinfo)
+{
+       if (au_ftest_wkq(wkinfo->flags, NEST)) {
+               /* queueing from inside a worker is suspicious but tolerated */
+               if (au_wkq_test()) {
+                       AuWarn1("wkq from wkq, due to a dead dir by UDBA?\n");
+                       AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
+               }
+       } else
+               au_dbg_verify_kthread();
+
+       if (au_ftest_wkq(wkinfo->flags, WAIT)) {
+               /* wkinfo lives on the caller's stack for WAIT jobs */
+               INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
+               queue_work(au_wkq, &wkinfo->wk);
+       } else {
+               INIT_WORK(&wkinfo->wk, wkq_func);
+               schedule_work(&wkinfo->wk);
+       }
+}
+
+/*
+ * run @func(@args) through the workqueue and block until it finishes.
+ *
+ * Be careful. It is easy to make deadlock happen.
+ * processA: lock, wkq and wait
+ * processB: wkq and wait, lock in wkq
+ * --> deadlock
+ */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
+{
+       int err;
+       AuWkqCompDeclare(comp);
+       struct au_wkinfo wkinfo = {
+               .flags  = flags,
+               .func   = func,
+               .args   = args
+       };
+
+       err = au_wkq_comp_alloc(&wkinfo, &comp);
+       if (!err) {
+               au_wkq_run(&wkinfo);
+               /* no timeout, no interrupt */
+               wait_for_completion(wkinfo.comp);
+               au_wkq_comp_free(comp);
+               destroy_work_on_stack(&wkinfo.wk);
+       }
+
+       return err;
+
+}
+
+/*
+ * Note: dget/dput() in func for aufs dentries are not supported. It will be a
+ * problem in a concurrent umounting.
+ */
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+                 unsigned int flags)
+{
+       int err;
+       struct au_wkinfo *wkinfo;
+
+       /*
+        * nw_len is decremented by au_nwt_done(): by the queued function
+        * when it finishes, or right here when queueing fails.
+        */
+       atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
+
+       /*
+        * wkq_func() must free this wkinfo.
+        * it highly depends upon the implementation of workqueue.
+        */
+       err = 0;
+       wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
+       if (wkinfo) {
+               wkinfo->kobj = &au_sbi(sb)->si_kobj;
+               wkinfo->flags = flags & ~AuWkq_WAIT;
+               wkinfo->func = func;
+               wkinfo->args = args;
+               wkinfo->comp = NULL;
+               /* pin the sbinfo and the module while the job is in flight */
+               kobject_get(wkinfo->kobj);
+               __module_get(THIS_MODULE); /* todo: ?? */
+
+               au_wkq_run(wkinfo);
+       } else {
+               err = -ENOMEM;
+               au_nwt_done(&au_sbi(sb)->si_nowait);
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* initialize the 'nowait' task counter and its wait queue */
+void au_nwt_init(struct au_nowait_tasks *nwt)
+{
+       atomic_set(&nwt->nw_len, 0);
+       /* smp_mb(); */ /* atomic_set */
+       init_waitqueue_head(&nwt->nw_wq);
+}
+
+/* tear down the internal aufs workqueue (module exit) */
+void au_wkq_fin(void)
+{
+       destroy_workqueue(au_wkq);
+}
+
+/* create the internal aufs workqueue (module init) */
+int __init au_wkq_init(void)
+{
+       int err;
+
+       err = 0;
+       /* this code assumes WQ_RESCUER is defined and non-zero */
+       BUILD_BUG_ON(!WQ_RESCUER);
+       au_wkq = alloc_workqueue(AUFS_WKQ_NAME, !WQ_RESCUER, WQ_DFL_ACTIVE);
+       if (IS_ERR(au_wkq))
+               err = PTR_ERR(au_wkq);
+       else if (!au_wkq)
+               err = -ENOMEM;
+
+       return err;
+}
diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h
new file mode 100644 (file)
index 0000000..c316b7f
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credentials management scheme
+ */
+
+#ifndef __AUFS_WKQ_H__
+#define __AUFS_WKQ_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * in the next operation, wait for the 'nowait' tasks in system-wide workqueue
+ */
+struct au_nowait_tasks {
+       atomic_t                nw_len;
+       wait_queue_head_t       nw_wq;
+};
+
+/* ---------------------------------------------------------------------- */
+
+typedef void (*au_wkq_func_t)(void *args);
+
+/* wkq flags */
+#define AuWkq_WAIT     1
+#define AuWkq_NEST     (1 << 1)
+#define au_ftest_wkq(flags, name)      ((flags) & AuWkq_##name)
+#define au_fset_wkq(flags, name) \
+       do { (flags) |= AuWkq_##name; } while (0)
+#define au_fclr_wkq(flags, name) \
+       do { (flags) &= ~AuWkq_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuWkq_NEST
+#define AuWkq_NEST     0
+#endif
+
+/* wkq.c */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args);
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+                 unsigned int flags);
+void au_nwt_init(struct au_nowait_tasks *nwt);
+int __init au_wkq_init(void);
+void au_wkq_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+/* is the current task a workqueue worker? */
+static inline int au_wkq_test(void)
+{
+       return current->flags & PF_WQ_WORKER;
+}
+
+/* run @func(@args) via the workqueue and wait for its completion */
+static inline int au_wkq_wait(au_wkq_func_t func, void *args)
+{
+       return au_wkq_do_wait(AuWkq_WAIT, func, args);
+}
+
+/* one 'nowait' task finished; wake up waiters when none remain */
+static inline void au_nwt_done(struct au_nowait_tasks *nwt)
+{
+       if (atomic_dec_and_test(&nwt->nw_len))
+               wake_up_all(&nwt->nw_wq);
+}
+
+/* wait until all pending 'nowait' tasks have finished; always returns 0 */
+static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
+{
+       wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
+       return 0;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WKQ_H__ */
diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c
new file mode 100644 (file)
index 0000000..412e7d6
--- /dev/null
@@ -0,0 +1,1313 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * external inode number translation table and bitmap
+ */
+
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* todo: unnecessary to support mmap_sem since kernel-space? */
+/*
+ * read from the xino file into a kernel buffer via @func,
+ * retrying while the call returns -EAGAIN or -EINTR.
+ */
+ssize_t xino_fread(au_readf_t func, struct file *file, void *kbuf, size_t size,
+                  loff_t *pos)
+{
+       ssize_t err;
+       mm_segment_t oldfs;
+       union {
+               void *k;
+               char __user *u;
+       } buf;
+
+       buf.k = kbuf;
+       oldfs = get_fs();
+       /* let the kernel buffer pass the __user pointer checks */
+       set_fs(KERNEL_DS);
+       do {
+               /* todo: signal_pending? */
+               err = func(file, buf.u, size, pos);
+       } while (err == -EAGAIN || err == -EINTR);
+       set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+       if (err > 0)
+               fsnotify_access(file->f_dentry);
+#endif
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * write a kernel buffer to the xino file via @func,
+ * retrying while the call returns -EAGAIN or -EINTR.
+ */
+static ssize_t do_xino_fwrite(au_writef_t func, struct file *file, void *kbuf,
+                             size_t size, loff_t *pos)
+{
+       ssize_t err;
+       mm_segment_t oldfs;
+       union {
+               void *k;
+               const char __user *u;
+       } buf;
+
+       buf.k = kbuf;
+       oldfs = get_fs();
+       /* let the kernel buffer pass the __user pointer checks */
+       set_fs(KERNEL_DS);
+       do {
+               /* todo: signal_pending? */
+               err = func(file, buf.u, size, pos);
+       } while (err == -EAGAIN || err == -EINTR);
+       set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+       if (err > 0)
+               fsnotify_modify(file->f_dentry);
+#endif
+
+       return err;
+}
+
+/* argument package to run do_xino_fwrite() through the aufs workqueue */
+struct do_xino_fwrite_args {
+       ssize_t *errp;
+       au_writef_t func;
+       struct file *file;
+       void *buf;
+       size_t size;
+       loff_t *pos;
+};
+
+/* workqueue trampoline for do_xino_fwrite() */
+static void call_do_xino_fwrite(void *args)
+{
+       struct do_xino_fwrite_args *a = args;
+       *a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+/*
+ * write to the xino file. when the current task has a finite
+ * RLIMIT_FSIZE, delegate to a workqueue worker, which runs unlimited.
+ */
+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size,
+                   loff_t *pos)
+{
+       ssize_t err;
+
+       /* todo: signal block and no wkq? */
+       if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) {
+               /*
+                * lockdep is suppressed around the lower write,
+                * presumably to silence cross-layer lock dependency
+                * reports -- TODO confirm
+                */
+               lockdep_off();
+               err = do_xino_fwrite(func, file, buf, size, pos);
+               lockdep_on();
+       } else {
+               /*
+                * it breaks RLIMIT_FSIZE and normal user's limit,
+                * users should care about quota and real 'filesystem full.'
+                */
+               int wkq_err;
+               struct do_xino_fwrite_args args = {
+                       .errp   = &err,
+                       .func   = func,
+                       .file   = file,
+                       .buf    = buf,
+                       .size   = size,
+                       .pos    = pos
+               };
+
+               wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
+               if (unlikely(wkq_err))
+                       err = wkq_err;
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create a new xinofile at the same place/path as @base_file.
+ * the new file is unlinked right after opening, so it stays anonymous and
+ * disappears automatically on its last fput(). when @copy_src is given,
+ * its whole contents are copied into the new file.
+ * returns the opened file or ERR_PTR().
+ */
+struct file *au_xino_create2(struct file *base_file, struct file *copy_src)
+{
+       struct file *file;
+       struct dentry *base, *parent;
+       struct inode *dir;
+       struct qstr *name;
+       struct path path;
+       int err;
+
+       base = base_file->f_dentry;
+       parent = base->d_parent; /* dir inode is locked */
+       dir = parent->d_inode;
+       IMustLock(dir);
+
+       file = ERR_PTR(-EINVAL);
+       name = &base->d_name;
+       path.dentry = vfsub_lookup_one_len(name->name, parent, name->len);
+       if (IS_ERR(path.dentry)) {
+               file = (void *)path.dentry;
+               pr_err("%.*s lookup err %ld\n",
+                      AuLNPair(name), PTR_ERR(path.dentry));
+               goto out;
+       }
+
+       /* no need to mnt_want_write() since we call dentry_open() later */
+       err = vfs_create(dir, path.dentry, S_IRUGO | S_IWUGO, NULL);
+       if (unlikely(err)) {
+               file = ERR_PTR(err);
+               pr_err("%.*s create err %d\n", AuLNPair(name), err);
+               goto out_dput;
+       }
+
+       path.mnt = base_file->f_vfsmnt;
+       file = vfsub_dentry_open(&path,
+                                O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+                                /* | __FMODE_NONOTIFY */);
+       if (IS_ERR(file)) {
+               pr_err("%.*s open err %ld\n", AuLNPair(name), PTR_ERR(file));
+               goto out_dput;
+       }
+
+       /* keep the file anonymous: it lives only while it is held open */
+       err = vfsub_unlink(dir, &file->f_path, /*force*/0);
+       if (unlikely(err)) {
+               pr_err("%.*s unlink err %d\n", AuLNPair(name), err);
+               goto out_fput;
+       }
+
+       if (copy_src) {
+               /* no one can touch copy_src xino */
+               err = au_copy_file(file, copy_src,
+                                  i_size_read(copy_src->f_dentry->d_inode));
+               if (unlikely(err)) {
+                       pr_err("%.*s copy err %d\n", AuLNPair(name), err);
+                       goto out_fput;
+               }
+       }
+       goto out_dput; /* success */
+
+out_fput:
+       fput(file);
+       file = ERR_PTR(err);
+out_dput:
+       dput(path.dentry);
+out:
+       return file;
+}
+
+/* lock context for the parent dir of a xino file; see au_xino_lock_dir() */
+struct au_xino_lock_dir {
+       struct au_hinode *hdir;
+       struct dentry *parent;
+       struct mutex *mtx;
+};
+
+/*
+ * lock the parent dir of @xino. when the xino file lives on a branch
+ * (au_xino_brid() resolves to a valid index), take that branch root's
+ * i_mutex via au_hn_imtx_lock_nested(); otherwise take the plain
+ * i_mutex of the xino file's parent. undone by au_xino_unlock_dir().
+ */
+static void au_xino_lock_dir(struct super_block *sb, struct file *xino,
+                            struct au_xino_lock_dir *ldir)
+{
+       aufs_bindex_t brid, bindex;
+
+       ldir->hdir = NULL;
+       bindex = -1;
+       brid = au_xino_brid(sb);
+       if (brid >= 0)
+               bindex = au_br_index(sb, brid);
+       if (bindex >= 0) {
+               ldir->hdir = au_hi(sb->s_root->d_inode, bindex);
+               au_hn_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT);
+       } else {
+               ldir->parent = dget_parent(xino->f_dentry);
+               ldir->mtx = &ldir->parent->d_inode->i_mutex;
+               mutex_lock_nested(ldir->mtx, AuLsc_I_PARENT);
+       }
+}
+
+/* undo au_xino_lock_dir(): release whichever lock was taken */
+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
+{
+       if (ldir->hdir)
+               au_hn_imtx_unlock(ldir->hdir);
+       else {
+               mutex_unlock(ldir->mtx);
+               dput(ldir->parent);
+       }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* truncate xino files asynchronously */
+
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex)
+{
+       int err;
+       unsigned long jiffy;
+       blkcnt_t blocks;
+       aufs_bindex_t bi, bend;
+       struct kstatfs *st;
+       struct au_branch *br;
+       struct file *new_xino, *file;
+       struct super_block *h_sb;
+       struct au_xino_lock_dir ldir;
+
+       err = -ENOMEM;
+       st = kzalloc(sizeof(*st), GFP_NOFS);
+       if (unlikely(!st))
+               goto out;
+
+       err = -EINVAL;
+       bend = au_sbend(sb);
+       if (unlikely(bindex < 0 || bend < bindex))
+               goto out_st;
+       br = au_sbr(sb, bindex);
+       file = br->br_xino.xi_file;
+       if (!file)
+               goto out_st;
+
+       err = vfs_statfs(&file->f_path, st);
+       if (unlikely(err))
+               AuErr1("statfs err %d, ignored\n", err);
+       jiffy = jiffies;
+       blocks = file->f_dentry->d_inode->i_blocks;
+       pr_info("begin truncating xino(b%d), ib%llu, %llu/%llu free blks\n",
+               bindex, (u64)blocks, st->f_bfree, st->f_blocks);
+
+       au_xino_lock_dir(sb, file, &ldir);
+       /* mnt_want_write() is unnecessary here */
+       new_xino = au_xino_create2(file, file);
+       au_xino_unlock_dir(&ldir);
+       err = PTR_ERR(new_xino);
+       if (IS_ERR(new_xino)) {
+               pr_err("err %d, ignored\n", err);
+               goto out_st;
+       }
+       err = 0;
+       fput(file);
+       br->br_xino.xi_file = new_xino;
+
+       h_sb = au_br_sb(br);
+       for (bi = 0; bi <= bend; bi++) {
+               if (unlikely(bi == bindex))
+                       continue;
+               br = au_sbr(sb, bi);
+               if (au_br_sb(br) != h_sb)
+                       continue;
+
+               fput(br->br_xino.xi_file);
+               br->br_xino.xi_file = new_xino;
+               get_file(new_xino);
+       }
+
+       err = vfs_statfs(&new_xino->f_path, st);
+       if (!err) {
+               pr_info("end truncating xino(b%d), ib%llu, %llu/%llu free blks\n",
+                       bindex, (u64)new_xino->f_dentry->d_inode->i_blocks,
+                       st->f_bfree, st->f_blocks);
+               if (new_xino->f_dentry->d_inode->i_blocks < blocks)
+                       au_sbi(sb)->si_xino_jiffy = jiffy;
+       } else
+               AuErr1("statfs err %d, ignored\n", err);
+
+out_st:
+       kfree(st);
+out:
+       return err;
+}
+
+struct xino_do_trunc_args {
+       struct super_block *sb;
+       struct au_branch *br;
+};
+
+static void xino_do_trunc(void *_args)
+{
+       struct xino_do_trunc_args *args = _args;
+       struct super_block *sb;
+       struct au_branch *br;
+       struct inode *dir;
+       int err;
+       aufs_bindex_t bindex;
+
+       err = 0;
+       sb = args->sb;
+       dir = sb->s_root->d_inode;
+       br = args->br;
+
+       si_noflush_write_lock(sb);
+       ii_read_lock_parent(dir);
+       bindex = au_br_index(sb, br->br_id);
+       err = au_xino_trunc(sb, bindex);
+       ii_read_unlock(dir);
+       if (unlikely(err))
+               pr_warn("err b%d, (%d)\n", bindex, err);
+       atomic_dec(&br->br_xino_running);
+       atomic_dec(&br->br_count);
+       si_write_unlock(sb);
+       au_nwt_done(&au_sbi(sb)->si_nowait);
+       kfree(args);
+}
+
+static int xino_trunc_test(struct super_block *sb, struct au_branch *br)
+{
+       int err;
+       struct kstatfs st;
+       struct au_sbinfo *sbinfo;
+
+       /* todo: si_xino_expire and the ratio should be customizable */
+       sbinfo = au_sbi(sb);
+       if (time_before(jiffies,
+                       sbinfo->si_xino_jiffy + sbinfo->si_xino_expire))
+               return 0;
+
+       /* truncation border */
+       err = vfs_statfs(&br->br_xino.xi_file->f_path, &st);
+       if (unlikely(err)) {
+               AuErr1("statfs err %d, ignored\n", err);
+               return 0;
+       }
+       if (div64_u64(st.f_bfree * 100, st.f_blocks) >= AUFS_XINO_DEF_TRUNC)
+               return 0;
+
+       return 1;
+}
+
+static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
+{
+       struct xino_do_trunc_args *args;
+       int wkq_err;
+
+       if (!xino_trunc_test(sb, br))
+               return;
+
+       if (atomic_inc_return(&br->br_xino_running) > 1)
+               goto out;
+
+       /* lock and kfree() will be called in xino_do_trunc() */
+       args = kmalloc(sizeof(*args), GFP_NOFS);
+       if (unlikely(!args)) {
+               AuErr1("no memory\n");
+               goto out_args;
+       }
+
+       atomic_inc(&br->br_count);
+       args->sb = sb;
+       args->br = br;
+       wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0);
+       if (!wkq_err)
+               return; /* success */
+
+       pr_err("wkq %d\n", wkq_err);
+       atomic_dec(&br->br_count);
+
+out_args:
+       kfree(args);
+out:
+       atomic_dec(&br->br_xino_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xino_do_write(au_writef_t write, struct file *file,
+                           ino_t h_ino, ino_t ino)
+{
+       loff_t pos;
+       ssize_t sz;
+
+       pos = h_ino;
+       if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) {
+               AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
+               return -EFBIG;
+       }
+       pos *= sizeof(ino);
+       sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos);
+       if (sz == sizeof(ino))
+               return 0; /* success */
+
+       AuIOErr("write failed (%zd)\n", sz);
+       return -EIO;
+}
+
+/*
+ * write @ino to the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * even if @ino is zero, it is written to the xinofile and means no entry.
+ * if the size of the xino file on a specific filesystem exceeds the watermark,
+ * try truncating it.
+ */
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+                 ino_t ino)
+{
+       int err;
+       unsigned int mnt_flags;
+       struct au_branch *br;
+
+       BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max)
+                    || ((loff_t)-1) > 0);
+       SiMustAnyLock(sb);
+
+       mnt_flags = au_mntflags(sb);
+       if (!au_opt_test(mnt_flags, XINO))
+               return 0;
+
+       br = au_sbr(sb, bindex);
+       err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
+                              h_ino, ino);
+       if (!err) {
+               if (au_opt_test(mnt_flags, TRUNC_XINO)
+                   && au_test_fs_trunc_xino(au_br_sb(br)))
+                       xino_try_trunc(sb, br);
+               return 0; /* success */
+       }
+
+       AuIOErr("write failed (%d)\n", err);
+       return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* aufs inode number bitmap */
+
+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
+static ino_t xib_calc_ino(unsigned long pindex, int bit)
+{
+       ino_t ino;
+
+       AuDebugOn(bit < 0 || page_bits <= bit);
+       ino = AUFS_FIRST_INO + pindex * page_bits + bit;
+       return ino;
+}
+
+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
+{
+       AuDebugOn(ino < AUFS_FIRST_INO);
+       ino -= AUFS_FIRST_INO;
+       *pindex = ino / page_bits;
+       *bit = ino % page_bits;
+}
+
+static int xib_pindex(struct super_block *sb, unsigned long pindex)
+{
+       int err;
+       loff_t pos;
+       ssize_t sz;
+       struct au_sbinfo *sbinfo;
+       struct file *xib;
+       unsigned long *p;
+
+       sbinfo = au_sbi(sb);
+       MtxMustLock(&sbinfo->si_xib_mtx);
+       AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
+                 || !au_opt_test(sbinfo->si_mntflags, XINO));
+
+       if (pindex == sbinfo->si_xib_last_pindex)
+               return 0;
+
+       xib = sbinfo->si_xib;
+       p = sbinfo->si_xib_buf;
+       pos = sbinfo->si_xib_last_pindex;
+       pos *= PAGE_SIZE;
+       sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+       if (unlikely(sz != PAGE_SIZE))
+               goto out;
+
+       pos = pindex;
+       pos *= PAGE_SIZE;
+       if (i_size_read(xib->f_dentry->d_inode) >= pos + PAGE_SIZE)
+               sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
+       else {
+               memset(p, 0, PAGE_SIZE);
+               sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+       }
+       if (sz == PAGE_SIZE) {
+               sbinfo->si_xib_last_pindex = pindex;
+               return 0; /* success */
+       }
+
+out:
+       AuIOErr1("write failed (%zd)\n", sz);
+       err = sz;
+       if (sz >= 0)
+               err = -EIO;
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_xib_clear_bit(struct inode *inode)
+{
+       int err, bit;
+       unsigned long pindex;
+       struct super_block *sb;
+       struct au_sbinfo *sbinfo;
+
+       AuDebugOn(inode->i_nlink);
+
+       sb = inode->i_sb;
+       xib_calc_bit(inode->i_ino, &pindex, &bit);
+       AuDebugOn(page_bits <= bit);
+       sbinfo = au_sbi(sb);
+       mutex_lock(&sbinfo->si_xib_mtx);
+       err = xib_pindex(sb, pindex);
+       if (!err) {
+               clear_bit(bit, sbinfo->si_xib_buf);
+               sbinfo->si_xib_next_bit = bit;
+       }
+       mutex_unlock(&sbinfo->si_xib_mtx);
+}
+
+/* for s_op->delete_inode() */
+void au_xino_delete_inode(struct inode *inode, const int unlinked)
+{
+       int err;
+       unsigned int mnt_flags;
+       aufs_bindex_t bindex, bend, bi;
+       unsigned char try_trunc;
+       struct au_iinfo *iinfo;
+       struct super_block *sb;
+       struct au_hinode *hi;
+       struct inode *h_inode;
+       struct au_branch *br;
+       au_writef_t xwrite;
+
+       sb = inode->i_sb;
+       mnt_flags = au_mntflags(sb);
+       if (!au_opt_test(mnt_flags, XINO)
+           || inode->i_ino == AUFS_ROOT_INO)
+               return;
+
+       if (unlinked) {
+               au_xigen_inc(inode);
+               au_xib_clear_bit(inode);
+       }
+
+       iinfo = au_ii(inode);
+       if (!iinfo)
+               return;
+
+       bindex = iinfo->ii_bstart;
+       if (bindex < 0)
+               return;
+
+       xwrite = au_sbi(sb)->si_xwrite;
+       try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO);
+       hi = iinfo->ii_hinode + bindex;
+       bend = iinfo->ii_bend;
+       for (; bindex <= bend; bindex++, hi++) {
+               h_inode = hi->hi_inode;
+               if (!h_inode
+                   || (!unlinked && h_inode->i_nlink))
+                       continue;
+
+               /* inode may not be revalidated */
+               bi = au_br_index(sb, hi->hi_id);
+               if (bi < 0)
+                       continue;
+
+               br = au_sbr(sb, bi);
+               err = au_xino_do_write(xwrite, br->br_xino.xi_file,
+                                      h_inode->i_ino, /*ino*/0);
+               if (!err && try_trunc
+                   && au_test_fs_trunc_xino(au_br_sb(br)))
+                       xino_try_trunc(sb, br);
+       }
+}
+
+/* get an unused inode number from bitmap */
+ino_t au_xino_new_ino(struct super_block *sb)
+{
+       ino_t ino;
+       unsigned long *p, pindex, ul, pend;
+       struct au_sbinfo *sbinfo;
+       struct file *file;
+       int free_bit, err;
+
+       if (!au_opt_test(au_mntflags(sb), XINO))
+               return iunique(sb, AUFS_FIRST_INO);
+
+       sbinfo = au_sbi(sb);
+       mutex_lock(&sbinfo->si_xib_mtx);
+       p = sbinfo->si_xib_buf;
+       free_bit = sbinfo->si_xib_next_bit;
+       if (free_bit < page_bits && !test_bit(free_bit, p))
+               goto out; /* success */
+       free_bit = find_first_zero_bit(p, page_bits);
+       if (free_bit < page_bits)
+               goto out; /* success */
+
+       pindex = sbinfo->si_xib_last_pindex;
+       for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
+               err = xib_pindex(sb, ul);
+               if (unlikely(err))
+                       goto out_err;
+               free_bit = find_first_zero_bit(p, page_bits);
+               if (free_bit < page_bits)
+                       goto out; /* success */
+       }
+
+       file = sbinfo->si_xib;
+       pend = i_size_read(file->f_dentry->d_inode) / PAGE_SIZE;
+       for (ul = pindex + 1; ul <= pend; ul++) {
+               err = xib_pindex(sb, ul);
+               if (unlikely(err))
+                       goto out_err;
+               free_bit = find_first_zero_bit(p, page_bits);
+               if (free_bit < page_bits)
+                       goto out; /* success */
+       }
+       BUG();
+
+out:
+       set_bit(free_bit, p);
+       sbinfo->si_xib_next_bit = free_bit + 1;
+       pindex = sbinfo->si_xib_last_pindex;
+       mutex_unlock(&sbinfo->si_xib_mtx);
+       ino = xib_calc_ino(pindex, free_bit);
+       AuDbg("i%lu\n", (unsigned long)ino);
+       return ino;
+out_err:
+       mutex_unlock(&sbinfo->si_xib_mtx);
+       AuDbg("i0\n");
+       return 0;
+}
+
+/*
+ * read @ino from xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * if @ino does not exist and @do_new is true, get new one.
+ */
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+                ino_t *ino)
+{
+       int err;
+       ssize_t sz;
+       loff_t pos;
+       struct file *file;
+       struct au_sbinfo *sbinfo;
+
+       *ino = 0;
+       if (!au_opt_test(au_mntflags(sb), XINO))
+               return 0; /* no xino */
+
+       err = 0;
+       sbinfo = au_sbi(sb);
+       pos = h_ino;
+       if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) {
+               AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
+               return -EFBIG;
+       }
+       pos *= sizeof(*ino);
+
+       file = au_sbr(sb, bindex)->br_xino.xi_file;
+       if (i_size_read(file->f_dentry->d_inode) < pos + sizeof(*ino))
+               return 0; /* no ino */
+
+       sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos);
+       if (sz == sizeof(*ino))
+               return 0; /* success */
+
+       err = sz;
+       if (unlikely(sz >= 0)) {
+               err = -EIO;
+               AuIOErr("xino read error (%zd)\n", sz);
+       }
+
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* create and set a new xino file */
+
+struct file *au_xino_create(struct super_block *sb, char *fname, int silent)
+{
+       struct file *file;
+       struct dentry *h_parent, *d;
+       struct inode *h_dir;
+       int err;
+
+       /*
+        * at mount-time, and the xino file is the default path,
+        * hnotify is disabled so we have no notify events to ignore.
+        * when a user specified the xino, we cannot get au_hdir to be ignored.
+        */
+       file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+                              /* | __FMODE_NONOTIFY */,
+                              S_IRUGO | S_IWUGO);
+       if (IS_ERR(file)) {
+               if (!silent)
+                       pr_err("open %s(%ld)\n", fname, PTR_ERR(file));
+               return file;
+       }
+
+       /* keep file count */
+       h_parent = dget_parent(file->f_dentry);
+       h_dir = h_parent->d_inode;
+       mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
+       /* mnt_want_write() is unnecessary here */
+       err = vfsub_unlink(h_dir, &file->f_path, /*force*/0);
+       mutex_unlock(&h_dir->i_mutex);
+       dput(h_parent);
+       if (unlikely(err)) {
+               if (!silent)
+                       pr_err("unlink %s(%d)\n", fname, err);
+               goto out;
+       }
+
+       err = -EINVAL;
+       d = file->f_dentry;
+       if (unlikely(sb == d->d_sb)) {
+               if (!silent)
+                       pr_err("%s must be outside\n", fname);
+               goto out;
+       }
+       if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
+               if (!silent)
+                       pr_err("xino doesn't support %s(%s)\n",
+                              fname, au_sbtype(d->d_sb));
+               goto out;
+       }
+       return file; /* success */
+
+out:
+       fput(file);
+       file = ERR_PTR(err);
+       return file;
+}
+
+/*
+ * find another branch who is on the same filesystem of the specified
+ * branch{@btgt}. search until @bend.
+ */
+static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
+                       aufs_bindex_t bend)
+{
+       aufs_bindex_t bindex;
+       struct super_block *tgt_sb = au_sbr_sb(sb, btgt);
+
+       for (bindex = 0; bindex < btgt; bindex++)
+               if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
+                       return bindex;
+       for (bindex++; bindex <= bend; bindex++)
+               if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
+                       return bindex;
+       return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * initialize the xinofile for the specified branch @br
+ * at the place/path where @base_file indicates.
+ * test whether another branch is on the same filesystem or not,
+ * if @do_test is true.
+ */
+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
+              struct file *base_file, int do_test)
+{
+       int err;
+       ino_t ino;
+       aufs_bindex_t bend, bindex;
+       struct au_branch *shared_br, *b;
+       struct file *file;
+       struct super_block *tgt_sb;
+
+       shared_br = NULL;
+       bend = au_sbend(sb);
+       if (do_test) {
+               tgt_sb = au_br_sb(br);
+               for (bindex = 0; bindex <= bend; bindex++) {
+                       b = au_sbr(sb, bindex);
+                       if (tgt_sb == au_br_sb(b)) {
+                               shared_br = b;
+                               break;
+                       }
+               }
+       }
+
+       if (!shared_br || !shared_br->br_xino.xi_file) {
+               struct au_xino_lock_dir ldir;
+
+               au_xino_lock_dir(sb, base_file, &ldir);
+               /* mnt_want_write() is unnecessary here */
+               file = au_xino_create2(base_file, NULL);
+               au_xino_unlock_dir(&ldir);
+               err = PTR_ERR(file);
+               if (IS_ERR(file))
+                       goto out;
+               br->br_xino.xi_file = file;
+       } else {
+               br->br_xino.xi_file = shared_br->br_xino.xi_file;
+               get_file(br->br_xino.xi_file);
+       }
+
+       ino = AUFS_ROOT_INO;
+       err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
+                              h_ino, ino);
+       if (unlikely(err)) {
+               fput(br->br_xino.xi_file);
+               br->br_xino.xi_file = NULL;
+       }
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* truncate a xino bitmap file */
+
+/* todo: slow */
+static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
+{
+       int err, bit;
+       ssize_t sz;
+       unsigned long pindex;
+       loff_t pos, pend;
+       struct au_sbinfo *sbinfo;
+       au_readf_t func;
+       ino_t *ino;
+       unsigned long *p;
+
+       err = 0;
+       sbinfo = au_sbi(sb);
+       MtxMustLock(&sbinfo->si_xib_mtx);
+       p = sbinfo->si_xib_buf;
+       func = sbinfo->si_xread;
+       pend = i_size_read(file->f_dentry->d_inode);
+       pos = 0;
+       while (pos < pend) {
+               sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
+               err = sz;
+               if (unlikely(sz <= 0))
+                       goto out;
+
+               err = 0;
+               for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) {
+                       if (unlikely(*ino < AUFS_FIRST_INO))
+                               continue;
+
+                       xib_calc_bit(*ino, &pindex, &bit);
+                       AuDebugOn(page_bits <= bit);
+                       err = xib_pindex(sb, pindex);
+                       if (!err)
+                               set_bit(bit, p);
+                       else
+                               goto out;
+               }
+       }
+
+out:
+       return err;
+}
+
+static int xib_restore(struct super_block *sb)
+{
+       int err;
+       aufs_bindex_t bindex, bend;
+       void *page;
+
+       err = -ENOMEM;
+       page = (void *)__get_free_page(GFP_NOFS);
+       if (unlikely(!page))
+               goto out;
+
+       err = 0;
+       bend = au_sbend(sb);
+       for (bindex = 0; !err && bindex <= bend; bindex++)
+               if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0)
+                       err = do_xib_restore
+                               (sb, au_sbr(sb, bindex)->br_xino.xi_file, page);
+               else
+                       AuDbg("b%d\n", bindex);
+       free_page((unsigned long)page);
+
+out:
+       return err;
+}
+
+int au_xib_trunc(struct super_block *sb)
+{
+       int err;
+       ssize_t sz;
+       loff_t pos;
+       struct au_xino_lock_dir ldir;
+       struct au_sbinfo *sbinfo;
+       unsigned long *p;
+       struct file *file;
+
+       SiMustWriteLock(sb);
+
+       err = 0;
+       sbinfo = au_sbi(sb);
+       if (!au_opt_test(sbinfo->si_mntflags, XINO))
+               goto out;
+
+       file = sbinfo->si_xib;
+       if (i_size_read(file->f_dentry->d_inode) <= PAGE_SIZE)
+               goto out;
+
+       au_xino_lock_dir(sb, file, &ldir);
+       /* mnt_want_write() is unnecessary here */
+       file = au_xino_create2(sbinfo->si_xib, NULL);
+       au_xino_unlock_dir(&ldir);
+       err = PTR_ERR(file);
+       if (IS_ERR(file))
+               goto out;
+       fput(sbinfo->si_xib);
+       sbinfo->si_xib = file;
+
+       p = sbinfo->si_xib_buf;
+       memset(p, 0, PAGE_SIZE);
+       pos = 0;
+       sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
+       if (unlikely(sz != PAGE_SIZE)) {
+               err = sz;
+               AuIOErr("err %d\n", err);
+               if (sz >= 0)
+                       err = -EIO;
+               goto out;
+       }
+
+       mutex_lock(&sbinfo->si_xib_mtx);
+       /* mnt_want_write() is unnecessary here */
+       err = xib_restore(sb);
+       mutex_unlock(&sbinfo->si_xib_mtx);
+
+out:
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * xino mount option handlers
+ */
+static au_readf_t find_readf(struct file *h_file)
+{
+       const struct file_operations *fop = h_file->f_op;
+
+       if (fop) {
+               if (fop->read)
+                       return fop->read;
+               if (fop->aio_read)
+                       return do_sync_read;
+       }
+       return ERR_PTR(-ENOSYS);
+}
+
+static au_writef_t find_writef(struct file *h_file)
+{
+       const struct file_operations *fop = h_file->f_op;
+
+       if (fop) {
+               if (fop->write)
+                       return fop->write;
+               if (fop->aio_write)
+                       return do_sync_write;
+       }
+       return ERR_PTR(-ENOSYS);
+}
+
+/* xino bitmap */
+static void xino_clear_xib(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       sbinfo->si_xread = NULL;
+       sbinfo->si_xwrite = NULL;
+       if (sbinfo->si_xib)
+               fput(sbinfo->si_xib);
+       sbinfo->si_xib = NULL;
+       free_page((unsigned long)sbinfo->si_xib_buf);
+       sbinfo->si_xib_buf = NULL;
+}
+
+static int au_xino_set_xib(struct super_block *sb, struct file *base)
+{
+       int err;
+       loff_t pos;
+       struct au_sbinfo *sbinfo;
+       struct file *file;
+
+       SiMustWriteLock(sb);
+
+       sbinfo = au_sbi(sb);
+       file = au_xino_create2(base, sbinfo->si_xib);
+       err = PTR_ERR(file);
+       if (IS_ERR(file))
+               goto out;
+       if (sbinfo->si_xib)
+               fput(sbinfo->si_xib);
+       sbinfo->si_xib = file;
+       sbinfo->si_xread = find_readf(file);
+       sbinfo->si_xwrite = find_writef(file);
+
+       err = -ENOMEM;
+       if (!sbinfo->si_xib_buf)
+               sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
+       if (unlikely(!sbinfo->si_xib_buf))
+               goto out_unset;
+
+       sbinfo->si_xib_last_pindex = 0;
+       sbinfo->si_xib_next_bit = 0;
+       if (i_size_read(file->f_dentry->d_inode) < PAGE_SIZE) {
+               pos = 0;
+               err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
+                                 PAGE_SIZE, &pos);
+               if (unlikely(err != PAGE_SIZE))
+                       goto out_free;
+       }
+       err = 0;
+       goto out; /* success */
+
+out_free:
+       free_page((unsigned long)sbinfo->si_xib_buf);
+       sbinfo->si_xib_buf = NULL;
+       if (err >= 0)
+               err = -EIO;
+out_unset:
+       fput(sbinfo->si_xib);
+       sbinfo->si_xib = NULL;
+       sbinfo->si_xread = NULL;
+       sbinfo->si_xwrite = NULL;
+out:
+       return err;
+}
+
+/* xino for each branch */
+static void xino_clear_br(struct super_block *sb)
+{
+       aufs_bindex_t bindex, bend;
+       struct au_branch *br;
+
+       bend = au_sbend(sb);
+       for (bindex = 0; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               if (!br || !br->br_xino.xi_file)
+                       continue;
+
+               fput(br->br_xino.xi_file);
+               br->br_xino.xi_file = NULL;
+       }
+}
+
+static int au_xino_set_br(struct super_block *sb, struct file *base)
+{
+       int err;
+       ino_t ino;
+       aufs_bindex_t bindex, bend, bshared;
+       struct {
+               struct file *old, *new;
+       } *fpair, *p;
+       struct au_branch *br;
+       struct inode *inode;
+       au_writef_t writef;
+
+       SiMustWriteLock(sb);
+
+       err = -ENOMEM;
+       bend = au_sbend(sb);
+       fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS);
+       if (unlikely(!fpair))
+               goto out;
+
+       inode = sb->s_root->d_inode;
+       ino = AUFS_ROOT_INO;
+       writef = au_sbi(sb)->si_xwrite;
+       for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
+               br = au_sbr(sb, bindex);
+               bshared = is_sb_shared(sb, bindex, bindex - 1);
+               if (bshared >= 0) {
+                       /* shared xino */
+                       *p = fpair[bshared];
+                       get_file(p->new);
+               }
+
+               if (!p->new) {
+                       /* new xino */
+                       p->old = br->br_xino.xi_file;
+                       p->new = au_xino_create2(base, br->br_xino.xi_file);
+                       err = PTR_ERR(p->new);
+                       if (IS_ERR(p->new)) {
+                               p->new = NULL;
+                               goto out_pair;
+                       }
+               }
+
+               err = au_xino_do_write(writef, p->new,
+                                      au_h_iptr(inode, bindex)->i_ino, ino);
+               if (unlikely(err))
+                       goto out_pair;
+       }
+
+       for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
+               br = au_sbr(sb, bindex);
+               if (br->br_xino.xi_file)
+                       fput(br->br_xino.xi_file);
+               get_file(p->new);
+               br->br_xino.xi_file = p->new;
+       }
+
+out_pair:
+       for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++)
+               if (p->new)
+                       fput(p->new);
+               else
+                       break;
+       kfree(fpair);
+out:
+       return err;
+}
+
+void au_xino_clr(struct super_block *sb)
+{
+       struct au_sbinfo *sbinfo;
+
+       au_xigen_clr(sb);
+       xino_clear_xib(sb);
+       xino_clear_br(sb);
+       sbinfo = au_sbi(sb);
+       /* lvalue, do not call au_mntflags() */
+       au_opt_clr(sbinfo->si_mntflags, XINO);
+}
+
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount)
+{
+       int err, skip;
+       struct dentry *parent, *cur_parent;
+       struct qstr *dname, *cur_name;
+       struct file *cur_xino;
+       struct inode *dir;
+       struct au_sbinfo *sbinfo;
+
+       SiMustWriteLock(sb);
+
+       err = 0;
+       sbinfo = au_sbi(sb);
+       parent = dget_parent(xino->file->f_dentry);
+       if (remount) {
+               skip = 0;
+               dname = &xino->file->f_dentry->d_name;
+               cur_xino = sbinfo->si_xib;
+               if (cur_xino) {
+                       cur_parent = dget_parent(cur_xino->f_dentry);
+                       cur_name = &cur_xino->f_dentry->d_name;
+                       skip = (cur_parent == parent
+                               && dname->len == cur_name->len
+                               && !memcmp(dname->name, cur_name->name,
+                                          dname->len));
+                       dput(cur_parent);
+               }
+               if (skip)
+                       goto out;
+       }
+
+       au_opt_set(sbinfo->si_mntflags, XINO);
+       dir = parent->d_inode;
+       mutex_lock_nested(&dir->i_mutex, AuLsc_I_PARENT);
+       /* mnt_want_write() is unnecessary here */
+       err = au_xino_set_xib(sb, xino->file);
+       if (!err)
+               err = au_xigen_set(sb, xino->file);
+       if (!err)
+               err = au_xino_set_br(sb, xino->file);
+       mutex_unlock(&dir->i_mutex);
+       if (!err)
+               goto out; /* success */
+
+       /* reset all */
+       AuIOErr("failed creating xino(%d).\n", err);
+
+out:
+       dput(parent);
+       return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create a xinofile at the default place/path.
+ */
+struct file *au_xino_def(struct super_block *sb)
+{
+       struct file *file;
+       char *page, *p;
+       struct au_branch *br;
+       struct super_block *h_sb;
+       struct path path;
+       aufs_bindex_t bend, bindex, bwr;
+
+       br = NULL;
+       bend = au_sbend(sb);
+       bwr = -1;
+       for (bindex = 0; bindex <= bend; bindex++) {
+               br = au_sbr(sb, bindex);
+               if (au_br_writable(br->br_perm)
+                   && !au_test_fs_bad_xino(au_br_sb(br))) {
+                       bwr = bindex;
+                       break;
+               }
+       }
+
+       if (bwr >= 0) {
+               file = ERR_PTR(-ENOMEM);
+               page = __getname_gfp(GFP_NOFS);
+               if (unlikely(!page))
+                       goto out;
+               path.mnt = au_br_mnt(br);
+               path.dentry = au_h_dptr(sb->s_root, bwr);
+               p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
+               file = (void *)p;
+               if (!IS_ERR(p)) {
+                       strcat(p, "/" AUFS_XINO_FNAME);
+                       AuDbg("%s\n", p);
+                       file = au_xino_create(sb, p, /*silent*/0);
+                       if (!IS_ERR(file))
+                               au_xino_brid_set(sb, br->br_id);
+               }
+               __putname(page);
+       } else {
+               file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0);
+               if (IS_ERR(file))
+                       goto out;
+               h_sb = file->f_dentry->d_sb;
+               if (unlikely(au_test_fs_bad_xino(h_sb))) {
+                       pr_err("xino doesn't support %s(%s)\n",
+                              AUFS_XINO_DEFPATH, au_sbtype(h_sb));
+                       fput(file);
+                       file = ERR_PTR(-EINVAL);
+               }
+               if (!IS_ERR(file))
+                       au_xino_brid_set(sb, -1);
+       }
+
+out:
+       return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_xino_path(struct seq_file *seq, struct file *file)
+{
+       int err;
+
+       err = au_seq_path(seq, &file->f_path);
+       if (unlikely(err < 0))
+               goto out;
+
+       err = 0;
+#define Deleted "\\040(deleted)"
+       seq->count -= sizeof(Deleted) - 1;
+       AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
+                        sizeof(Deleted) - 1));
+#undef Deleted
+
+out:
+       return err;
+}
index 62d7a6d..0769e2e 100644 (file)
@@ -237,7 +237,7 @@ static void autofs_dev_ioctl_fd_install(unsigned int fd, struct file *file)
        fdt = files_fdtable(files);
        BUG_ON(fdt->fd[fd] != NULL);
        rcu_assign_pointer(fdt->fd[fd], file);
-       FD_SET(fd, fdt->close_on_exec);
+       __set_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);
 }
 
index 7423cb9..5dccf6d 100644 (file)
@@ -411,12 +411,12 @@ static int parse_command(const char __user *buffer, size_t count)
 {
        char s[4];
 
-       if (!count)
-               return 0;
        if (count > 3)
                return -EINVAL;
        if (copy_from_user(s, buffer, count))
                return -EFAULT;
+       if (!count)
+               return 0;
        if (s[count-1] == '\n')
                count--;
        if (count == 1 && s[0] == '0')
index d955b8e..1d480aa 100644 (file)
@@ -54,6 +54,7 @@ int cifsFYI = 0;
 int cifsERROR = 1;
 int traceSMB = 0;
 bool enable_oplocks = true;
+bool no_serverino_autodisable = false;
 unsigned int linuxExtEnabled = 1;
 unsigned int lookupCacheEnabled = 1;
 unsigned int multiuser_mount = 0;
@@ -85,6 +86,7 @@ MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
 module_param(enable_oplocks, bool, 0644);
 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:"
                                 "y/Y/1");
+module_param(no_serverino_autodisable, bool, 0644);
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
index 969f74f..555b576 100644 (file)
@@ -3906,13 +3906,12 @@ CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
        int rc = 0;
        int bytes_returned = 0;
        SET_SEC_DESC_REQ *pSMB = NULL;
-       NTRANSACT_RSP *pSMBr = NULL;
+       void *pSMBr;
 
 setCifsAclRetry:
-       rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB,
-                       (void **) &pSMBr);
+       rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr);
        if (rc)
-                       return (rc);
+               return rc;
 
        pSMB->MaxSetupCount = 0;
        pSMB->Reserved = 0;
@@ -3940,9 +3939,8 @@ setCifsAclRetry:
        pSMB->AclFlags = cpu_to_le32(aclflag);
 
        if (pntsd && acllen) {
-               memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
-                       (char *) pntsd,
-                       acllen);
+               memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) +
+                               data_offset, pntsd, acllen);
                inc_rfc1001_len(pSMB, byte_count + data_count);
        } else
                inc_rfc1001_len(pSMB, byte_count);
@@ -5721,7 +5719,8 @@ CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
        param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
        offset = param_offset + params;
 
-       data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+       data_offset = (char *)pSMB +
+                       offsetof(struct smb_hdr, Protocol) + offset;
 
        count = sizeof(FILE_BASIC_INFO);
        pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -5990,7 +5989,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
                       u16 fid, u32 pid_of_opener)
 {
        struct smb_com_transaction2_sfi_req *pSMB  = NULL;
-       FILE_UNIX_BASIC_INFO *data_offset;
+       char *data_offset;
        int rc = 0;
        u16 params, param_offset, offset, byte_count, count;
 
@@ -6012,8 +6011,9 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
        param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
        offset = param_offset + params;
 
-       data_offset = (FILE_UNIX_BASIC_INFO *)
-                               ((char *)(&pSMB->hdr.Protocol) + offset);
+       data_offset = (char *)pSMB +
+                       offsetof(struct smb_hdr, Protocol) + offset;
+
        count = sizeof(FILE_UNIX_BASIC_INFO);
 
        pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -6035,7 +6035,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
        inc_rfc1001_len(pSMB, byte_count);
        pSMB->ByteCount = cpu_to_le16(byte_count);
 
-       cifs_fill_unix_set_info(data_offset, args);
+       cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args);
 
        rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
        if (rc)
index f0f7a6c..3dcf20b 100644 (file)
@@ -54,7 +54,7 @@
 #define RFC1001_PORT 139
 
 /* SMB echo "timeout" -- FIXME: tunable? */
-#define SMB_ECHO_INTERVAL (60 * HZ)
+#define SMB_ECHO_INTERVAL (6 * HZ)
 
 extern mempool_t *cifs_req_poolp;
 
index 703ef5c..04ab27c 100644 (file)
@@ -645,7 +645,10 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
 void
 cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 {
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+       extern bool no_serverino_autodisable;
+
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+           !no_serverino_autodisable) {
                cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
                cERROR(1, "Autodisabling the use of server inode numbers on "
                           "%s. This server doesn't seem to support them "
index 4bf082d..4a03adb 100644 (file)
@@ -797,8 +797,9 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
        char *dir_page;
        int retval;
 
-       retval = copy_mount_string(type, &kernel_type);
-       if (retval < 0)
+       kernel_type = copy_mount_string(type);
+       retval = PTR_ERR(kernel_type);
+       if (IS_ERR(kernel_type))
                goto out;
 
        dir_page = getname(dir_name);
@@ -806,8 +807,9 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
        if (IS_ERR(dir_page))
                goto out1;
 
-       retval = copy_mount_string(dev_name, &kernel_dev);
-       if (retval < 0)
+       kernel_dev = copy_mount_string(dev_name);
+       retval = PTR_ERR(kernel_dev);
+       if (IS_ERR(kernel_dev))
                goto out2;
 
        retval = copy_mount_options(data, &data_page);
index 90f7657..d501660 100644 (file)
@@ -95,7 +95,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
                                 struct dentry *parent, u8 *value)
 {
        /* if there are no write bits set, make read only */
@@ -147,7 +147,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
                                  struct dentry *parent, u16 *value)
 {
        /* if there are no write bits set, make read only */
@@ -199,7 +199,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
                                 struct dentry *parent, u32 *value)
 {
        /* if there are no write bits set, make read only */
@@ -252,7 +252,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
                                 struct dentry *parent, u64 *value)
 {
        /* if there are no write bits set, make read only */
@@ -298,7 +298,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, "0x%016llx\n
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
                                 struct dentry *parent, u8 *value)
 {
        /* if there are no write bits set, make read only */
@@ -322,7 +322,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8);
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
                                 struct dentry *parent, u16 *value)
 {
        /* if there are no write bits set, make read only */
@@ -346,7 +346,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16);
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
                                 struct dentry *parent, u32 *value)
 {
        /* if there are no write bits set, make read only */
@@ -370,7 +370,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x32);
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
                                 struct dentry *parent, u64 *value)
 {
        return debugfs_create_file(name, mode, parent, value, &fops_x64);
@@ -401,7 +401,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set,
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
                                     struct dentry *parent, size_t *value)
 {
        return debugfs_create_file(name, mode, parent, value, &fops_size_t);
@@ -473,7 +473,7 @@ static const struct file_operations fops_bool = {
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
                                   struct dentry *parent, u32 *value)
 {
        return debugfs_create_file(name, mode, parent, value, &fops_bool);
@@ -518,7 +518,7 @@ static const struct file_operations fops_blob = {
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
                                   struct dentry *parent,
                                   struct debugfs_blob_wrapper *blob)
 {
index 74f03b5..5ccce85 100644 (file)
@@ -30,7 +30,7 @@ static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
 static bool debugfs_registered;
 
-static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev,
+static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev_t dev,
                                       void *data, const struct file_operations *fops)
 
 {
@@ -69,7 +69,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
 
 /* SMP-safe */
 static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
-                        int mode, dev_t dev, void *data,
+                        umode_t mode, dev_t dev, void *data,
                         const struct file_operations *fops)
 {
        struct inode *inode;
@@ -87,7 +87,7 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
        return error;
 }
 
-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode,
                         void *data, const struct file_operations *fops)
 {
        int res;
@@ -101,14 +101,14 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
        return res;
 }
 
-static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_link(struct inode *dir, struct dentry *dentry, umode_t mode,
                        void *data, const struct file_operations *fops)
 {
        mode = (mode & S_IALLUGO) | S_IFLNK;
        return debugfs_mknod(dir, dentry, mode, 0, data, fops);
 }
 
-static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                          void *data, const struct file_operations *fops)
 {
        int res;
@@ -166,7 +166,7 @@ static struct file_system_type debug_fs_type = {
        .kill_sb =      kill_litter_super,
 };
 
-static int debugfs_create_by_name(const char *name, mode_t mode,
+static int debugfs_create_by_name(const char *name, umode_t mode,
                                  struct dentry *parent,
                                  struct dentry **dentry,
                                  void *data,
@@ -234,7 +234,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
  */
-struct dentry *debugfs_create_file(const char *name, mode_t mode,
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
                                   struct dentry *parent, void *data,
                                   const struct file_operations *fops)
 {
index 2262a77..e654e39 100644 (file)
@@ -151,6 +151,11 @@ ecryptfs_get_key_payload_data(struct key *key)
                                          * dentry name */
 #define ECRYPTFS_TAG_73_PACKET_TYPE 0x49 /* FEK-encrypted filename as
                                          * metadata */
+#define ECRYPTFS_MIN_PKT_LEN_SIZE 1 /* Min size to specify packet length */
+#define ECRYPTFS_MAX_PKT_LEN_SIZE 2 /* Pass at least this many bytes to
+                                    * ecryptfs_parse_packet_length() and
+                                    * ecryptfs_write_packet_length()
+                                    */
 /* Constraint: ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES >=
  * ECRYPTFS_MAX_IV_BYTES */
 #define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16
index 5ce56e7..725f9c0 100644 (file)
@@ -109,7 +109,7 @@ int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
                (*size) += ((unsigned char)(data[1]) + 192);
                (*length_size) = 2;
        } else if (data[0] == 255) {
-               /* Five-byte length; we're not supposed to see this */
+               /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
                ecryptfs_printk(KERN_ERR, "Five-byte packet length not "
                                "supported\n");
                rc = -EINVAL;
@@ -126,7 +126,7 @@ out:
 /**
  * ecryptfs_write_packet_length
  * @dest: The byte array target into which to write the length. Must
- *        have at least 5 bytes allocated.
+ *        have at least ECRYPTFS_MAX_PKT_LEN_SIZE bytes allocated.
  * @size: The length to write.
  * @packet_size_length: The number of bytes used to encode the packet
  *                      length is written to this address.
@@ -146,6 +146,7 @@ int ecryptfs_write_packet_length(char *dest, size_t size,
                dest[1] = ((size - 192) % 256);
                (*packet_size_length) = 2;
        } else {
+               /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
                rc = -EINVAL;
                ecryptfs_printk(KERN_WARNING,
                                "Unsupported packet size: [%zd]\n", size);
index de42310..c0038f6 100644 (file)
@@ -223,6 +223,29 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
        return 0;
 }
 
+/*
+ * miscdevfs packet format:
+ *  Octet 0: Type
+ *  Octets 1-4: network byte order msg_ctx->counter
+ *  Octets 5-N0: Size of struct ecryptfs_message to follow
+ *  Octets N0-N1: struct ecryptfs_message (including data)
+ *
+ *  Octets 5-N1 not written if the packet type does not include a message
+ */
+#define PKT_TYPE_SIZE          1
+#define PKT_CTR_SIZE           4
+#define MIN_NON_MSG_PKT_SIZE   (PKT_TYPE_SIZE + PKT_CTR_SIZE)
+#define MIN_MSG_PKT_SIZE       (PKT_TYPE_SIZE + PKT_CTR_SIZE \
+                                + ECRYPTFS_MIN_PKT_LEN_SIZE)
+/* 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES comes from tag 65 packet format */
+#define MAX_MSG_PKT_SIZE       (PKT_TYPE_SIZE + PKT_CTR_SIZE \
+                                + ECRYPTFS_MAX_PKT_LEN_SIZE \
+                                + sizeof(struct ecryptfs_message) \
+                                + 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)
+#define PKT_TYPE_OFFSET                0
+#define PKT_CTR_OFFSET         PKT_TYPE_SIZE
+#define PKT_LEN_OFFSET         (PKT_TYPE_SIZE + PKT_CTR_SIZE)
+
 /**
  * ecryptfs_miscdev_read - format and send message from queue
  * @file: fs/ecryptfs/euid miscdevfs handle (ignored)
@@ -242,7 +265,7 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        struct ecryptfs_daemon *daemon;
        struct ecryptfs_msg_ctx *msg_ctx;
        size_t packet_length_size;
-       char packet_length[3];
+       char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE];
        size_t i;
        size_t total_length;
        uid_t euid = current_euid();
@@ -315,15 +338,8 @@ check_list:
                packet_length_size = 0;
                msg_ctx->msg_size = 0;
        }
-       /* miscdevfs packet format:
-        *  Octet 0: Type
-        *  Octets 1-4: network byte order msg_ctx->counter
-        *  Octets 5-N0: Size of struct ecryptfs_message to follow
-        *  Octets N0-N1: struct ecryptfs_message (including data)
-        *
-        *  Octets 5-N1 not written if the packet type does not
-        *  include a message */
-       total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size);
+       total_length = (PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_length_size
+                       + msg_ctx->msg_size);
        if (count < total_length) {
                rc = 0;
                printk(KERN_WARNING "%s: Only given user buffer of "
@@ -334,9 +350,10 @@ check_list:
        rc = -EFAULT;
        if (put_user(msg_ctx->type, buf))
                goto out_unlock_msg_ctx;
-       if (put_user(cpu_to_be32(msg_ctx->counter), (__be32 __user *)(buf + 1)))
+       if (put_user(cpu_to_be32(msg_ctx->counter),
+                    (__be32 __user *)(&buf[PKT_CTR_OFFSET])))
                goto out_unlock_msg_ctx;
-       i = 5;
+       i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
        if (msg_ctx->msg) {
                if (copy_to_user(&buf[i], packet_length, packet_length_size))
                        goto out_unlock_msg_ctx;
@@ -401,12 +418,6 @@ out:
  * @count: Amount of data in @buf
  * @ppos: Pointer to offset in file (ignored)
  *
- * miscdevfs packet format:
- *  Octet 0: Type
- *  Octets 1-4: network byte order msg_ctx->counter (0's for non-response)
- *  Octets 5-N0: Size of struct ecryptfs_message to follow
- *  Octets N0-N1: struct ecryptfs_message (including data)
- *
  * Returns the number of bytes read from @buf
  */
 static ssize_t
@@ -415,30 +426,25 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
 {
        __be32 counter_nbo;
        u32 seq;
-       size_t packet_size, packet_size_length, i;
-       ssize_t sz = 0;
+       size_t packet_size, packet_size_length;
        char *data;
        uid_t euid = current_euid();
-       unsigned char packet_size_peek[3];
-       int rc;
+       unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE];
+       ssize_t rc;
 
        if (count == 0) {
-               goto out;
-       } else if (count == (1 + 4)) {
+               return 0;
+       } else if (count == MIN_NON_MSG_PKT_SIZE) {
                /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
                goto memdup;
-       } else if (count < (1 + 4 + 1)
-                  || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
-                              + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
+       } else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) {
                printk(KERN_WARNING "%s: Acceptable packet size range is "
-                      "[%d-%lu], but amount of data written is [%zu].",
-                      __func__, (1 + 4 + 1),
-                      (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
-                       + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
+                      "[%d-%zu], but amount of data written is [%zu].",
+                      __func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count);
                return -EINVAL;
        }
 
-       if (copy_from_user(packet_size_peek, (buf + 1 + 4),
+       if (copy_from_user(packet_size_peek, &buf[PKT_LEN_OFFSET],
                           sizeof(packet_size_peek))) {
                printk(KERN_WARNING "%s: Error while inspecting packet size\n",
                       __func__);
@@ -449,11 +455,12 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
                                          &packet_size_length);
        if (rc) {
                printk(KERN_WARNING "%s: Error parsing packet length; "
-                      "rc = [%d]\n", __func__, rc);
+                      "rc = [%zd]\n", __func__, rc);
                return rc;
        }
 
-       if ((1 + 4 + packet_size_length + packet_size) != count) {
+       if ((PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_size_length + packet_size)
+           != count) {
                printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
                       packet_size);
                return -EINVAL;
@@ -464,31 +471,33 @@ memdup:
        if (IS_ERR(data)) {
                printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
                       __func__, PTR_ERR(data));
-               goto out;
+               return PTR_ERR(data);
        }
-       sz = count;
-       i = 0;
-       switch (data[i++]) {
+       switch (data[PKT_TYPE_OFFSET]) {
        case ECRYPTFS_MSG_RESPONSE:
-               if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) {
+               if (count < (MIN_MSG_PKT_SIZE
+                            + sizeof(struct ecryptfs_message))) {
                        printk(KERN_WARNING "%s: Minimum acceptable packet "
                               "size is [%zd], but amount of data written is "
                               "only [%zd]. Discarding response packet.\n",
                               __func__,
-                              (1 + 4 + 1 + sizeof(struct ecryptfs_message)),
-                              count);
+                              (MIN_MSG_PKT_SIZE
+                               + sizeof(struct ecryptfs_message)), count);
+                       rc = -EINVAL;
                        goto out_free;
                }
-               memcpy(&counter_nbo, &data[i], 4);
+               memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE);
                seq = be32_to_cpu(counter_nbo);
-               i += 4 + packet_size_length;
-               rc = ecryptfs_miscdev_response(&data[i], packet_size,
-                                              euid, current_user_ns(),
-                                              task_pid(current), seq);
-               if (rc)
+               rc = ecryptfs_miscdev_response(
+                               &data[PKT_LEN_OFFSET + packet_size_length],
+                               packet_size, euid, current_user_ns(),
+                               task_pid(current), seq);
+               if (rc) {
                        printk(KERN_WARNING "%s: Failed to deliver miscdev "
-                              "response to requesting operation; rc = [%d]\n",
+                              "response to requesting operation; rc = [%zd]\n",
                               __func__, rc);
+                       goto out_free;
+               }
                break;
        case ECRYPTFS_MSG_HELO:
        case ECRYPTFS_MSG_QUIT:
@@ -497,12 +506,13 @@ memdup:
                ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
                                "message of unrecognized type [%d]\n",
                                data[0]);
-               break;
+               rc = -EINVAL;
+               goto out_free;
        }
+       rc = count;
 out_free:
        kfree(data);
-out:
-       return sz;
+       return rc;
 }
 
 
index a0006d8..0416d69 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2118,8 +2118,8 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
        fd_install(0, rp);
        spin_lock(&cf->file_lock);
        fdt = files_fdtable(cf);
-       FD_SET(0, fdt->open_fds);
-       FD_CLR(0, fdt->close_on_exec);
+       __set_open_fd(0, fdt);
+       __clear_close_on_exec(0, fdt);
        spin_unlock(&cf->file_lock);
 
        /* and disallow core files too */
index aca191b..65e174b 100644 (file)
@@ -754,6 +754,13 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
        return ret;
 }
 
+static int fat_ioctl_volume_id(struct inode *dir)
+{
+       struct super_block *sb = dir->i_sb;
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+       return sbi->vol_id;
+}
+
 static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
                          unsigned long arg)
 {
@@ -770,6 +777,8 @@ static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
                short_only = 0;
                both = 1;
                break;
+       case VFAT_IOCTL_GET_VOLUME_ID:
+               return fat_ioctl_volume_id(inode);
        default:
                return fat_generic_ioctl(filp, cmd, arg);
        }
index 1510a4d..71af868 100644 (file)
@@ -78,6 +78,7 @@ struct msdos_sb_info {
        const void *dir_ops;                 /* Opaque; default directory operations */
        int dir_per_block;           /* dir entries per block */
        int dir_per_block_bits;      /* log2(dir_per_block) */
+       unsigned long vol_id;        /* volume ID */
 
        int fatent_shift;
        struct fatent_operations *fatent_ops;
index fc33ca1..04997de 100644 (file)
@@ -1260,6 +1260,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        struct inode *root_inode = NULL, *fat_inode = NULL;
        struct buffer_head *bh;
        struct fat_boot_sector *b;
+       struct fat_boot_bsx *bsx;
        struct msdos_sb_info *sbi;
        u16 logical_sector_size;
        u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
@@ -1404,6 +1405,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                        goto out_fail;
                }
 
+               bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
                fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
                if (!IS_FSINFO(fsinfo)) {
                        fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
@@ -1419,8 +1422,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                }
 
                brelse(fsinfo_bh);
+       } else {
+               bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
        }
 
+       /* interpret volume ID as a little endian 32 bit integer */
+       sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+               ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
+
        sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
        sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
 
index 22764c7..75e7c1f 100644 (file)
@@ -32,20 +32,20 @@ void set_close_on_exec(unsigned int fd, int flag)
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
-               FD_SET(fd, fdt->close_on_exec);
+               __set_close_on_exec(fd, fdt);
        else
-               FD_CLR(fd, fdt->close_on_exec);
+               __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);
 }
 
-static int get_close_on_exec(unsigned int fd)
+static bool get_close_on_exec(unsigned int fd)
 {
        struct files_struct *files = current->files;
        struct fdtable *fdt;
-       int res;
+       bool res;
        rcu_read_lock();
        fdt = files_fdtable(files);
-       res = FD_ISSET(fd, fdt->close_on_exec);
+       res = close_on_exec(fd, fdt);
        rcu_read_unlock();
        return res;
 }
@@ -90,15 +90,15 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
        err = -EBUSY;
        fdt = files_fdtable(files);
        tofree = fdt->fd[newfd];
-       if (!tofree && FD_ISSET(newfd, fdt->open_fds))
+       if (!tofree && fd_is_open(newfd, fdt))
                goto out_unlock;
        get_file(file);
        rcu_assign_pointer(fdt->fd[newfd], file);
-       FD_SET(newfd, fdt->open_fds);
+       __set_open_fd(newfd, fdt);
        if (flags & O_CLOEXEC)
-               FD_SET(newfd, fdt->close_on_exec);
+               __set_close_on_exec(newfd, fdt);
        else
-               FD_CLR(newfd, fdt->close_on_exec);
+               __clear_close_on_exec(newfd, fdt);
        spin_unlock(&files->file_lock);
 
        if (tofree)
index 30bfc99..8df13b0 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -268,6 +268,7 @@ int expand_files(struct files_struct *files, int nr)
        /* All good, so we try */
        return expand_fdtable(files, nr);
 }
+EXPORT_SYMBOL_GPL(expand_files);
 
 static int count_open_files(struct fdtable *fdt)
 {
@@ -366,7 +367,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
-                       FD_CLR(open_files - i, new_fdt->open_fds);
+                       __clear_open_fd(open_files - i, new_fdt);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
@@ -460,11 +461,11 @@ repeat:
        if (start <= files->next_fd)
                files->next_fd = fd + 1;
 
-       FD_SET(fd, fdt->open_fds);
+       __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
-               FD_SET(fd, fdt->close_on_exec);
+               __set_close_on_exec(fd, fdt);
        else
-               FD_CLR(fd, fdt->close_on_exec);
+               __clear_close_on_exec(fd, fdt);
        error = fd;
 #if 1
        /* Sanity check */
index c322794..2aad244 100644 (file)
@@ -443,6 +443,8 @@ void file_sb_list_del(struct file *file)
        }
 }
 
+EXPORT_SYMBOL(file_sb_list_del);
+
 #ifdef CONFIG_SMP
 
 /*
index e2d3633..d62cb51 100644 (file)
@@ -65,6 +65,7 @@ static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+EXPORT_SYMBOL(inode_sb_list_lock);
 
 /*
  * Empty aops. Can be used for the cases where the user does not
index fe327c2..03e1a15 100644 (file)
@@ -60,7 +60,7 @@ extern int check_unsafe_exec(struct linux_binprm *);
  * namespace.c
  */
 extern int copy_mount_options(const void __user *, unsigned long *);
-extern int copy_mount_string(const void __user *, char **);
+extern char *copy_mount_string(const void __user *);
 
 extern unsigned int mnt_get_count(struct vfsmount *mnt);
 extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
index 2c22655..9af0125 100644 (file)
@@ -1788,10 +1788,11 @@ static struct dentry *__lookup_hash(struct qstr *name,
  * needs parent already locked. Doesn't follow mounts.
  * SMP-safe.
  */
-static struct dentry *lookup_hash(struct nameidata *nd)
+struct dentry *lookup_hash(struct nameidata *nd)
 {
        return __lookup_hash(&nd->last, nd->path.dentry, nd);
 }
+EXPORT_SYMBOL(lookup_hash);
 
 /**
  * lookup_one_len - filesystem helper to lookup single pathname component
index a1e663d..1f35032 100644 (file)
@@ -1507,6 +1507,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
        }
        return 0;
 }
+EXPORT_SYMBOL(iterate_mounts);
 
 static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
 {
@@ -2253,21 +2254,9 @@ int copy_mount_options(const void __user * data, unsigned long *where)
        return 0;
 }
 
-int copy_mount_string(const void __user *data, char **where)
+char *copy_mount_string(const void __user *data)
 {
-       char *tmp;
-
-       if (!data) {
-               *where = NULL;
-               return 0;
-       }
-
-       tmp = strndup_user(data, PAGE_SIZE);
-       if (IS_ERR(tmp))
-               return PTR_ERR(tmp);
-
-       *where = tmp;
-       return 0;
+       return data ? strndup_user(data, PAGE_SIZE) : NULL;
 }
 
 /*
@@ -2535,8 +2524,9 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
        char *kernel_dev;
        unsigned long data_page;
 
-       ret = copy_mount_string(type, &kernel_type);
-       if (ret < 0)
+       kernel_type = copy_mount_string(type);
+       ret = PTR_ERR(kernel_type);
+       if (IS_ERR(kernel_type))
                goto out_type;
 
        kernel_dir = getname(dir_name);
@@ -2545,8 +2535,9 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
                goto out_dir;
        }
 
-       ret = copy_mount_string(dev_name, &kernel_dev);
-       if (ret < 0)
+       kernel_dev = copy_mount_string(dev_name);
+       ret = PTR_ERR(kernel_dev);
+       if (IS_ERR(kernel_dev))
                goto out_dev;
 
        ret = copy_mount_options(data, &data_page);
index 63fc294..6f4adca 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/srcu.h>
 #include <linux/rculist.h>
 #include <linux/wait.h>
+#include <linux/module.h>
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
@@ -70,6 +71,7 @@ void fsnotify_put_group(struct fsnotify_group *group)
        if (atomic_dec_and_test(&group->refcnt))
                fsnotify_destroy_group(group);
 }
+EXPORT_SYMBOL(fsnotify_put_group);
 
 /*
  * Create a new fsnotify_group and hold a reference for the group returned.
@@ -102,3 +104,4 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
 
        return group;
 }
+EXPORT_SYMBOL(fsnotify_alloc_group);
index f104d56..54f36db 100644 (file)
@@ -112,6 +112,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
        if (atomic_dec_and_test(&mark->refcnt))
                mark->free_mark(mark);
 }
+EXPORT_SYMBOL(fsnotify_put_mark);
 
 /*
  * Any time a mark is getting freed we end up here.
@@ -191,6 +192,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
        if (unlikely(atomic_dec_and_test(&group->num_marks)))
                fsnotify_final_destroy_group(group);
 }
+EXPORT_SYMBOL(fsnotify_destroy_mark);
 
 void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
 {
@@ -278,6 +280,7 @@ err:
 
        return ret;
 }
+EXPORT_SYMBOL(fsnotify_add_mark);
 
 /*
  * clear any marks in a group in which mark->flags & flags is true
@@ -333,6 +336,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
        atomic_set(&mark->refcnt, 1);
        mark->free_mark = free_mark;
 }
+EXPORT_SYMBOL(fsnotify_init_mark);
 
 static int fsnotify_mark_destroy(void *ignored)
 {
index dc45deb..73ba819 100644 (file)
@@ -553,7 +553,7 @@ void o2net_debugfs_exit(void)
 
 int o2net_debugfs_init(void)
 {
-       mode_t mode = S_IFREG|S_IRUSR;
+       umode_t mode = S_IFREG|S_IRUSR;
 
        o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
        if (o2net_dentry)
index ca155d4..4dbdde5 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -60,6 +60,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
        mutex_unlock(&dentry->d_inode->i_mutex);
        return ret;
 }
+EXPORT_SYMBOL(do_truncate);
 
 static long do_sys_truncate(const char __user *pathname, loff_t length)
 {
@@ -836,7 +837,7 @@ EXPORT_SYMBOL(dentry_open);
 static void __put_unused_fd(struct files_struct *files, unsigned int fd)
 {
        struct fdtable *fdt = files_fdtable(files);
-       __FD_CLR(fd, fdt->open_fds);
+       __clear_open_fd(fd, fdt);
        if (fd < files->next_fd)
                files->next_fd = fd;
 }
@@ -1079,7 +1080,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
        if (!filp)
                goto out_unlock;
        rcu_assign_pointer(fdt->fd[fd], NULL);
-       FD_CLR(fd, fdt->close_on_exec);
+       __clear_close_on_exec(fd, fdt);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
        retval = filp_close(filp, files);
index 402976a..48c7016 100644 (file)
@@ -1831,7 +1831,7 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
 
                        fdt = files_fdtable(files);
                        f_flags = file->f_flags & ~O_CLOEXEC;
-                       if (FD_ISSET(fd, fdt->close_on_exec))
+                       if (close_on_exec(fd, fdt))
                                f_flags |= O_CLOEXEC;
 
                        if (path) {
@@ -2716,6 +2716,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        ONE("stat",       S_IRUGO, proc_tgid_stat),
        ONE("statm",      S_IRUGO, proc_pid_statm),
        REG("maps",       S_IRUGO, proc_maps_operations),
+       REG("arm_maps",   S_IRUGO, proc_armv7_maps_operations),
 #ifdef CONFIG_NUMA
        REG("numa_maps",  S_IRUGO, proc_numa_maps_operations),
 #endif
@@ -3062,6 +3063,7 @@ static const struct pid_entry tid_base_stuff[] = {
        ONE("stat",      S_IRUGO, proc_tid_stat),
        ONE("statm",     S_IRUGO, proc_pid_statm),
        REG("maps",      S_IRUGO, proc_maps_operations),
+       REG("arm_maps",  S_IRUGO, proc_armv7_maps_operations),
 #ifdef CONFIG_NUMA
        REG("numa_maps", S_IRUGO, proc_numa_maps_operations),
 #endif
index 7838e5c..1723caa 100644 (file)
@@ -54,6 +54,7 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
 
 extern const struct file_operations proc_maps_operations;
+extern const struct file_operations proc_armv7_maps_operations;
 extern const struct file_operations proc_numa_maps_operations;
 extern const struct file_operations proc_smaps_operations;
 extern const struct file_operations proc_clear_refs_operations;
index de404f2..8e1b444 100644 (file)
@@ -210,6 +210,196 @@ static int do_maps_open(struct inode *inode, struct file *file,
        return ret;
 }
 
+#ifdef __arm__
+static void show_arm_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+{
+       static const char *cache_attrs4[4] = { "noC", "WB-WA", "WT-noWA", "WB-noWA" };
+       struct mm_struct *mm = vma->vm_mm;
+       struct file *file = vma->vm_file;
+       vm_flags_t flags = vma->vm_flags;
+       unsigned long start, end, end_b;
+       const char *name = NULL;
+       u32 *arm_pgd;
+       u32 *cpt, *cpt_e;
+       u32 desc1, desc2;
+       u32 tex_cb = 0;
+       u32 prrr, nmrr = 0;
+       u32 control = 0;
+       u32 xn = 1, ap = 0;
+       int desc_type;
+       int type;
+       char buf[64];
+       char rw[4];
+       int len;
+       int s;
+
+       if (mm == NULL)
+               return;
+
+       if (!file) {
+               name = arch_vma_name(vma);
+               if (!name) {
+                       if (vma->vm_start <= mm->brk &&
+                                       vma->vm_end >= mm->start_brk) {
+                               name = "[heap]";
+                       } else if (vma->vm_start <= mm->start_stack &&
+                                  vma->vm_end >= mm->start_stack) {
+                               name = "[stack]";
+                       }
+               }
+       }
+
+       arm_pgd = (u32 *)mm->pgd;
+
+       asm ("mrc p15, 0, %0, c1, c0, 0" : "=r"(control));
+       asm ("mrc p15, 0, %0, c10, c2, 0" : "=r"(prrr)); // primary region RR
+       asm ("mrc p15, 0, %0, c10, c2, 1" : "=r"(nmrr)); // normal memory RR
+
+       start = vma->vm_start;
+       end = vma->vm_end;
+
+       while (start < end) {
+               desc_type = '-';
+
+               desc1 = arm_pgd[start >> 20];
+
+               end_b = (start & ~0xfffff) + 0x100000;
+               for (; end_b < end; end_b += 0x100000)
+                       if ((arm_pgd[end_b >> 20] ^ desc1) & 0xfffff)
+                               break;
+
+               switch (desc1 & 3) {
+               case 0:
+                       sprintf(buf, "l1_fault");
+                       goto do_output;
+               case 1:
+                       break;
+               case 2:
+                       tex_cb = ((desc1 >> 2) & 0x03) | ((desc1 >> 10) & 0x1c);
+                       s = (desc1 >> 16) & 1;
+                       xn = (desc1 >> 4) & 1;
+                       ap = ((desc1 >> 10) & 3) | ((desc1 >> 13) & 4);
+                       desc_type = (desc1 & (1 << 18)) ? 's' : 'h';
+                       goto do_tex_cb;
+               case 3:
+                       sprintf(buf, "reserved");
+                       goto do_output;
+               }
+
+               cpt = __va(desc1 & 0xfffffc00);
+               desc2 = cpt[(start >> 12) & 0xff];
+
+               // find end
+               cpt_e = cpt;
+               for (end_b = start + 0x1000; end_b < end; end_b += 0x1000) {
+                       if ((end_b & 0x000ff000) == 0) {
+                               cpt_e = __va(arm_pgd[end_b >> 20] & 0xfffffc00);
+                               if ((arm_pgd[end_b >> 20] ^ desc1) & 0x3ff)
+                                       break;
+                       }
+
+                       // assume small pages
+                       if ((cpt_e[(end_b >> 12) & 0xff] ^ desc2) & 0xfff)
+                               break;
+               }
+
+               switch (desc2 & 3) {
+               case 0:
+                       sprintf(buf, "l2_fault");
+                       goto do_output;
+               case 1:
+                       tex_cb = ((desc2 >> 2) & 0x03) | ((desc2 >> 10) & 0x1c);
+                       s = (desc2 >> 10) & 1;
+                       xn = (desc2 >> 15) & 1;
+                       ap = ((desc2 >> 4) & 3) | ((desc2 >> 7) & 4);
+                       break;
+               case 2:
+               case 3:
+                       tex_cb = ((desc2 >> 2) & 0x03) | ((desc2 >> 4) & 0x1c);
+                       s = (desc2 >> 10) & 1;
+                       xn = desc2 & 1;
+                       ap = ((desc2 >> 4) & 3) | ((desc2 >> 7) & 4);
+                       break;
+               }
+
+do_tex_cb:
+               if (control & (1 << 28)) { // TEX remap
+                       // S (shareable) bit remapping
+                       char s_normal[2] = { (prrr >> 18) & 1, (prrr >> 19) & 1 };
+                       char s_device[2] = { (prrr >> 16) & 1, (prrr >> 17) & 1 };
+
+                       buf[0] = 0;
+                       tex_cb &= 7;
+                       type = (prrr >> tex_cb * 2) & 3;
+                       switch (type) {
+                       case 0:
+                               sprintf(buf, "strongly-ordered");
+                               break;
+                       case 1:
+                               sprintf(buf, "device");
+                               s = s_device[s];
+                               break;
+                       case 3:
+                               sprintf(buf, "reserved/normal");
+                       case 2:
+                               s = s_normal[s];
+                               sprintf(buf + strlen(buf), "inner-%s-outer-%s",
+                                       cache_attrs4[(nmrr >> tex_cb * 2) & 3],
+                                       cache_attrs4[(nmrr >> (tex_cb * 2 + 16)) & 3]);
+                       }
+               }
+               else if (tex_cb & 0x10) { // TEX[2] set
+                       sprintf(buf, "inner-%s-outer-%s",
+                               cache_attrs4[tex_cb & 3], cache_attrs4[(tex_cb >> 2) & 3]);
+               }
+               else {
+                       switch (tex_cb) {
+                       case 0x00: sprintf(buf, "strongly-ordered"); s = 1; break;
+                       case 0x01: sprintf(buf, "shareable-device"); s = 1; break;
+                       case 0x02: sprintf(buf, "inner-outer-WT-noWA"); break;
+                       case 0x03: sprintf(buf, "inner-outer-WB-noWA"); break;
+                       case 0x04: sprintf(buf, "inner-outer-non-cacheable"); break;
+                       case 0x06: sprintf(buf, "implementation-defined"); break;
+                       case 0x07: sprintf(buf, "inner-outer-WB-WA"); break;
+                       case 0x08: sprintf(buf, "non-shareable-device"); s = 0; break;
+                       default:   sprintf(buf, "reserved"); break;
+                       }
+               }
+
+               if (s)
+                       sprintf(buf + strlen(buf), "-shareable");
+
+do_output:
+               // use user permissions here
+               if (control & (1 << 29)) // AFE
+                       sprintf(rw, "%c%c", (ap & 2) ? 'r' : '-',
+                               ((ap & 2) && !(ap & 4)) ? 'w' : '-');
+               else
+                       sprintf(rw, "%c%c", (ap & 2) ? 'r' : '-',
+                               (ap == 3) ? 'w' : '-');
+
+               seq_printf(m, "%08lx-%08lx %s%c%c%c %-28s %n",
+                               start, end_b,
+                               rw,
+                               xn ? '-' : 'x',
+                               flags & VM_MAYSHARE ? 's' : 'p',
+                               desc_type,
+                               buf, &len);
+
+               if (file) {
+                       pad_len_spaces(m, len);
+                       seq_path(m, &file->f_path, "\n");
+               } else if (name) {
+                       pad_len_spaces(m, len);
+                       seq_puts(m, name);
+               }
+               seq_putc(m, '\n');
+
+               start = end_b;
+       }
+}
+#endif
+
 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -309,6 +499,41 @@ const struct file_operations proc_maps_operations = {
        .release        = seq_release_private,
 };
 
+#ifdef __arm__
+static int show_armv7_map(struct seq_file *m, void *v)
+{
+       struct vm_area_struct *vma = v;
+       struct proc_maps_private *priv = m->private;
+       struct task_struct *task = priv->task;
+
+       show_arm_map_vma(m, vma);
+
+       if (m->count < m->size)  /* vma is copied successfully */
+               m->version = (vma != get_gate_vma(task->mm))
+                       ? vma->vm_start : 0;
+       return 0;
+}
+
+static const struct seq_operations proc_pid_armv7_maps_op = {
+       .start  = m_start,
+       .next   = m_next,
+       .stop   = m_stop,
+       .show   = show_armv7_map
+};
+#endif
+
+static int armv7_maps_open(struct inode *inode, struct file *file)
+{
+       return do_maps_open(inode, file, &proc_pid_armv7_maps_op);
+}
+
+const struct file_operations proc_armv7_maps_operations = {
+       .open           = armv7_maps_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
 /*
  * Proportional Set Size(PSS): my share of RSS.
  *
index 459128b..e4b5500 100644 (file)
@@ -1127,8 +1127,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
 /*
  * Attempt to initiate a splice from pipe to file.
  */
-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-                          loff_t *ppos, size_t len, unsigned int flags)
+long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+                   loff_t *ppos, size_t len, unsigned int flags)
 {
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
                                loff_t *, size_t, unsigned int);
@@ -1151,13 +1151,14 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
 
        return splice_write(pipe, out, ppos, len, flags);
 }
+EXPORT_SYMBOL(do_splice_from);
 
 /*
  * Attempt to initiate a splice from a file to a pipe.
  */
-static long do_splice_to(struct file *in, loff_t *ppos,
-                        struct pipe_inode_info *pipe, size_t len,
-                        unsigned int flags)
+long do_splice_to(struct file *in, loff_t *ppos,
+                 struct pipe_inode_info *pipe, size_t len,
+                 unsigned int flags)
 {
        ssize_t (*splice_read)(struct file *, loff_t *,
                               struct pipe_inode_info *, size_t, unsigned int);
@@ -1177,6 +1178,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
 
        return splice_read(in, ppos, pipe, len, flags);
 }
+EXPORT_SYMBOL(do_splice_to);
 
 /**
  * splice_direct_to_actor - splices data directly between two non-pipes
index f8b0160..ba66d50 100644 (file)
@@ -11,12 +11,6 @@ config UBIFS_FS
        help
          UBIFS is a file system for flash devices which works on top of UBI.
 
-config UBIFS_FS_XATTR
-       bool "Extended attributes support"
-       depends on UBIFS_FS
-       help
-         This option enables support of extended attributes.
-
 config UBIFS_FS_ADVANCED_COMPR
        bool "Advanced compression options"
        depends on UBIFS_FS
@@ -41,20 +35,3 @@ config UBIFS_FS_ZLIB
        default y
        help
          Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
-
-# Debugging-related stuff
-config UBIFS_FS_DEBUG
-       bool "Enable debugging support"
-       depends on UBIFS_FS
-       select DEBUG_FS
-       select KALLSYMS
-       help
-         This option enables UBIFS debugging support. It makes sure various
-         assertions, self-checks, debugging messages and test modes are compiled
-         in (this all is compiled out otherwise). Assertions are light-weight
-         and this option also enables them. Self-checks, debugging messages and
-         test modes are switched off by default. Thus, it is safe and actually
-         recommended to have debugging support enabled, and it should not slow
-         down UBIFS. You can then further enable / disable individual  debugging
-         features using UBIFS module parameters and the corresponding sysfs
-         interfaces.
index 80e93c3..2c6f0cb 100644 (file)
@@ -3,7 +3,4 @@ obj-$(CONFIG_UBIFS_FS) += ubifs.o
 ubifs-y += shrinker.o journal.o file.o dir.o super.o sb.o io.o
 ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o
 ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o
-ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o
-
-ubifs-$(CONFIG_UBIFS_FS_DEBUG) += debug.o
-ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o
+ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o xattr.o debug.o
index bc4f94b..ea9c814 100644 (file)
@@ -342,9 +342,8 @@ static int do_budget_space(struct ubifs_info *c)
        lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
               c->lst.taken_empty_lebs;
        if (unlikely(rsvd_idx_lebs > lebs)) {
-               dbg_budg("out of indexing space: min_idx_lebs %d (old %d), "
-                        "rsvd_idx_lebs %d", min_idx_lebs, c->bi.min_idx_lebs,
-                        rsvd_idx_lebs);
+               dbg_budg("out of indexing space: min_idx_lebs %d (old %d), rsvd_idx_lebs %d",
+                        min_idx_lebs, c->bi.min_idx_lebs, rsvd_idx_lebs);
                return -ENOSPC;
        }
 
index b2ca12f..26b69b2 100644 (file)
@@ -289,8 +289,8 @@ int ubifs_bg_thread(void *info)
        int err;
        struct ubifs_info *c = info;
 
-       dbg_msg("background thread \"%s\" started, PID %d",
-               c->bgt_name, current->pid);
+       ubifs_msg("background thread \"%s\" started, PID %d",
+                 c->bgt_name, current->pid);
        set_freezable();
 
        while (1) {
@@ -324,7 +324,7 @@ int ubifs_bg_thread(void *info)
                cond_resched();
        }
 
-       dbg_msg("background thread \"%s\" stops", c->bgt_name);
+       ubifs_msg("background thread \"%s\" stops", c->bgt_name);
        return 0;
 }
 
@@ -492,7 +492,9 @@ int ubifs_gc_should_commit(struct ubifs_info *c)
        return ret;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
+/*
+ * Everything below is related to debugging.
+ */
 
 /**
  * struct idx_node - hold index nodes during index tree traversal.
@@ -508,7 +510,7 @@ struct idx_node {
        struct list_head list;
        int iip;
        union ubifs_key upper_key;
-       struct ubifs_idx_node idx __attribute__((aligned(8)));
+       struct ubifs_idx_node idx __aligned(8);
 };
 
 /**
@@ -710,14 +712,14 @@ out:
        return 0;
 
 out_dump:
-       dbg_err("dumping index node (iip=%d)", i->iip);
-       dbg_dump_node(c, idx);
+       ubifs_err("dumping index node (iip=%d)", i->iip);
+       ubifs_dump_node(c, idx);
        list_del(&i->list);
        kfree(i);
        if (!list_empty(&list)) {
                i = list_entry(list.prev, struct idx_node, list);
-               dbg_err("dumping parent index node");
-               dbg_dump_node(c, &i->idx);
+               ubifs_err("dumping parent index node");
+               ubifs_dump_node(c, &i->idx);
        }
 out_free:
        while (!list_empty(&list)) {
@@ -730,5 +732,3 @@ out_free:
                err = -EINVAL;
        return err;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index 11e4132..2bfa095 100644 (file)
@@ -112,8 +112,7 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
        if (compr->comp_mutex)
                mutex_unlock(compr->comp_mutex);
        if (unlikely(err)) {
-               ubifs_warn("cannot compress %d bytes, compressor %s, "
-                          "error %d, leave data uncompressed",
+               ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
                           in_len, compr->name, err);
                 goto no_compr;
        }
@@ -176,8 +175,8 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
        if (compr->decomp_mutex)
                mutex_unlock(compr->decomp_mutex);
        if (err)
-               ubifs_err("cannot decompress %d bytes, compressor %s, "
-                         "error %d", in_len, compr->name, err);
+               ubifs_err("cannot decompress %d bytes, compressor %s, error %d",
+                         in_len, compr->name, err);
 
        return err;
 }
index b09ba2d..c668c28 100644 (file)
 #include <linux/random.h>
 #include "ubifs.h"
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
-
-DEFINE_SPINLOCK(dbg_lock);
-
-static char dbg_key_buf0[128];
-static char dbg_key_buf1[128];
+static DEFINE_SPINLOCK(dbg_lock);
 
 static const char *get_key_fmt(int fmt)
 {
@@ -103,8 +98,8 @@ static const char *get_dent_type(int type)
        }
 }
 
-static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
-                       char *buffer)
+const char *dbg_snprintf_key(const struct ubifs_info *c,
+                            const union ubifs_key *key, char *buffer, int len)
 {
        char *p = buffer;
        int type = key_type(c, key);
@@ -112,45 +107,34 @@ static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
        if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
                switch (type) {
                case UBIFS_INO_KEY:
-                       sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
-                              get_key_type(type));
+                       len -= snprintf(p, len, "(%lu, %s)",
+                                       (unsigned long)key_inum(c, key),
+                                       get_key_type(type));
                        break;
                case UBIFS_DENT_KEY:
                case UBIFS_XENT_KEY:
-                       sprintf(p, "(%lu, %s, %#08x)",
-                               (unsigned long)key_inum(c, key),
-                               get_key_type(type), key_hash(c, key));
+                       len -= snprintf(p, len, "(%lu, %s, %#08x)",
+                                       (unsigned long)key_inum(c, key),
+                                       get_key_type(type), key_hash(c, key));
                        break;
                case UBIFS_DATA_KEY:
-                       sprintf(p, "(%lu, %s, %u)",
-                               (unsigned long)key_inum(c, key),
-                               get_key_type(type), key_block(c, key));
+                       len -= snprintf(p, len, "(%lu, %s, %u)",
+                                       (unsigned long)key_inum(c, key),
+                                       get_key_type(type), key_block(c, key));
                        break;
                case UBIFS_TRUN_KEY:
-                       sprintf(p, "(%lu, %s)",
-                               (unsigned long)key_inum(c, key),
-                               get_key_type(type));
+                       len -= snprintf(p, len, "(%lu, %s)",
+                                       (unsigned long)key_inum(c, key),
+                                       get_key_type(type));
                        break;
                default:
-                       sprintf(p, "(bad key type: %#08x, %#08x)",
-                               key->u32[0], key->u32[1]);
+                       len -= snprintf(p, len, "(bad key type: %#08x, %#08x)",
+                                       key->u32[0], key->u32[1]);
                }
        } else
-               sprintf(p, "bad key format %d", c->key_fmt);
-}
-
-const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
-{
-       /* dbg_lock must be held */
-       sprintf_key(c, key, dbg_key_buf0);
-       return dbg_key_buf0;
-}
-
-const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
-{
-       /* dbg_lock must be held */
-       sprintf_key(c, key, dbg_key_buf1);
-       return dbg_key_buf1;
+               len -= snprintf(p, len, "bad key format %d", c->key_fmt);
+       ubifs_assert(len > 0);
+       return p;
 }
 
 const char *dbg_ntype(int type)
@@ -235,18 +219,18 @@ const char *dbg_jhead(int jhead)
 
 static void dump_ch(const struct ubifs_ch *ch)
 {
-       printk(KERN_DEBUG "\tmagic          %#x\n", le32_to_cpu(ch->magic));
-       printk(KERN_DEBUG "\tcrc            %#x\n", le32_to_cpu(ch->crc));
-       printk(KERN_DEBUG "\tnode_type      %d (%s)\n", ch->node_type,
+       pr_err("\tmagic          %#x\n", le32_to_cpu(ch->magic));
+       pr_err("\tcrc            %#x\n", le32_to_cpu(ch->crc));
+       pr_err("\tnode_type      %d (%s)\n", ch->node_type,
               dbg_ntype(ch->node_type));
-       printk(KERN_DEBUG "\tgroup_type     %d (%s)\n", ch->group_type,
+       pr_err("\tgroup_type     %d (%s)\n", ch->group_type,
               dbg_gtype(ch->group_type));
-       printk(KERN_DEBUG "\tsqnum          %llu\n",
+       pr_err("\tsqnum          %llu\n",
               (unsigned long long)le64_to_cpu(ch->sqnum));
-       printk(KERN_DEBUG "\tlen            %u\n", le32_to_cpu(ch->len));
+       pr_err("\tlen            %u\n", le32_to_cpu(ch->len));
 }
 
-void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode)
+void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
 {
        const struct ubifs_inode *ui = ubifs_inode(inode);
        struct qstr nm = { .name = NULL };
@@ -254,43 +238,43 @@ void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode)
        struct ubifs_dent_node *dent, *pdent = NULL;
        int count = 2;
 
-       printk(KERN_DEBUG "Dump in-memory inode:");
-       printk(KERN_DEBUG "\tinode          %lu\n", inode->i_ino);
-       printk(KERN_DEBUG "\tsize           %llu\n",
+       pr_err("Dump in-memory inode:");
+       pr_err("\tinode          %lu\n", inode->i_ino);
+       pr_err("\tsize           %llu\n",
               (unsigned long long)i_size_read(inode));
-       printk(KERN_DEBUG "\tnlink          %u\n", inode->i_nlink);
-       printk(KERN_DEBUG "\tuid            %u\n", (unsigned int)inode->i_uid);
-       printk(KERN_DEBUG "\tgid            %u\n", (unsigned int)inode->i_gid);
-       printk(KERN_DEBUG "\tatime          %u.%u\n",
+       pr_err("\tnlink          %u\n", inode->i_nlink);
+       pr_err("\tuid            %u\n", (unsigned int)inode->i_uid);
+       pr_err("\tgid            %u\n", (unsigned int)inode->i_gid);
+       pr_err("\tatime          %u.%u\n",
               (unsigned int)inode->i_atime.tv_sec,
               (unsigned int)inode->i_atime.tv_nsec);
-       printk(KERN_DEBUG "\tmtime          %u.%u\n",
+       pr_err("\tmtime          %u.%u\n",
               (unsigned int)inode->i_mtime.tv_sec,
               (unsigned int)inode->i_mtime.tv_nsec);
-       printk(KERN_DEBUG "\tctime          %u.%u\n",
+       pr_err("\tctime          %u.%u\n",
               (unsigned int)inode->i_ctime.tv_sec,
               (unsigned int)inode->i_ctime.tv_nsec);
-       printk(KERN_DEBUG "\tcreat_sqnum    %llu\n", ui->creat_sqnum);
-       printk(KERN_DEBUG "\txattr_size     %u\n", ui->xattr_size);
-       printk(KERN_DEBUG "\txattr_cnt      %u\n", ui->xattr_cnt);
-       printk(KERN_DEBUG "\txattr_names    %u\n", ui->xattr_names);
-       printk(KERN_DEBUG "\tdirty          %u\n", ui->dirty);
-       printk(KERN_DEBUG "\txattr          %u\n", ui->xattr);
-       printk(KERN_DEBUG "\tbulk_read      %u\n", ui->xattr);
-       printk(KERN_DEBUG "\tsynced_i_size  %llu\n",
+       pr_err("\tcreat_sqnum    %llu\n", ui->creat_sqnum);
+       pr_err("\txattr_size     %u\n", ui->xattr_size);
+       pr_err("\txattr_cnt      %u\n", ui->xattr_cnt);
+       pr_err("\txattr_names    %u\n", ui->xattr_names);
+       pr_err("\tdirty          %u\n", ui->dirty);
+       pr_err("\txattr          %u\n", ui->xattr);
+       pr_err("\tbulk_read      %u\n", ui->xattr);
+       pr_err("\tsynced_i_size  %llu\n",
               (unsigned long long)ui->synced_i_size);
-       printk(KERN_DEBUG "\tui_size        %llu\n",
+       pr_err("\tui_size        %llu\n",
               (unsigned long long)ui->ui_size);
-       printk(KERN_DEBUG "\tflags          %d\n", ui->flags);
-       printk(KERN_DEBUG "\tcompr_type     %d\n", ui->compr_type);
-       printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
-       printk(KERN_DEBUG "\tread_in_a_row  %lu\n", ui->read_in_a_row);
-       printk(KERN_DEBUG "\tdata_len       %d\n", ui->data_len);
+       pr_err("\tflags          %d\n", ui->flags);
+       pr_err("\tcompr_type     %d\n", ui->compr_type);
+       pr_err("\tlast_page_read %lu\n", ui->last_page_read);
+       pr_err("\tread_in_a_row  %lu\n", ui->read_in_a_row);
+       pr_err("\tdata_len       %d\n", ui->data_len);
 
        if (!S_ISDIR(inode->i_mode))
                return;
 
-       printk(KERN_DEBUG "List of directory entries:\n");
+       pr_err("List of directory entries:\n");
        ubifs_assert(!mutex_is_locked(&c->tnc_mutex));
 
        lowest_dent_key(c, &key, inode->i_ino);
@@ -298,11 +282,11 @@ void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode)
                dent = ubifs_tnc_next_ent(c, &key, &nm);
                if (IS_ERR(dent)) {
                        if (PTR_ERR(dent) != -ENOENT)
-                               printk(KERN_DEBUG "error %ld\n", PTR_ERR(dent));
+                               pr_err("error %ld\n", PTR_ERR(dent));
                        break;
                }
 
-               printk(KERN_DEBUG "\t%d: %s (%s)\n",
+               pr_err("\t%d: %s (%s)\n",
                       count++, dent->name, get_dent_type(dent->type));
 
                nm.name = dent->name;
@@ -314,19 +298,17 @@ void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode)
        kfree(pdent);
 }
 
-void dbg_dump_node(const struct ubifs_info *c, const void *node)
+void ubifs_dump_node(const struct ubifs_info *c, const void *node)
 {
        int i, n;
        union ubifs_key key;
        const struct ubifs_ch *ch = node;
-
-       if (dbg_is_tst_rcvry(c))
-               return;
+       char key_buf[DBG_KEY_BUF_LEN];
 
        /* If the magic is incorrect, just hexdump the first bytes */
        if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
-               printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ);
-               print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+               pr_err("Not a node, first %zu bytes:", UBIFS_CH_SZ);
+               print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 32, 1,
                               (void *)node, UBIFS_CH_SZ, 1);
                return;
        }
@@ -339,8 +321,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_pad_node *pad = node;
 
-               printk(KERN_DEBUG "\tpad_len        %u\n",
-                      le32_to_cpu(pad->pad_len));
+               pr_err("\tpad_len        %u\n", le32_to_cpu(pad->pad_len));
                break;
        }
        case UBIFS_SB_NODE:
@@ -348,112 +329,77 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
                const struct ubifs_sb_node *sup = node;
                unsigned int sup_flags = le32_to_cpu(sup->flags);
 
-               printk(KERN_DEBUG "\tkey_hash       %d (%s)\n",
+               pr_err("\tkey_hash       %d (%s)\n",
                       (int)sup->key_hash, get_key_hash(sup->key_hash));
-               printk(KERN_DEBUG "\tkey_fmt        %d (%s)\n",
+               pr_err("\tkey_fmt        %d (%s)\n",
                       (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
-               printk(KERN_DEBUG "\tflags          %#x\n", sup_flags);
-               printk(KERN_DEBUG "\t  big_lpt      %u\n",
+               pr_err("\tflags          %#x\n", sup_flags);
+               pr_err("\t  big_lpt      %u\n",
                       !!(sup_flags & UBIFS_FLG_BIGLPT));
-               printk(KERN_DEBUG "\t  space_fixup  %u\n",
+               pr_err("\t  space_fixup  %u\n",
                       !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
-               printk(KERN_DEBUG "\tmin_io_size    %u\n",
-                      le32_to_cpu(sup->min_io_size));
-               printk(KERN_DEBUG "\tleb_size       %u\n",
-                      le32_to_cpu(sup->leb_size));
-               printk(KERN_DEBUG "\tleb_cnt        %u\n",
-                      le32_to_cpu(sup->leb_cnt));
-               printk(KERN_DEBUG "\tmax_leb_cnt    %u\n",
-                      le32_to_cpu(sup->max_leb_cnt));
-               printk(KERN_DEBUG "\tmax_bud_bytes  %llu\n",
+               pr_err("\tmin_io_size    %u\n", le32_to_cpu(sup->min_io_size));
+               pr_err("\tleb_size       %u\n", le32_to_cpu(sup->leb_size));
+               pr_err("\tleb_cnt        %u\n", le32_to_cpu(sup->leb_cnt));
+               pr_err("\tmax_leb_cnt    %u\n", le32_to_cpu(sup->max_leb_cnt));
+               pr_err("\tmax_bud_bytes  %llu\n",
                       (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
-               printk(KERN_DEBUG "\tlog_lebs       %u\n",
-                      le32_to_cpu(sup->log_lebs));
-               printk(KERN_DEBUG "\tlpt_lebs       %u\n",
-                      le32_to_cpu(sup->lpt_lebs));
-               printk(KERN_DEBUG "\torph_lebs      %u\n",
-                      le32_to_cpu(sup->orph_lebs));
-               printk(KERN_DEBUG "\tjhead_cnt      %u\n",
-                      le32_to_cpu(sup->jhead_cnt));
-               printk(KERN_DEBUG "\tfanout         %u\n",
-                      le32_to_cpu(sup->fanout));
-               printk(KERN_DEBUG "\tlsave_cnt      %u\n",
-                      le32_to_cpu(sup->lsave_cnt));
-               printk(KERN_DEBUG "\tdefault_compr  %u\n",
+               pr_err("\tlog_lebs       %u\n", le32_to_cpu(sup->log_lebs));
+               pr_err("\tlpt_lebs       %u\n", le32_to_cpu(sup->lpt_lebs));
+               pr_err("\torph_lebs      %u\n", le32_to_cpu(sup->orph_lebs));
+               pr_err("\tjhead_cnt      %u\n", le32_to_cpu(sup->jhead_cnt));
+               pr_err("\tfanout         %u\n", le32_to_cpu(sup->fanout));
+               pr_err("\tlsave_cnt      %u\n", le32_to_cpu(sup->lsave_cnt));
+               pr_err("\tdefault_compr  %u\n",
                       (int)le16_to_cpu(sup->default_compr));
-               printk(KERN_DEBUG "\trp_size        %llu\n",
+               pr_err("\trp_size        %llu\n",
                       (unsigned long long)le64_to_cpu(sup->rp_size));
-               printk(KERN_DEBUG "\trp_uid         %u\n",
-                      le32_to_cpu(sup->rp_uid));
-               printk(KERN_DEBUG "\trp_gid         %u\n",
-                      le32_to_cpu(sup->rp_gid));
-               printk(KERN_DEBUG "\tfmt_version    %u\n",
-                      le32_to_cpu(sup->fmt_version));
-               printk(KERN_DEBUG "\ttime_gran      %u\n",
-                      le32_to_cpu(sup->time_gran));
-               printk(KERN_DEBUG "\tUUID           %pUB\n",
-                      sup->uuid);
+               pr_err("\trp_uid         %u\n", le32_to_cpu(sup->rp_uid));
+               pr_err("\trp_gid         %u\n", le32_to_cpu(sup->rp_gid));
+               pr_err("\tfmt_version    %u\n", le32_to_cpu(sup->fmt_version));
+               pr_err("\ttime_gran      %u\n", le32_to_cpu(sup->time_gran));
+               pr_err("\tUUID           %pUB\n", sup->uuid);
                break;
        }
        case UBIFS_MST_NODE:
        {
                const struct ubifs_mst_node *mst = node;
 
-               printk(KERN_DEBUG "\thighest_inum   %llu\n",
+               pr_err("\thighest_inum   %llu\n",
                       (unsigned long long)le64_to_cpu(mst->highest_inum));
-               printk(KERN_DEBUG "\tcommit number  %llu\n",
+               pr_err("\tcommit number  %llu\n",
                       (unsigned long long)le64_to_cpu(mst->cmt_no));
-               printk(KERN_DEBUG "\tflags          %#x\n",
-                      le32_to_cpu(mst->flags));
-               printk(KERN_DEBUG "\tlog_lnum       %u\n",
-                      le32_to_cpu(mst->log_lnum));
-               printk(KERN_DEBUG "\troot_lnum      %u\n",
-                      le32_to_cpu(mst->root_lnum));
-               printk(KERN_DEBUG "\troot_offs      %u\n",
-                      le32_to_cpu(mst->root_offs));
-               printk(KERN_DEBUG "\troot_len       %u\n",
-                      le32_to_cpu(mst->root_len));
-               printk(KERN_DEBUG "\tgc_lnum        %u\n",
-                      le32_to_cpu(mst->gc_lnum));
-               printk(KERN_DEBUG "\tihead_lnum     %u\n",
-                      le32_to_cpu(mst->ihead_lnum));
-               printk(KERN_DEBUG "\tihead_offs     %u\n",
-                      le32_to_cpu(mst->ihead_offs));
-               printk(KERN_DEBUG "\tindex_size     %llu\n",
+               pr_err("\tflags          %#x\n", le32_to_cpu(mst->flags));
+               pr_err("\tlog_lnum       %u\n", le32_to_cpu(mst->log_lnum));
+               pr_err("\troot_lnum      %u\n", le32_to_cpu(mst->root_lnum));
+               pr_err("\troot_offs      %u\n", le32_to_cpu(mst->root_offs));
+               pr_err("\troot_len       %u\n", le32_to_cpu(mst->root_len));
+               pr_err("\tgc_lnum        %u\n", le32_to_cpu(mst->gc_lnum));
+               pr_err("\tihead_lnum     %u\n", le32_to_cpu(mst->ihead_lnum));
+               pr_err("\tihead_offs     %u\n", le32_to_cpu(mst->ihead_offs));
+               pr_err("\tindex_size     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->index_size));
-               printk(KERN_DEBUG "\tlpt_lnum       %u\n",
-                      le32_to_cpu(mst->lpt_lnum));
-               printk(KERN_DEBUG "\tlpt_offs       %u\n",
-                      le32_to_cpu(mst->lpt_offs));
-               printk(KERN_DEBUG "\tnhead_lnum     %u\n",
-                      le32_to_cpu(mst->nhead_lnum));
-               printk(KERN_DEBUG "\tnhead_offs     %u\n",
-                      le32_to_cpu(mst->nhead_offs));
-               printk(KERN_DEBUG "\tltab_lnum      %u\n",
-                      le32_to_cpu(mst->ltab_lnum));
-               printk(KERN_DEBUG "\tltab_offs      %u\n",
-                      le32_to_cpu(mst->ltab_offs));
-               printk(KERN_DEBUG "\tlsave_lnum     %u\n",
-                      le32_to_cpu(mst->lsave_lnum));
-               printk(KERN_DEBUG "\tlsave_offs     %u\n",
-                      le32_to_cpu(mst->lsave_offs));
-               printk(KERN_DEBUG "\tlscan_lnum     %u\n",
-                      le32_to_cpu(mst->lscan_lnum));
-               printk(KERN_DEBUG "\tleb_cnt        %u\n",
-                      le32_to_cpu(mst->leb_cnt));
-               printk(KERN_DEBUG "\tempty_lebs     %u\n",
-                      le32_to_cpu(mst->empty_lebs));
-               printk(KERN_DEBUG "\tidx_lebs       %u\n",
-                      le32_to_cpu(mst->idx_lebs));
-               printk(KERN_DEBUG "\ttotal_free     %llu\n",
+               pr_err("\tlpt_lnum       %u\n", le32_to_cpu(mst->lpt_lnum));
+               pr_err("\tlpt_offs       %u\n", le32_to_cpu(mst->lpt_offs));
+               pr_err("\tnhead_lnum     %u\n", le32_to_cpu(mst->nhead_lnum));
+               pr_err("\tnhead_offs     %u\n", le32_to_cpu(mst->nhead_offs));
+               pr_err("\tltab_lnum      %u\n", le32_to_cpu(mst->ltab_lnum));
+               pr_err("\tltab_offs      %u\n", le32_to_cpu(mst->ltab_offs));
+               pr_err("\tlsave_lnum     %u\n", le32_to_cpu(mst->lsave_lnum));
+               pr_err("\tlsave_offs     %u\n", le32_to_cpu(mst->lsave_offs));
+               pr_err("\tlscan_lnum     %u\n", le32_to_cpu(mst->lscan_lnum));
+               pr_err("\tleb_cnt        %u\n", le32_to_cpu(mst->leb_cnt));
+               pr_err("\tempty_lebs     %u\n", le32_to_cpu(mst->empty_lebs));
+               pr_err("\tidx_lebs       %u\n", le32_to_cpu(mst->idx_lebs));
+               pr_err("\ttotal_free     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_free));
-               printk(KERN_DEBUG "\ttotal_dirty    %llu\n",
+               pr_err("\ttotal_dirty    %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_dirty));
-               printk(KERN_DEBUG "\ttotal_used     %llu\n",
+               pr_err("\ttotal_used     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_used));
-               printk(KERN_DEBUG "\ttotal_dead     %llu\n",
+               pr_err("\ttotal_dead     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_dead));
-               printk(KERN_DEBUG "\ttotal_dark     %llu\n",
+               pr_err("\ttotal_dark     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_dark));
                break;
        }
@@ -461,12 +407,9 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_ref_node *ref = node;
 
-               printk(KERN_DEBUG "\tlnum           %u\n",
-                      le32_to_cpu(ref->lnum));
-               printk(KERN_DEBUG "\toffs           %u\n",
-                      le32_to_cpu(ref->offs));
-               printk(KERN_DEBUG "\tjhead          %u\n",
-                      le32_to_cpu(ref->jhead));
+               pr_err("\tlnum           %u\n", le32_to_cpu(ref->lnum));
+               pr_err("\toffs           %u\n", le32_to_cpu(ref->offs));
+               pr_err("\tjhead          %u\n", le32_to_cpu(ref->jhead));
                break;
        }
        case UBIFS_INO_NODE:
@@ -474,40 +417,32 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
                const struct ubifs_ino_node *ino = node;
 
                key_read(c, &ino->key, &key);
-               printk(KERN_DEBUG "\tkey            %s\n", DBGKEY(&key));
-               printk(KERN_DEBUG "\tcreat_sqnum    %llu\n",
+               pr_err("\tkey            %s\n",
+                      dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
+               pr_err("\tcreat_sqnum    %llu\n",
                       (unsigned long long)le64_to_cpu(ino->creat_sqnum));
-               printk(KERN_DEBUG "\tsize           %llu\n",
+               pr_err("\tsize           %llu\n",
                       (unsigned long long)le64_to_cpu(ino->size));
-               printk(KERN_DEBUG "\tnlink          %u\n",
-                      le32_to_cpu(ino->nlink));
-               printk(KERN_DEBUG "\tatime          %lld.%u\n",
+               pr_err("\tnlink          %u\n", le32_to_cpu(ino->nlink));
+               pr_err("\tatime          %lld.%u\n",
                       (long long)le64_to_cpu(ino->atime_sec),
                       le32_to_cpu(ino->atime_nsec));
-               printk(KERN_DEBUG "\tmtime          %lld.%u\n",
+               pr_err("\tmtime          %lld.%u\n",
                       (long long)le64_to_cpu(ino->mtime_sec),
                       le32_to_cpu(ino->mtime_nsec));
-               printk(KERN_DEBUG "\tctime          %lld.%u\n",
+               pr_err("\tctime          %lld.%u\n",
                       (long long)le64_to_cpu(ino->ctime_sec),
                       le32_to_cpu(ino->ctime_nsec));
-               printk(KERN_DEBUG "\tuid            %u\n",
-                      le32_to_cpu(ino->uid));
-               printk(KERN_DEBUG "\tgid            %u\n",
-                      le32_to_cpu(ino->gid));
-               printk(KERN_DEBUG "\tmode           %u\n",
-                      le32_to_cpu(ino->mode));
-               printk(KERN_DEBUG "\tflags          %#x\n",
-                      le32_to_cpu(ino->flags));
-               printk(KERN_DEBUG "\txattr_cnt      %u\n",
-                      le32_to_cpu(ino->xattr_cnt));
-               printk(KERN_DEBUG "\txattr_size     %u\n",
-                      le32_to_cpu(ino->xattr_size));
-               printk(KERN_DEBUG "\txattr_names    %u\n",
-                      le32_to_cpu(ino->xattr_names));
-               printk(KERN_DEBUG "\tcompr_type     %#x\n",
+               pr_err("\tuid            %u\n", le32_to_cpu(ino->uid));
+               pr_err("\tgid            %u\n", le32_to_cpu(ino->gid));
+               pr_err("\tmode           %u\n", le32_to_cpu(ino->mode));
+               pr_err("\tflags          %#x\n", le32_to_cpu(ino->flags));
+               pr_err("\txattr_cnt      %u\n", le32_to_cpu(ino->xattr_cnt));
+               pr_err("\txattr_size     %u\n", le32_to_cpu(ino->xattr_size));
+               pr_err("\txattr_names    %u\n", le32_to_cpu(ino->xattr_names));
+               pr_err("\tcompr_type     %#x\n",
                       (int)le16_to_cpu(ino->compr_type));
-               printk(KERN_DEBUG "\tdata len       %u\n",
-                      le32_to_cpu(ino->data_len));
+               pr_err("\tdata len       %u\n", le32_to_cpu(ino->data_len));
                break;
        }
        case UBIFS_DENT_NODE:
@@ -517,21 +452,21 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
                int nlen = le16_to_cpu(dent->nlen);
 
                key_read(c, &dent->key, &key);
-               printk(KERN_DEBUG "\tkey            %s\n", DBGKEY(&key));
-               printk(KERN_DEBUG "\tinum           %llu\n",
+               pr_err("\tkey            %s\n",
+                      dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
+               pr_err("\tinum           %llu\n",
                       (unsigned long long)le64_to_cpu(dent->inum));
-               printk(KERN_DEBUG "\ttype           %d\n", (int)dent->type);
-               printk(KERN_DEBUG "\tnlen           %d\n", nlen);
-               printk(KERN_DEBUG "\tname           ");
+               pr_err("\ttype           %d\n", (int)dent->type);
+               pr_err("\tnlen           %d\n", nlen);
+               pr_err("\tname           ");
 
                if (nlen > UBIFS_MAX_NLEN)
-                       printk(KERN_DEBUG "(bad name length, not printing, "
-                                         "bad or corrupted node)");
+                       pr_err("(bad name length, not printing, bad or corrupted node)");
                else {
                        for (i = 0; i < nlen && dent->name[i]; i++)
-                               printk(KERN_CONT "%c", dent->name[i]);
+                               pr_cont("%c", dent->name[i]);
                }
-               printk(KERN_CONT "\n");
+               pr_cont("\n");
 
                break;
        }
@@ -541,15 +476,14 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
                int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;
 
                key_read(c, &dn->key, &key);
-               printk(KERN_DEBUG "\tkey            %s\n", DBGKEY(&key));
-               printk(KERN_DEBUG "\tsize           %u\n",
-                      le32_to_cpu(dn->size));
-               printk(KERN_DEBUG "\tcompr_typ      %d\n",
+               pr_err("\tkey            %s\n",
+                      dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
+               pr_err("\tsize           %u\n", le32_to_cpu(dn->size));
+               pr_err("\tcompr_typ      %d\n",
                       (int)le16_to_cpu(dn->compr_type));
-               printk(KERN_DEBUG "\tdata size      %d\n",
-                      dlen);
-               printk(KERN_DEBUG "\tdata:\n");
-               print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1,
+               pr_err("\tdata size      %d\n", dlen);
+               pr_err("\tdata:\n");
+               print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
                               (void *)&dn->data, dlen, 0);
                break;
        }
@@ -557,11 +491,10 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_trun_node *trun = node;
 
-               printk(KERN_DEBUG "\tinum           %u\n",
-                      le32_to_cpu(trun->inum));
-               printk(KERN_DEBUG "\told_size       %llu\n",
+               pr_err("\tinum           %u\n", le32_to_cpu(trun->inum));
+               pr_err("\told_size       %llu\n",
                       (unsigned long long)le64_to_cpu(trun->old_size));
-               printk(KERN_DEBUG "\tnew_size       %llu\n",
+               pr_err("\tnew_size       %llu\n",
                       (unsigned long long)le64_to_cpu(trun->new_size));
                break;
        }
@@ -570,19 +503,20 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
                const struct ubifs_idx_node *idx = node;
 
                n = le16_to_cpu(idx->child_cnt);
-               printk(KERN_DEBUG "\tchild_cnt      %d\n", n);
-               printk(KERN_DEBUG "\tlevel          %d\n",
-                      (int)le16_to_cpu(idx->level));
-               printk(KERN_DEBUG "\tBranches:\n");
+               pr_err("\tchild_cnt      %d\n", n);
+               pr_err("\tlevel          %d\n", (int)le16_to_cpu(idx->level));
+               pr_err("\tBranches:\n");
 
                for (i = 0; i < n && i < c->fanout - 1; i++) {
                        const struct ubifs_branch *br;
 
                        br = ubifs_idx_branch(c, idx, i);
                        key_read(c, &br->key, &key);
-                       printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n",
+                       pr_err("\t%d: LEB %d:%d len %d key %s\n",
                               i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
-                              le32_to_cpu(br->len), DBGKEY(&key));
+                              le32_to_cpu(br->len),
+                              dbg_snprintf_key(c, &key, key_buf,
+                                               DBG_KEY_BUF_LEN));
                }
                break;
        }
@@ -592,57 +526,55 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_orph_node *orph = node;
 
-               printk(KERN_DEBUG "\tcommit number  %llu\n",
+               pr_err("\tcommit number  %llu\n",
                       (unsigned long long)
                                le64_to_cpu(orph->cmt_no) & LLONG_MAX);
-               printk(KERN_DEBUG "\tlast node flag %llu\n",
+               pr_err("\tlast node flag %llu\n",
                       (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
                n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
-               printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n);
+               pr_err("\t%d orphan inode numbers:\n", n);
                for (i = 0; i < n; i++)
-                       printk(KERN_DEBUG "\t  ino %llu\n",
+                       pr_err("\t  ino %llu\n",
                               (unsigned long long)le64_to_cpu(orph->inos[i]));
                break;
        }
        default:
-               printk(KERN_DEBUG "node type %d was not recognized\n",
+               pr_err("node type %d was not recognized\n",
                       (int)ch->node_type);
        }
        spin_unlock(&dbg_lock);
 }
 
-void dbg_dump_budget_req(const struct ubifs_budget_req *req)
+void ubifs_dump_budget_req(const struct ubifs_budget_req *req)
 {
        spin_lock(&dbg_lock);
-       printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n",
+       pr_err("Budgeting request: new_ino %d, dirtied_ino %d\n",
               req->new_ino, req->dirtied_ino);
-       printk(KERN_DEBUG "\tnew_ino_d   %d, dirtied_ino_d %d\n",
+       pr_err("\tnew_ino_d   %d, dirtied_ino_d %d\n",
               req->new_ino_d, req->dirtied_ino_d);
-       printk(KERN_DEBUG "\tnew_page    %d, dirtied_page %d\n",
+       pr_err("\tnew_page    %d, dirtied_page %d\n",
               req->new_page, req->dirtied_page);
-       printk(KERN_DEBUG "\tnew_dent    %d, mod_dent     %d\n",
+       pr_err("\tnew_dent    %d, mod_dent     %d\n",
               req->new_dent, req->mod_dent);
-       printk(KERN_DEBUG "\tidx_growth  %d\n", req->idx_growth);
-       printk(KERN_DEBUG "\tdata_growth %d dd_growth     %d\n",
+       pr_err("\tidx_growth  %d\n", req->idx_growth);
+       pr_err("\tdata_growth %d dd_growth     %d\n",
               req->data_growth, req->dd_growth);
        spin_unlock(&dbg_lock);
 }
 
-void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
+void ubifs_dump_lstats(const struct ubifs_lp_stats *lst)
 {
        spin_lock(&dbg_lock);
-       printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
-              "idx_lebs  %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
-       printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
-              "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
-              lst->total_dirty);
-       printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, "
-              "total_dead %lld\n", lst->total_used, lst->total_dark,
-              lst->total_dead);
+       pr_err("(pid %d) Lprops statistics: empty_lebs %d, idx_lebs  %d\n",
+              current->pid, lst->empty_lebs, lst->idx_lebs);
+       pr_err("\ttaken_empty_lebs %d, total_free %lld, total_dirty %lld\n",
+              lst->taken_empty_lebs, lst->total_free, lst->total_dirty);
+       pr_err("\ttotal_used %lld, total_dark %lld, total_dead %lld\n",
+              lst->total_used, lst->total_dark, lst->total_dead);
        spin_unlock(&dbg_lock);
 }
 
-void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
+void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
 {
        int i;
        struct rb_node *rb;
@@ -652,21 +584,17 @@ void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
 
        spin_lock(&c->space_lock);
        spin_lock(&dbg_lock);
-       printk(KERN_DEBUG "(pid %d) Budgeting info: data budget sum %lld, "
-              "total budget sum %lld\n", current->pid,
-              bi->data_growth + bi->dd_growth,
+       pr_err("(pid %d) Budgeting info: data budget sum %lld, total budget sum %lld\n",
+              current->pid, bi->data_growth + bi->dd_growth,
               bi->data_growth + bi->dd_growth + bi->idx_growth);
-       printk(KERN_DEBUG "\tbudg_data_growth %lld, budg_dd_growth %lld, "
-              "budg_idx_growth %lld\n", bi->data_growth, bi->dd_growth,
-              bi->idx_growth);
-       printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %llu, "
-              "uncommitted_idx %lld\n", bi->min_idx_lebs, bi->old_idx_sz,
-              bi->uncommitted_idx);
-       printk(KERN_DEBUG "\tpage_budget %d, inode_budget %d, dent_budget %d\n",
+       pr_err("\tbudg_data_growth %lld, budg_dd_growth %lld, budg_idx_growth %lld\n",
+              bi->data_growth, bi->dd_growth, bi->idx_growth);
+       pr_err("\tmin_idx_lebs %d, old_idx_sz %llu, uncommitted_idx %lld\n",
+              bi->min_idx_lebs, bi->old_idx_sz, bi->uncommitted_idx);
+       pr_err("\tpage_budget %d, inode_budget %d, dent_budget %d\n",
               bi->page_budget, bi->inode_budget, bi->dent_budget);
-       printk(KERN_DEBUG "\tnospace %u, nospace_rp %u\n",
-              bi->nospace, bi->nospace_rp);
-       printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
+       pr_err("\tnospace %u, nospace_rp %u\n", bi->nospace, bi->nospace_rp);
+       pr_err("\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
               c->dark_wm, c->dead_wm, c->max_idx_node_sz);
 
        if (bi != &c->bi)
@@ -677,45 +605,44 @@ void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
                 */
                goto out_unlock;
 
-       printk(KERN_DEBUG "\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
+       pr_err("\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
               c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
-       printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
-              "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
+       pr_err("\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, clean_zn_cnt %ld\n",
+              atomic_long_read(&c->dirty_pg_cnt),
               atomic_long_read(&c->dirty_zn_cnt),
               atomic_long_read(&c->clean_zn_cnt));
-       printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
-              c->gc_lnum, c->ihead_lnum);
+       pr_err("\tgc_lnum %d, ihead_lnum %d\n", c->gc_lnum, c->ihead_lnum);
 
        /* If we are in R/O mode, journal heads do not exist */
        if (c->jheads)
                for (i = 0; i < c->jhead_cnt; i++)
-                       printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
+                       pr_err("\tjhead %s\t LEB %d\n",
                               dbg_jhead(c->jheads[i].wbuf.jhead),
                               c->jheads[i].wbuf.lnum);
        for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
                bud = rb_entry(rb, struct ubifs_bud, rb);
-               printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
+               pr_err("\tbud LEB %d\n", bud->lnum);
        }
        list_for_each_entry(bud, &c->old_buds, list)
-               printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum);
+               pr_err("\told bud LEB %d\n", bud->lnum);
        list_for_each_entry(idx_gc, &c->idx_gc, list)
-               printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n",
+               pr_err("\tGC'ed idx LEB %d unmap %d\n",
                       idx_gc->lnum, idx_gc->unmap);
-       printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
+       pr_err("\tcommit state %d\n", c->cmt_state);
 
        /* Print budgeting predictions */
        available = ubifs_calc_available(c, c->bi.min_idx_lebs);
        outstanding = c->bi.data_growth + c->bi.dd_growth;
        free = ubifs_get_free_space_nolock(c);
-       printk(KERN_DEBUG "Budgeting predictions:\n");
-       printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
+       pr_err("Budgeting predictions:\n");
+       pr_err("\tavailable: %lld, outstanding %lld, free %lld\n",
               available, outstanding, free);
 out_unlock:
        spin_unlock(&dbg_lock);
        spin_unlock(&c->space_lock);
 }
 
-void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
+void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
 {
        int i, spc, dark = 0, dead = 0;
        struct rb_node *rb;
@@ -728,21 +655,19 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
                dark = ubifs_calc_dark(c, spc);
 
        if (lp->flags & LPROPS_INDEX)
-               printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
-                      "free + dirty %-8d flags %#x (", lp->lnum, lp->free,
-                      lp->dirty, c->leb_size - spc, spc, lp->flags);
+               pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d flags %#x (",
+                      lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
+                      lp->flags);
        else
-               printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
-                      "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
-                      "flags %#-4x (", lp->lnum, lp->free, lp->dirty,
-                      c->leb_size - spc, spc, dark, dead,
-                      (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
+               pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d flags %#-4x (",
+                      lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
+                      dark, dead, (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
 
        if (lp->flags & LPROPS_TAKEN) {
                if (lp->flags & LPROPS_INDEX)
-                       printk(KERN_CONT "index, taken");
+                       pr_cont("index, taken");
                else
-                       printk(KERN_CONT "taken");
+                       pr_cont("taken");
        } else {
                const char *s;
 
@@ -779,7 +704,7 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
                                break;
                        }
                }
-               printk(KERN_CONT "%s", s);
+               pr_cont("%s", s);
        }
 
        for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
@@ -794,109 +719,101 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
                                 */
                                if (c->jheads &&
                                    lp->lnum == c->jheads[i].wbuf.lnum) {
-                                       printk(KERN_CONT ", jhead %s",
-                                              dbg_jhead(i));
+                                       pr_cont(", jhead %s", dbg_jhead(i));
                                        head = 1;
                                }
                        }
                        if (!head)
-                               printk(KERN_CONT ", bud of jhead %s",
+                               pr_cont(", bud of jhead %s",
                                       dbg_jhead(bud->jhead));
                }
        }
        if (lp->lnum == c->gc_lnum)
-               printk(KERN_CONT ", GC LEB");
-       printk(KERN_CONT ")\n");
+               pr_cont(", GC LEB");
+       pr_cont(")\n");
 }
 
-void dbg_dump_lprops(struct ubifs_info *c)
+void ubifs_dump_lprops(struct ubifs_info *c)
 {
        int lnum, err;
        struct ubifs_lprops lp;
        struct ubifs_lp_stats lst;
 
-       printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n",
-              current->pid);
+       pr_err("(pid %d) start dumping LEB properties\n", current->pid);
        ubifs_get_lp_stats(c, &lst);
-       dbg_dump_lstats(&lst);
+       ubifs_dump_lstats(&lst);
 
        for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
                err = ubifs_read_one_lp(c, lnum, &lp);
                if (err)
                        ubifs_err("cannot read lprops for LEB %d", lnum);
 
-               dbg_dump_lprop(c, &lp);
+               ubifs_dump_lprop(c, &lp);
        }
-       printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n",
-              current->pid);
+       pr_err("(pid %d) finish dumping LEB properties\n", current->pid);
 }
 
-void dbg_dump_lpt_info(struct ubifs_info *c)
+void ubifs_dump_lpt_info(struct ubifs_info *c)
 {
        int i;
 
        spin_lock(&dbg_lock);
-       printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid);
-       printk(KERN_DEBUG "\tlpt_sz:        %lld\n", c->lpt_sz);
-       printk(KERN_DEBUG "\tpnode_sz:      %d\n", c->pnode_sz);
-       printk(KERN_DEBUG "\tnnode_sz:      %d\n", c->nnode_sz);
-       printk(KERN_DEBUG "\tltab_sz:       %d\n", c->ltab_sz);
-       printk(KERN_DEBUG "\tlsave_sz:      %d\n", c->lsave_sz);
-       printk(KERN_DEBUG "\tbig_lpt:       %d\n", c->big_lpt);
-       printk(KERN_DEBUG "\tlpt_hght:      %d\n", c->lpt_hght);
-       printk(KERN_DEBUG "\tpnode_cnt:     %d\n", c->pnode_cnt);
-       printk(KERN_DEBUG "\tnnode_cnt:     %d\n", c->nnode_cnt);
-       printk(KERN_DEBUG "\tdirty_pn_cnt:  %d\n", c->dirty_pn_cnt);
-       printk(KERN_DEBUG "\tdirty_nn_cnt:  %d\n", c->dirty_nn_cnt);
-       printk(KERN_DEBUG "\tlsave_cnt:     %d\n", c->lsave_cnt);
-       printk(KERN_DEBUG "\tspace_bits:    %d\n", c->space_bits);
-       printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
-       printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
-       printk(KERN_DEBUG "\tlpt_spc_bits:  %d\n", c->lpt_spc_bits);
-       printk(KERN_DEBUG "\tpcnt_bits:     %d\n", c->pcnt_bits);
-       printk(KERN_DEBUG "\tlnum_bits:     %d\n", c->lnum_bits);
-       printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
-       printk(KERN_DEBUG "\tLPT head is at %d:%d\n",
+       pr_err("(pid %d) dumping LPT information\n", current->pid);
+       pr_err("\tlpt_sz:        %lld\n", c->lpt_sz);
+       pr_err("\tpnode_sz:      %d\n", c->pnode_sz);
+       pr_err("\tnnode_sz:      %d\n", c->nnode_sz);
+       pr_err("\tltab_sz:       %d\n", c->ltab_sz);
+       pr_err("\tlsave_sz:      %d\n", c->lsave_sz);
+       pr_err("\tbig_lpt:       %d\n", c->big_lpt);
+       pr_err("\tlpt_hght:      %d\n", c->lpt_hght);
+       pr_err("\tpnode_cnt:     %d\n", c->pnode_cnt);
+       pr_err("\tnnode_cnt:     %d\n", c->nnode_cnt);
+       pr_err("\tdirty_pn_cnt:  %d\n", c->dirty_pn_cnt);
+       pr_err("\tdirty_nn_cnt:  %d\n", c->dirty_nn_cnt);
+       pr_err("\tlsave_cnt:     %d\n", c->lsave_cnt);
+       pr_err("\tspace_bits:    %d\n", c->space_bits);
+       pr_err("\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
+       pr_err("\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
+       pr_err("\tlpt_spc_bits:  %d\n", c->lpt_spc_bits);
+       pr_err("\tpcnt_bits:     %d\n", c->pcnt_bits);
+       pr_err("\tlnum_bits:     %d\n", c->lnum_bits);
+       pr_err("\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
+       pr_err("\tLPT head is at %d:%d\n",
               c->nhead_lnum, c->nhead_offs);
-       printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n",
-              c->ltab_lnum, c->ltab_offs);
+       pr_err("\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs);
        if (c->big_lpt)
-               printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n",
+               pr_err("\tLPT lsave is at %d:%d\n",
                       c->lsave_lnum, c->lsave_offs);
        for (i = 0; i < c->lpt_lebs; i++)
-               printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d "
-                      "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
-                      c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
+               pr_err("\tLPT LEB %d free %d dirty %d tgc %d cmt %d\n",
+                      i + c->lpt_first, c->ltab[i].free, c->ltab[i].dirty,
+                      c->ltab[i].tgc, c->ltab[i].cmt);
        spin_unlock(&dbg_lock);
 }
 
-void dbg_dump_sleb(const struct ubifs_info *c,
-                  const struct ubifs_scan_leb *sleb, int offs)
+void ubifs_dump_sleb(const struct ubifs_info *c,
+                    const struct ubifs_scan_leb *sleb, int offs)
 {
        struct ubifs_scan_node *snod;
 
-       printk(KERN_DEBUG "(pid %d) start dumping scanned data from LEB %d:%d\n",
+       pr_err("(pid %d) start dumping scanned data from LEB %d:%d\n",
               current->pid, sleb->lnum, offs);
 
        list_for_each_entry(snod, &sleb->nodes, list) {
                cond_resched();
-               printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", sleb->lnum,
-                      snod->offs, snod->len);
-               dbg_dump_node(c, snod->node);
+               pr_err("Dumping node at LEB %d:%d len %d\n",
+                      sleb->lnum, snod->offs, snod->len);
+               ubifs_dump_node(c, snod->node);
        }
 }
 
-void dbg_dump_leb(const struct ubifs_info *c, int lnum)
+void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
 {
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;
        void *buf;
 
-       if (dbg_is_tst_rcvry(c))
-               return;
-
-       printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
 
        buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
        if (!buf) {
@@ -910,18 +827,17 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum)
                goto out;
        }
 
-       printk(KERN_DEBUG "LEB %d has %d nodes ending at %d\n", lnum,
+       pr_err("LEB %d has %d nodes ending at %d\n", lnum,
               sleb->nodes_cnt, sleb->endpt);
 
        list_for_each_entry(snod, &sleb->nodes, list) {
                cond_resched();
-               printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum,
+               pr_err("Dumping node at LEB %d:%d len %d\n", lnum,
                       snod->offs, snod->len);
-               dbg_dump_node(c, snod->node);
+               ubifs_dump_node(c, snod->node);
        }
 
-       printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
        ubifs_scan_destroy(sleb);
 
 out:
@@ -929,11 +845,12 @@ out:
        return;
 }
 
-void dbg_dump_znode(const struct ubifs_info *c,
-                   const struct ubifs_znode *znode)
+void ubifs_dump_znode(const struct ubifs_info *c,
+                     const struct ubifs_znode *znode)
 {
        int n;
        const struct ubifs_zbranch *zbr;
+       char key_buf[DBG_KEY_BUF_LEN];
 
        spin_lock(&dbg_lock);
        if (znode->parent)
@@ -941,103 +858,102 @@ void dbg_dump_znode(const struct ubifs_info *c,
        else
                zbr = &c->zroot;
 
-       printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
-              " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
-              zbr->len, znode->parent, znode->iip, znode->level,
-              znode->child_cnt, znode->flags);
+       pr_err("znode %p, LEB %d:%d len %d parent %p iip %d level %d child_cnt %d flags %lx\n",
+              znode, zbr->lnum, zbr->offs, zbr->len, znode->parent, znode->iip,
+              znode->level, znode->child_cnt, znode->flags);
 
        if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
                spin_unlock(&dbg_lock);
                return;
        }
 
-       printk(KERN_DEBUG "zbranches:\n");
+       pr_err("zbranches:\n");
        for (n = 0; n < znode->child_cnt; n++) {
                zbr = &znode->zbranch[n];
                if (znode->level > 0)
-                       printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key "
-                                         "%s\n", n, zbr->znode, zbr->lnum,
-                                         zbr->offs, zbr->len,
-                                         DBGKEY(&zbr->key));
+                       pr_err("\t%d: znode %p LEB %d:%d len %d key %s\n",
+                              n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
+                              dbg_snprintf_key(c, &zbr->key, key_buf,
+                                               DBG_KEY_BUF_LEN));
                else
-                       printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key "
-                                         "%s\n", n, zbr->znode, zbr->lnum,
-                                         zbr->offs, zbr->len,
-                                         DBGKEY(&zbr->key));
+                       pr_err("\t%d: LNC %p LEB %d:%d len %d key %s\n",
+                              n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
+                              dbg_snprintf_key(c, &zbr->key, key_buf,
+                                               DBG_KEY_BUF_LEN));
        }
        spin_unlock(&dbg_lock);
 }
 
-void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
+void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
 {
        int i;
 
-       printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n",
+       pr_err("(pid %d) start dumping heap cat %d (%d elements)\n",
               current->pid, cat, heap->cnt);
        for (i = 0; i < heap->cnt; i++) {
                struct ubifs_lprops *lprops = heap->arr[i];
 
-               printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d "
-                      "flags %d\n", i, lprops->lnum, lprops->hpos,
-                      lprops->free, lprops->dirty, lprops->flags);
+               pr_err("\t%d. LEB %d hpos %d free %d dirty %d flags %d\n",
+                      i, lprops->lnum, lprops->hpos, lprops->free,
+                      lprops->dirty, lprops->flags);
        }
-       printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid);
+       pr_err("(pid %d) finish dumping heap\n", current->pid);
 }
 
-void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
-                   struct ubifs_nnode *parent, int iip)
+void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
+                     struct ubifs_nnode *parent, int iip)
 {
        int i;
 
-       printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid);
-       printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
+       pr_err("(pid %d) dumping pnode:\n", current->pid);
+       pr_err("\taddress %zx parent %zx cnext %zx\n",
               (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
-       printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
+       pr_err("\tflags %lu iip %d level %d num %d\n",
               pnode->flags, iip, pnode->level, pnode->num);
        for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
                struct ubifs_lprops *lp = &pnode->lprops[i];
 
-               printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n",
+               pr_err("\t%d: free %d dirty %d flags %d lnum %d\n",
                       i, lp->free, lp->dirty, lp->flags, lp->lnum);
        }
 }
 
-void dbg_dump_tnc(struct ubifs_info *c)
+void ubifs_dump_tnc(struct ubifs_info *c)
 {
        struct ubifs_znode *znode;
        int level;
 
-       printk(KERN_DEBUG "\n");
-       printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid);
+       pr_err("\n");
+       pr_err("(pid %d) start dumping TNC tree\n", current->pid);
        znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
        level = znode->level;
-       printk(KERN_DEBUG "== Level %d ==\n", level);
+       pr_err("== Level %d ==\n", level);
        while (znode) {
                if (level != znode->level) {
                        level = znode->level;
-                       printk(KERN_DEBUG "== Level %d ==\n", level);
+                       pr_err("== Level %d ==\n", level);
                }
-               dbg_dump_znode(c, znode);
+               ubifs_dump_znode(c, znode);
                znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
        }
-       printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid);
+       pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
 }
 
 static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
                      void *priv)
 {
-       dbg_dump_znode(c, znode);
+       ubifs_dump_znode(c, znode);
        return 0;
 }
 
 /**
- * dbg_dump_index - dump the on-flash index.
+ * ubifs_dump_index - dump the on-flash index.
  * @c: UBIFS file-system description object
  *
- * This function dumps whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()'
+ * This function dumps whole UBIFS indexing B-tree, unlike 'ubifs_dump_tnc()'
  * which dumps only in-memory znodes and does not read znodes which from flash.
  */
-void dbg_dump_index(struct ubifs_info *c)
+void ubifs_dump_index(struct ubifs_info *c)
 {
        dbg_walk_index(c, NULL, dump_znode, NULL);
 }
@@ -1123,15 +1039,15 @@ int dbg_check_space_info(struct ubifs_info *c)
 
 out:
        ubifs_msg("saved lprops statistics dump");
-       dbg_dump_lstats(&d->saved_lst);
+       ubifs_dump_lstats(&d->saved_lst);
        ubifs_msg("saved budgeting info dump");
-       dbg_dump_budg(c, &d->saved_bi);
+       ubifs_dump_budg(c, &d->saved_bi);
        ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
        ubifs_msg("current lprops statistics dump");
        ubifs_get_lp_stats(c, &lst);
-       dbg_dump_lstats(&lst);
+       ubifs_dump_lstats(&lst);
        ubifs_msg("current budgeting info dump");
-       dbg_dump_budg(c, &c->bi);
+       ubifs_dump_budg(c, &c->bi);
        dump_stack();
        return -EINVAL;
 }
@@ -1159,11 +1075,11 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode)
        mutex_lock(&ui->ui_mutex);
        spin_lock(&ui->ui_lock);
        if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
-               ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
-                         "is clean", ui->ui_size, ui->synced_i_size);
+               ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode is clean",
+                         ui->ui_size, ui->synced_i_size);
                ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
                          inode->i_mode, i_size_read(inode));
-               dbg_dump_stack();
+               dump_stack();
                err = -EINVAL;
        }
        spin_unlock(&ui->ui_lock);
@@ -1222,18 +1138,17 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
        kfree(pdent);
 
        if (i_size_read(dir) != size) {
-               ubifs_err("directory inode %lu has size %llu, "
-                         "but calculated size is %llu", dir->i_ino,
-                         (unsigned long long)i_size_read(dir),
+               ubifs_err("directory inode %lu has size %llu, but calculated size is %llu",
+                         dir->i_ino, (unsigned long long)i_size_read(dir),
                          (unsigned long long)size);
-               dbg_dump_inode(c, dir);
+               ubifs_dump_inode(c, dir);
                dump_stack();
                return -EINVAL;
        }
        if (dir->i_nlink != nlink) {
-               ubifs_err("directory inode %lu has nlink %u, but calculated "
-                         "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
-               dbg_dump_inode(c, dir);
+               ubifs_err("directory inode %lu has nlink %u, but calculated nlink is %u",
+                         dir->i_ino, dir->i_nlink, nlink);
+               ubifs_dump_inode(c, dir);
                dump_stack();
                return -EINVAL;
        }
@@ -1260,6 +1175,7 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
        int err, nlen1, nlen2, cmp;
        struct ubifs_dent_node *dent1, *dent2;
        union ubifs_key key;
+       char key_buf[DBG_KEY_BUF_LEN];
 
        ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
        dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
@@ -1289,21 +1205,25 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
        err = 1;
        key_read(c, &dent1->key, &key);
        if (keys_cmp(c, &zbr1->key, &key)) {
-               dbg_err("1st entry at %d:%d has key %s", zbr1->lnum,
-                       zbr1->offs, DBGKEY(&key));
-               dbg_err("but it should have key %s according to tnc",
-                       DBGKEY(&zbr1->key));
-               dbg_dump_node(c, dent1);
+               ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum,
+                         zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
+                                                      DBG_KEY_BUF_LEN));
+               ubifs_err("but it should have key %s according to tnc",
+                         dbg_snprintf_key(c, &zbr1->key, key_buf,
+                                          DBG_KEY_BUF_LEN));
+               ubifs_dump_node(c, dent1);
                goto out_free;
        }
 
        key_read(c, &dent2->key, &key);
        if (keys_cmp(c, &zbr2->key, &key)) {
-               dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum,
-                       zbr1->offs, DBGKEY(&key));
-               dbg_err("but it should have key %s according to tnc",
-                       DBGKEY(&zbr2->key));
-               dbg_dump_node(c, dent2);
+               ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum,
+                         zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
+                                                      DBG_KEY_BUF_LEN));
+               ubifs_err("but it should have key %s according to tnc",
+                         dbg_snprintf_key(c, &zbr2->key, key_buf,
+                                          DBG_KEY_BUF_LEN));
+               ubifs_dump_node(c, dent2);
                goto out_free;
        }
 
@@ -1316,15 +1236,15 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
                goto out_free;
        }
        if (cmp == 0 && nlen1 == nlen2)
-               dbg_err("2 xent/dent nodes with the same name");
+               ubifs_err("2 xent/dent nodes with the same name");
        else
-               dbg_err("bad order of colliding key %s",
-                       DBGKEY(&key));
+               ubifs_err("bad order of colliding key %s",
+                         dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
 
        ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
-       dbg_dump_node(c, dent1);
+       ubifs_dump_node(c, dent1);
        ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
-       dbg_dump_node(c, dent2);
+       ubifs_dump_node(c, dent2);
 
 out_free:
        kfree(dent2);
@@ -1527,10 +1447,10 @@ static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
 out:
        ubifs_err("failed, error %d", err);
        ubifs_msg("dump of the znode");
-       dbg_dump_znode(c, znode);
+       ubifs_dump_znode(c, znode);
        if (zp) {
                ubifs_msg("dump of the parent znode");
-               dbg_dump_znode(c, zp);
+               ubifs_dump_znode(c, zp);
        }
        dump_stack();
        return -EINVAL;
@@ -1597,9 +1517,9 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
                                return err;
                        if (err) {
                                ubifs_msg("first znode");
-                               dbg_dump_znode(c, prev);
+                               ubifs_dump_znode(c, prev);
                                ubifs_msg("second znode");
-                               dbg_dump_znode(c, znode);
+                               ubifs_dump_znode(c, znode);
                                return -EINVAL;
                        }
                }
@@ -1686,9 +1606,9 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                if (znode_cb) {
                        err = znode_cb(c, znode, priv);
                        if (err) {
-                               ubifs_err("znode checking function returned "
-                                         "error %d", err);
-                               dbg_dump_znode(c, znode);
+                               ubifs_err("znode checking function returned error %d",
+                                         err);
+                               ubifs_dump_znode(c, znode);
                                goto out_dump;
                        }
                }
@@ -1697,9 +1617,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                                zbr = &znode->zbranch[idx];
                                err = leaf_cb(c, zbr, priv);
                                if (err) {
-                                       ubifs_err("leaf checking function "
-                                                 "returned error %d, for leaf "
-                                                 "at LEB %d:%d",
+                                       ubifs_err("leaf checking function returned error %d, for leaf at LEB %d:%d",
                                                  err, zbr->lnum, zbr->offs);
                                        goto out_dump;
                                }
@@ -1756,7 +1674,7 @@ out_dump:
        else
                zbr = &c->zroot;
        ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
-       dbg_dump_znode(c, znode);
+       ubifs_dump_znode(c, znode);
 out_unlock:
        mutex_unlock(&c->tnc_mutex);
        return err;
@@ -1807,8 +1725,8 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
        }
 
        if (calc != idx_size) {
-               ubifs_err("index size check failed: calculated size is %lld, "
-                         "should be %lld", calc, idx_size);
+               ubifs_err("index size check failed: calculated size is %lld, should be %lld",
+                         calc, idx_size);
                dump_stack();
                return -EINVAL;
        }
@@ -2120,8 +2038,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                fscki = read_add_inode(c, priv, inum);
                if (IS_ERR(fscki)) {
                        err = PTR_ERR(fscki);
-                       ubifs_err("error %d while processing data node and "
-                                 "trying to find inode node %lu",
+                       ubifs_err("error %d while processing data node and trying to find inode node %lu",
                                  err, (unsigned long)inum);
                        goto out_dump;
                }
@@ -2131,9 +2048,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                blk_offs <<= UBIFS_BLOCK_SHIFT;
                blk_offs += le32_to_cpu(dn->size);
                if (blk_offs > fscki->size) {
-                       ubifs_err("data node at LEB %d:%d is not within inode "
-                                 "size %lld", zbr->lnum, zbr->offs,
-                                 fscki->size);
+                       ubifs_err("data node at LEB %d:%d is not within inode size %lld",
+                                 zbr->lnum, zbr->offs, fscki->size);
                        err = -EINVAL;
                        goto out_dump;
                }
@@ -2154,8 +2070,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                fscki = read_add_inode(c, priv, inum);
                if (IS_ERR(fscki)) {
                        err = PTR_ERR(fscki);
-                       ubifs_err("error %d while processing entry node and "
-                                 "trying to find inode node %lu",
+                       ubifs_err("error %d while processing entry node and trying to find inode node %lu",
                                  err, (unsigned long)inum);
                        goto out_dump;
                }
@@ -2167,8 +2082,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                fscki1 = read_add_inode(c, priv, inum);
                if (IS_ERR(fscki1)) {
                        err = PTR_ERR(fscki1);
-                       ubifs_err("error %d while processing entry node and "
-                                 "trying to find parent inode node %lu",
+                       ubifs_err("error %d while processing entry node and trying to find parent inode node %lu",
                                  err, (unsigned long)inum);
                        goto out_dump;
                }
@@ -2192,7 +2106,7 @@ out:
 
 out_dump:
        ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
-       dbg_dump_node(c, node);
+       ubifs_dump_node(c, node);
 out_free:
        kfree(node);
        return err;
@@ -2258,61 +2172,52 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
                         */
                        if (fscki->inum != UBIFS_ROOT_INO &&
                            fscki->references != 1) {
-                               ubifs_err("directory inode %lu has %d "
-                                         "direntries which refer it, but "
-                                         "should be 1",
+                               ubifs_err("directory inode %lu has %d direntries which refer it, but should be 1",
                                          (unsigned long)fscki->inum,
                                          fscki->references);
                                goto out_dump;
                        }
                        if (fscki->inum == UBIFS_ROOT_INO &&
                            fscki->references != 0) {
-                               ubifs_err("root inode %lu has non-zero (%d) "
-                                         "direntries which refer it",
+                               ubifs_err("root inode %lu has non-zero (%d) direntries which refer it",
                                          (unsigned long)fscki->inum,
                                          fscki->references);
                                goto out_dump;
                        }
                        if (fscki->calc_sz != fscki->size) {
-                               ubifs_err("directory inode %lu size is %lld, "
-                                         "but calculated size is %lld",
+                               ubifs_err("directory inode %lu size is %lld, but calculated size is %lld",
                                          (unsigned long)fscki->inum,
                                          fscki->size, fscki->calc_sz);
                                goto out_dump;
                        }
                        if (fscki->calc_cnt != fscki->nlink) {
-                               ubifs_err("directory inode %lu nlink is %d, "
-                                         "but calculated nlink is %d",
+                               ubifs_err("directory inode %lu nlink is %d, but calculated nlink is %d",
                                          (unsigned long)fscki->inum,
                                          fscki->nlink, fscki->calc_cnt);
                                goto out_dump;
                        }
                } else {
                        if (fscki->references != fscki->nlink) {
-                               ubifs_err("inode %lu nlink is %d, but "
-                                         "calculated nlink is %d",
+                               ubifs_err("inode %lu nlink is %d, but calculated nlink is %d",
                                          (unsigned long)fscki->inum,
                                          fscki->nlink, fscki->references);
                                goto out_dump;
                        }
                }
                if (fscki->xattr_sz != fscki->calc_xsz) {
-                       ubifs_err("inode %lu has xattr size %u, but "
-                                 "calculated size is %lld",
+                       ubifs_err("inode %lu has xattr size %u, but calculated size is %lld",
                                  (unsigned long)fscki->inum, fscki->xattr_sz,
                                  fscki->calc_xsz);
                        goto out_dump;
                }
                if (fscki->xattr_cnt != fscki->calc_xcnt) {
-                       ubifs_err("inode %lu has %u xattrs, but "
-                                 "calculated count is %lld",
+                       ubifs_err("inode %lu has %u xattrs, but calculated count is %lld",
                                  (unsigned long)fscki->inum,
                                  fscki->xattr_cnt, fscki->calc_xcnt);
                        goto out_dump;
                }
                if (fscki->xattr_nms != fscki->calc_xnms) {
-                       ubifs_err("inode %lu has xattr names' size %u, but "
-                                 "calculated names' size is %lld",
+                       ubifs_err("inode %lu has xattr names' size %u, but calculated names' size is %lld",
                                  (unsigned long)fscki->inum, fscki->xattr_nms,
                                  fscki->calc_xnms);
                        goto out_dump;
@@ -2350,7 +2255,7 @@ out_dump:
 
        ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
                  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
-       dbg_dump_node(c, ino);
+       ubifs_dump_node(c, ino);
        kfree(ino);
        return -EINVAL;
 }
@@ -2421,12 +2326,12 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
 
                if (sa->type != UBIFS_DATA_NODE) {
                        ubifs_err("bad node type %d", sa->type);
-                       dbg_dump_node(c, sa->node);
+                       ubifs_dump_node(c, sa->node);
                        return -EINVAL;
                }
                if (sb->type != UBIFS_DATA_NODE) {
                        ubifs_err("bad node type %d", sb->type);
-                       dbg_dump_node(c, sb->node);
+                       ubifs_dump_node(c, sb->node);
                        return -EINVAL;
                }
 
@@ -2457,8 +2362,8 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
        return 0;
 
 error_dump:
-       dbg_dump_node(c, sa->node);
-       dbg_dump_node(c, sb->node);
+       ubifs_dump_node(c, sa->node);
+       ubifs_dump_node(c, sb->node);
        return -EINVAL;
 }
 
@@ -2489,13 +2394,13 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
                if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
                    sa->type != UBIFS_XENT_NODE) {
                        ubifs_err("bad node type %d", sa->type);
-                       dbg_dump_node(c, sa->node);
+                       ubifs_dump_node(c, sa->node);
                        return -EINVAL;
                }
                if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
                    sa->type != UBIFS_XENT_NODE) {
                        ubifs_err("bad node type %d", sb->type);
-                       dbg_dump_node(c, sb->node);
+                       ubifs_dump_node(c, sb->node);
                        return -EINVAL;
                }
 
@@ -2545,9 +2450,9 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
 
 error_dump:
        ubifs_msg("dumping first node");
-       dbg_dump_node(c, sa->node);
+       ubifs_dump_node(c, sa->node);
        ubifs_msg("dumping second node");
-       dbg_dump_node(c, sb->node);
+       ubifs_dump_node(c, sb->node);
        return -EINVAL;
        return 0;
 }
@@ -2676,7 +2581,7 @@ static void cut_data(const void *buf, unsigned int len)
 }
 
 int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
-                 int offs, int len, int dtype)
+                 int offs, int len)
 {
        int err, failing;
 
@@ -2686,7 +2591,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
        failing = power_cut_emulated(c, lnum, 1);
        if (failing)
                cut_data(buf, len);
-       err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
+       err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
        if (err)
                return err;
        if (failing)
@@ -2695,7 +2600,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
 }
 
 int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf,
-                  int len, int dtype)
+                  int len)
 {
        int err;
 
@@ -2703,7 +2608,7 @@ int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf,
                return -EROFS;
        if (power_cut_emulated(c, lnum, 1))
                return -EROFS;
-       err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
+       err = ubi_leb_change(c->ubi, lnum, buf, len);
        if (err)
                return err;
        if (power_cut_emulated(c, lnum, 1))
@@ -2727,7 +2632,7 @@ int dbg_leb_unmap(struct ubifs_info *c, int lnum)
        return 0;
 }
 
-int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype)
+int dbg_leb_map(struct ubifs_info *c, int lnum)
 {
        int err;
 
@@ -2735,7 +2640,7 @@ int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype)
                return -EROFS;
        if (power_cut_emulated(c, lnum, 0))
                return -EROFS;
-       err = ubi_leb_map(c->ubi, lnum, dtype);
+       err = ubi_leb_map(c->ubi, lnum);
        if (err)
                return err;
        if (power_cut_emulated(c, lnum, 0))
@@ -2855,16 +2760,16 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u,
         * 'ubifs-debug' file-system instead.
         */
        if (file->f_path.dentry == d->dfs_dump_lprops) {
-               dbg_dump_lprops(c);
+               ubifs_dump_lprops(c);
                return count;
        }
        if (file->f_path.dentry == d->dfs_dump_budg) {
-               dbg_dump_budg(c, &c->bi);
+               ubifs_dump_budg(c, &c->bi);
                return count;
        }
        if (file->f_path.dentry == d->dfs_dump_tnc) {
                mutex_lock(&c->tnc_mutex);
-               dbg_dump_tnc(c);
+               ubifs_dump_tnc(c);
                mutex_unlock(&c->tnc_mutex);
                return count;
        }
@@ -2918,6 +2823,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
        struct dentry *dent;
        struct ubifs_debug_info *d = c->dbg;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
                     c->vi.ubi_num, c->vi.vol_id);
        if (n == UBIFS_DFS_DIR_LEN) {
@@ -3010,7 +2918,8 @@ out:
  */
 void dbg_debugfs_exit_fs(struct ubifs_info *c)
 {
-       debugfs_remove_recursive(c->dbg->dfs_dir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(c->dbg->dfs_dir);
 }
 
 struct ubifs_global_debug_info ubifs_dbg;
@@ -3095,6 +3004,9 @@ int dbg_debugfs_init(void)
        const char *fname;
        struct dentry *dent;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        fname = "ubifs";
        dent = debugfs_create_dir(fname, NULL);
        if (IS_ERR_OR_NULL(dent))
@@ -3159,7 +3071,8 @@ out:
  */
 void dbg_debugfs_exit(void)
 {
-       debugfs_remove_recursive(dfs_rootdir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(dfs_rootdir);
 }
 
 /**
@@ -3187,5 +3100,3 @@ void ubifs_debugging_exit(struct ubifs_info *c)
 {
        kfree(c->dbg);
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index c9d2941..a6b9697 100644 (file)
@@ -29,8 +29,6 @@ typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
 typedef int (*dbg_znode_callback)(struct ubifs_info *c,
                                  struct ubifs_znode *znode, void *priv);
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
-
 /*
  * The UBIFS debugfs directory name pattern and maximum name length (3 for "ubi"
  * + 1 for "_" and plus 2x2 for 2 UBI numbers and 1 for the trailing zero byte.
@@ -147,63 +145,50 @@ struct ubifs_global_debug_info {
 
 #define ubifs_assert(expr) do {                                                \
        if (unlikely(!(expr))) {                                               \
-               printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
+               pr_crit("UBIFS assert failed in %s at %u (pid %d)\n",          \
                       __func__, __LINE__, current->pid);                      \
-               dbg_dump_stack();                                              \
+               dump_stack();                                                  \
        }                                                                      \
 } while (0)
 
 #define ubifs_assert_cmt_locked(c) do {                                        \
        if (unlikely(down_write_trylock(&(c)->commit_sem))) {                  \
                up_write(&(c)->commit_sem);                                    \
-               printk(KERN_CRIT "commit lock is not locked!\n");              \
+               pr_crit("commit lock is not locked!\n");                       \
                ubifs_assert(0);                                               \
        }                                                                      \
 } while (0)
 
-#define dbg_dump_stack() dump_stack()
-
-#define dbg_err(fmt, ...) do {                                                 \
-       spin_lock(&dbg_lock);                                                  \
-       ubifs_err(fmt, ##__VA_ARGS__);                                         \
-       spin_unlock(&dbg_lock);                                                \
-} while (0)
-
-const char *dbg_key_str0(const struct ubifs_info *c,
-                        const union ubifs_key *key);
-const char *dbg_key_str1(const struct ubifs_info *c,
-                        const union ubifs_key *key);
-
-/*
- * TODO: these macros are now broken because there is no locking around them
- * and we use a global buffer for the key string. This means that in case of
- * concurrent execution we will end up with incorrect and messy key strings.
- */
-#define DBGKEY(key) dbg_key_str0(c, (key))
-#define DBGKEY1(key) dbg_key_str1(c, (key))
-
-extern spinlock_t dbg_lock;
-
 #define ubifs_dbg_msg(type, fmt, ...) \
-       pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
-
-/* Just a debugging messages not related to any specific UBIFS subsystem */
-#define dbg_msg(fmt, ...)                                                     \
-       printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid,  \
-              __func__, ##__VA_ARGS__)
+       pr_debug("UBIFS DBG " type " (pid %d): " fmt "\n", current->pid,       \
+                ##__VA_ARGS__)
+
+#define DBG_KEY_BUF_LEN 32
+#define ubifs_dbg_msg_key(type, key, fmt, ...) do {                            \
+       char __tmp_key_buf[DBG_KEY_BUF_LEN];                                   \
+       pr_debug("UBIFS DBG " type " (pid %d): " fmt "%s\n", current->pid,     \
+                ##__VA_ARGS__,                                                \
+                dbg_snprintf_key(c, key, __tmp_key_buf, DBG_KEY_BUF_LEN));    \
+} while (0)
 
 /* General messages */
 #define dbg_gen(fmt, ...)   ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
 /* Additional journal messages */
 #define dbg_jnl(fmt, ...)   ubifs_dbg_msg("jnl", fmt, ##__VA_ARGS__)
+#define dbg_jnlk(key, fmt, ...) \
+       ubifs_dbg_msg_key("jnl", key, fmt, ##__VA_ARGS__)
 /* Additional TNC messages */
 #define dbg_tnc(fmt, ...)   ubifs_dbg_msg("tnc", fmt, ##__VA_ARGS__)
+#define dbg_tnck(key, fmt, ...) \
+       ubifs_dbg_msg_key("tnc", key, fmt, ##__VA_ARGS__)
 /* Additional lprops messages */
 #define dbg_lp(fmt, ...)    ubifs_dbg_msg("lp", fmt, ##__VA_ARGS__)
 /* Additional LEB find messages */
 #define dbg_find(fmt, ...)  ubifs_dbg_msg("find", fmt, ##__VA_ARGS__)
 /* Additional mount messages */
 #define dbg_mnt(fmt, ...)   ubifs_dbg_msg("mnt", fmt, ##__VA_ARGS__)
+#define dbg_mntk(key, fmt, ...) \
+       ubifs_dbg_msg_key("mnt", key, fmt, ##__VA_ARGS__)
 /* Additional I/O messages */
 #define dbg_io(fmt, ...)    ubifs_dbg_msg("io", fmt, ##__VA_ARGS__)
 /* Additional commit messages */
@@ -259,27 +244,29 @@ const char *dbg_cstate(int cmt_state);
 const char *dbg_jhead(int jhead);
 const char *dbg_get_key_dump(const struct ubifs_info *c,
                             const union ubifs_key *key);
-void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode);
-void dbg_dump_node(const struct ubifs_info *c, const void *node);
-void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum,
-                      int offs);
-void dbg_dump_budget_req(const struct ubifs_budget_req *req);
-void dbg_dump_lstats(const struct ubifs_lp_stats *lst);
-void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi);
-void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp);
-void dbg_dump_lprops(struct ubifs_info *c);
-void dbg_dump_lpt_info(struct ubifs_info *c);
-void dbg_dump_leb(const struct ubifs_info *c, int lnum);
-void dbg_dump_sleb(const struct ubifs_info *c,
-                  const struct ubifs_scan_leb *sleb, int offs);
-void dbg_dump_znode(const struct ubifs_info *c,
-                   const struct ubifs_znode *znode);
-void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat);
-void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
-                   struct ubifs_nnode *parent, int iip);
-void dbg_dump_tnc(struct ubifs_info *c);
-void dbg_dump_index(struct ubifs_info *c);
-void dbg_dump_lpt_lebs(const struct ubifs_info *c);
+const char *dbg_snprintf_key(const struct ubifs_info *c,
+                            const union ubifs_key *key, char *buffer, int len);
+void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode);
+void ubifs_dump_node(const struct ubifs_info *c, const void *node);
+void ubifs_dump_budget_req(const struct ubifs_budget_req *req);
+void ubifs_dump_lstats(const struct ubifs_lp_stats *lst);
+void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi);
+void ubifs_dump_lprop(const struct ubifs_info *c,
+                     const struct ubifs_lprops *lp);
+void ubifs_dump_lprops(struct ubifs_info *c);
+void ubifs_dump_lpt_info(struct ubifs_info *c);
+void ubifs_dump_leb(const struct ubifs_info *c, int lnum);
+void ubifs_dump_sleb(const struct ubifs_info *c,
+                    const struct ubifs_scan_leb *sleb, int offs);
+void ubifs_dump_znode(const struct ubifs_info *c,
+                     const struct ubifs_znode *znode);
+void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap,
+                    int cat);
+void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
+                     struct ubifs_nnode *parent, int iip);
+void ubifs_dump_tnc(struct ubifs_info *c);
+void ubifs_dump_index(struct ubifs_info *c);
+void ubifs_dump_lpt_lebs(const struct ubifs_info *c);
 
 int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                   dbg_znode_callback znode_cb, void *priv);
@@ -309,11 +296,10 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head);
 int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head);
 
 int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
-                 int len, int dtype);
-int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
-                  int dtype);
+                 int len);
+int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len);
 int dbg_leb_unmap(struct ubifs_info *c, int lnum);
-int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype);
+int dbg_leb_map(struct ubifs_info *c, int lnum);
 
 /* Debugfs-related stuff */
 int dbg_debugfs_init(void);
@@ -321,155 +307,4 @@ void dbg_debugfs_exit(void);
 int dbg_debugfs_init_fs(struct ubifs_info *c);
 void dbg_debugfs_exit_fs(struct ubifs_info *c);
 
-#else /* !CONFIG_UBIFS_FS_DEBUG */
-
-/* Use "if (0)" to make compiler check arguments even if debugging is off */
-#define ubifs_assert(expr)  do {                                               \
-       if (0)                                                                 \
-               printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
-                      __func__, __LINE__, current->pid);                      \
-} while (0)
-
-#define dbg_err(fmt, ...)   do {                   \
-       if (0)                                     \
-               ubifs_err(fmt, ##__VA_ARGS__);     \
-} while (0)
-
-#define DBGKEY(key)  ((char *)(key))
-#define DBGKEY1(key) ((char *)(key))
-
-#define ubifs_dbg_msg(fmt, ...) do {                        \
-       if (0)                                              \
-               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
-} while (0)
-
-#define dbg_dump_stack()
-#define ubifs_assert_cmt_locked(c)
-
-#define dbg_msg(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gen(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_jnl(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_tnc(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_lp(fmt, ...)    ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_find(fmt, ...)  ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_mnt(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_io(fmt, ...)    ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_cmt(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_budg(fmt, ...)  ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_log(fmt, ...)   ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gc(fmt, ...)    ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_scan(fmt, ...)  ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_rcvry(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-
-static inline int ubifs_debugging_init(struct ubifs_info *c)      { return 0; }
-static inline void ubifs_debugging_exit(struct ubifs_info *c)     { return; }
-static inline const char *dbg_ntype(int type)                     { return ""; }
-static inline const char *dbg_cstate(int cmt_state)               { return ""; }
-static inline const char *dbg_jhead(int jhead)                    { return ""; }
-static inline const char *
-dbg_get_key_dump(const struct ubifs_info *c,
-                const union ubifs_key *key)                      { return ""; }
-static inline void dbg_dump_inode(struct ubifs_info *c,
-                                 const struct inode *inode)      { return; }
-static inline void dbg_dump_node(const struct ubifs_info *c,
-                                const void *node)                { return; }
-static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
-                                    void *node, int lnum,
-                                    int offs)                    { return; }
-static inline void
-dbg_dump_budget_req(const struct ubifs_budget_req *req)           { return; }
-static inline void
-dbg_dump_lstats(const struct ubifs_lp_stats *lst)                 { return; }
-static inline void
-dbg_dump_budg(struct ubifs_info *c,
-             const struct ubifs_budg_info *bi)                   { return; }
-static inline void dbg_dump_lprop(const struct ubifs_info *c,
-                                 const struct ubifs_lprops *lp)  { return; }
-static inline void dbg_dump_lprops(struct ubifs_info *c)          { return; }
-static inline void dbg_dump_lpt_info(struct ubifs_info *c)        { return; }
-static inline void dbg_dump_leb(const struct ubifs_info *c,
-                               int lnum)                         { return; }
-static inline void
-dbg_dump_sleb(const struct ubifs_info *c,
-             const struct ubifs_scan_leb *sleb, int offs)        { return; }
-static inline void
-dbg_dump_znode(const struct ubifs_info *c,
-              const struct ubifs_znode *znode)                   { return; }
-static inline void dbg_dump_heap(struct ubifs_info *c,
-                                struct ubifs_lpt_heap *heap,
-                                int cat)                         { return; }
-static inline void dbg_dump_pnode(struct ubifs_info *c,
-                                 struct ubifs_pnode *pnode,
-                                 struct ubifs_nnode *parent,
-                                 int iip)                        { return; }
-static inline void dbg_dump_tnc(struct ubifs_info *c)             { return; }
-static inline void dbg_dump_index(struct ubifs_info *c)           { return; }
-static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c)  { return; }
-
-static inline int dbg_walk_index(struct ubifs_info *c,
-                                dbg_leaf_callback leaf_cb,
-                                dbg_znode_callback znode_cb,
-                                void *priv)                      { return 0; }
-static inline void dbg_save_space_info(struct ubifs_info *c)      { return; }
-static inline int dbg_check_space_info(struct ubifs_info *c)      { return 0; }
-static inline int dbg_check_lprops(struct ubifs_info *c)          { return 0; }
-static inline int
-dbg_old_index_check_init(struct ubifs_info *c,
-                        struct ubifs_zbranch *zroot)             { return 0; }
-static inline int
-dbg_check_old_index(struct ubifs_info *c,
-                   struct ubifs_zbranch *zroot)                  { return 0; }
-static inline int dbg_check_cats(struct ubifs_info *c)            { return 0; }
-static inline int dbg_check_ltab(struct ubifs_info *c)            { return 0; }
-static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c)      { return 0; }
-static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
-                                int action, int len)             { return 0; }
-static inline int
-dbg_check_synced_i_size(const struct ubifs_info *c,
-                       struct inode *inode)                      { return 0; }
-static inline int dbg_check_dir(struct ubifs_info *c,
-                               const struct inode *dir)          { return 0; }
-static inline int dbg_check_tnc(struct ubifs_info *c, int extra)  { return 0; }
-static inline int dbg_check_idx_size(struct ubifs_info *c,
-                                    long long idx_size)          { return 0; }
-static inline int dbg_check_filesystem(struct ubifs_info *c)      { return 0; }
-static inline void dbg_check_heap(struct ubifs_info *c,
-                                 struct ubifs_lpt_heap *heap,
-                                 int cat, int add_pos)           { return; }
-static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
-       struct ubifs_cnode *cnode, int row, int col)              { return 0; }
-static inline int dbg_check_inode_size(struct ubifs_info *c,
-                                      const struct inode *inode,
-                                      loff_t size)               { return 0; }
-static inline int
-dbg_check_data_nodes_order(struct ubifs_info *c,
-                          struct list_head *head)                { return 0; }
-static inline int
-dbg_check_nondata_nodes_order(struct ubifs_info *c,
-                             struct list_head *head)             { return 0; }
-
-static inline int dbg_leb_write(struct ubifs_info *c, int lnum,
-                               const void *buf, int offset,
-                               int len, int dtype)               { return 0; }
-static inline int dbg_leb_change(struct ubifs_info *c, int lnum,
-                                const void *buf, int len,
-                                int dtype)                       { return 0; }
-static inline int dbg_leb_unmap(struct ubifs_info *c, int lnum)   { return 0; }
-static inline int dbg_leb_map(struct ubifs_info *c, int lnum,
-                             int dtype)                          { return 0; }
-
-static inline int dbg_is_chk_gen(const struct ubifs_info *c)      { return 0; }
-static inline int dbg_is_chk_index(const struct ubifs_info *c)    { return 0; }
-static inline int dbg_is_chk_orph(const struct ubifs_info *c)     { return 0; }
-static inline int dbg_is_chk_lprops(const struct ubifs_info *c)   { return 0; }
-static inline int dbg_is_chk_fs(const struct ubifs_info *c)       { return 0; }
-static inline int dbg_is_tst_rcvry(const struct ubifs_info *c)    { return 0; }
-static inline int dbg_is_power_cut(const struct ubifs_info *c)    { return 0; }
-
-static inline int dbg_debugfs_init(void)                          { return 0; }
-static inline void dbg_debugfs_exit(void)                         { return; }
-static inline int dbg_debugfs_init_fs(struct ubifs_info *c)       { return 0; }
-static inline int dbg_debugfs_exit_fs(struct ubifs_info *c)       { return 0; }
-
-#endif /* !CONFIG_UBIFS_FS_DEBUG */
 #endif /* !__UBIFS_DEBUG_H__ */
index aaebf0f..a36f821 100644 (file)
@@ -170,8 +170,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
        return inode;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
-
 static int dbg_check_name(const struct ubifs_info *c,
                          const struct ubifs_dent_node *dent,
                          const struct qstr *nm)
@@ -185,12 +183,6 @@ static int dbg_check_name(const struct ubifs_info *c,
        return 0;
 }
 
-#else
-
-#define dbg_check_name(c, dent, nm) 0
-
-#endif
-
 static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
                                   struct nameidata *nd)
 {
@@ -590,6 +582,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
        int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
        int err, budgeted = 1;
        struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
+       unsigned int saved_nlink = inode->i_nlink;
 
        /*
         * Budget request settings: deletion direntry, deletion inode (+1 for
@@ -637,7 +630,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
 out_cancel:
        dir->i_size += sz_change;
        dir_ui->ui_size = dir->i_size;
-       inc_nlink(inode);
+       set_nlink(inode, saved_nlink);
        unlock_2_inodes(dir, inode);
        if (budgeted)
                ubifs_release_budget(c, &req);
@@ -728,8 +721,7 @@ out_cancel:
        dir->i_size += sz_change;
        dir_ui->ui_size = dir->i_size;
        inc_nlink(dir);
-       inc_nlink(inode);
-       inc_nlink(inode);
+       set_nlink(inode, 2);
        unlock_2_inodes(dir, inode);
        if (budgeted)
                ubifs_release_budget(c, &req);
@@ -1001,6 +993,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
        struct timespec time;
+       unsigned int uninitialized_var(saved_nlink);
 
        /*
         * Budget request settings: deletion direntry, new direntry, removing
@@ -1011,8 +1004,8 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * separately.
         */
 
-       dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in "
-               "dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name,
+       dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in dir ino %lu",
+               old_dentry->d_name.len, old_dentry->d_name.name,
                old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
                new_dentry->d_name.name, new_dir->i_ino);
        ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
@@ -1083,13 +1076,14 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (unlink) {
                /*
                 * Directories cannot have hard-links, so if this is a
-                * directory, decrement its @i_nlink twice because an empty
-                * directory has @i_nlink 2.
+                * directory, just clear @i_nlink.
                 */
+               saved_nlink = new_inode->i_nlink;
                if (is_dir)
+                       clear_nlink(new_inode);
+               else
                        drop_nlink(new_inode);
                new_inode->i_ctime = time;
-               drop_nlink(new_inode);
        } else {
                new_dir->i_size += new_sz;
                ubifs_inode(new_dir)->ui_size = new_dir->i_size;
@@ -1126,9 +1120,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 out_cancel:
        if (unlink) {
-               if (is_dir)
-                       inc_nlink(new_inode);
-               inc_nlink(new_inode);
+               set_nlink(new_inode, saved_nlink);
        } else {
                new_dir->i_size -= new_sz;
                ubifs_inode(new_dir)->ui_size = new_dir->i_size;
@@ -1211,12 +1203,10 @@ const struct inode_operations ubifs_dir_inode_operations = {
        .rename      = ubifs_rename,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
-#ifdef CONFIG_UBIFS_FS_XATTR
        .setxattr    = ubifs_setxattr,
        .getxattr    = ubifs_getxattr,
        .listxattr   = ubifs_listxattr,
        .removexattr = ubifs_removexattr,
-#endif
 };
 
 const struct file_operations ubifs_dir_operations = {
index 6589006..7159576 100644 (file)
@@ -97,7 +97,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 dump:
        ubifs_err("bad data node (block %u, inode %lu)",
                  block, inode->i_ino);
-       dbg_dump_node(c, dn);
+       ubifs_dump_node(c, dn);
        return -EINVAL;
 }
 
@@ -1486,8 +1486,8 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
-                       ubifs_warn("out of space for mmapped file "
-                                  "(inode number %lu)", inode->i_ino);
+                       ubifs_warn("out of space for mmapped file (inode number %lu)",
+                                  inode->i_ino);
                return VM_FAULT_SIGBUS;
        }
 
@@ -1561,12 +1561,10 @@ const struct address_space_operations ubifs_file_address_operations = {
 const struct inode_operations ubifs_file_inode_operations = {
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
-#ifdef CONFIG_UBIFS_FS_XATTR
        .setxattr    = ubifs_setxattr,
        .getxattr    = ubifs_getxattr,
        .listxattr   = ubifs_listxattr,
        .removexattr = ubifs_removexattr,
-#endif
 };
 
 const struct inode_operations ubifs_symlink_inode_operations = {
index ded29f6..76ca53c 100644 (file)
@@ -109,7 +109,7 @@ static int switch_gc_head(struct ubifs_info *c)
                return err;
 
        c->gc_lnum = -1;
-       err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0, UBI_LONGTERM);
+       err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
        return err;
 }
 
@@ -714,9 +714,9 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
                        break;
                }
 
-               dbg_gc("found LEB %d: free %d, dirty %d, sum %d "
-                      "(min. space %d)", lp.lnum, lp.free, lp.dirty,
-                      lp.free + lp.dirty, min_space);
+               dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
+                      lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
+                      min_space);
 
                space_before = c->leb_size - wbuf->offs - wbuf->used;
                if (wbuf->lnum == -1)
index 9228950..e18b988 100644 (file)
@@ -109,13 +109,13 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
        if (err && (err != -EBADMSG || even_ebadmsg)) {
                ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
                          len, lnum, offs, err);
-               dbg_dump_stack();
+               dump_stack();
        }
        return err;
 }
 
 int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
-                   int len, int dtype)
+                   int len)
 {
        int err;
 
@@ -123,20 +123,19 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
-               err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
+               err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
        else
-               err = dbg_leb_write(c, lnum, buf, offs, len, dtype);
+               err = dbg_leb_write(c, lnum, buf, offs, len);
        if (err) {
                ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
                          len, lnum, offs, err);
                ubifs_ro_mode(c, err);
-               dbg_dump_stack();
+               dump_stack();
        }
        return err;
 }
 
-int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
-                    int dtype)
+int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
 {
        int err;
 
@@ -144,14 +143,14 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
-               err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
+               err = ubi_leb_change(c->ubi, lnum, buf, len);
        else
-               err = dbg_leb_change(c, lnum, buf, len, dtype);
+               err = dbg_leb_change(c, lnum, buf, len);
        if (err) {
                ubifs_err("changing %d bytes in LEB %d failed, error %d",
                          len, lnum, err);
                ubifs_ro_mode(c, err);
-               dbg_dump_stack();
+               dump_stack();
        }
        return err;
 }
@@ -170,12 +169,12 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
        if (err) {
                ubifs_err("unmap LEB %d failed, error %d", lnum, err);
                ubifs_ro_mode(c, err);
-               dbg_dump_stack();
+               dump_stack();
        }
        return err;
 }
 
-int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype)
+int ubifs_leb_map(struct ubifs_info *c, int lnum)
 {
        int err;
 
@@ -183,13 +182,13 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype)
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
-               err = ubi_leb_map(c->ubi, lnum, dtype);
+               err = ubi_leb_map(c->ubi, lnum);
        else
-               err = dbg_leb_map(c, lnum, dtype);
+               err = dbg_leb_map(c, lnum);
        if (err) {
                ubifs_err("mapping LEB %d failed, error %d", lnum, err);
                ubifs_ro_mode(c, err);
-               dbg_dump_stack();
+               dump_stack();
        }
        return err;
 }
@@ -202,7 +201,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
        if (err < 0) {
                ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
                          lnum, err);
-               dbg_dump_stack();
+               dump_stack();
        }
        return err;
 }
@@ -294,8 +293,8 @@ out_len:
 out:
        if (!quiet) {
                ubifs_err("bad node at LEB %d:%d", lnum, offs);
-               dbg_dump_node(c, buf);
-               dbg_dump_stack();
+               ubifs_dump_node(c, buf);
+               dump_stack();
        }
        return err;
 }
@@ -523,8 +522,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
        dirt = sync_len - wbuf->used;
        if (dirt)
                ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
-       err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len,
-                             wbuf->dtype);
+       err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
        if (err)
                return err;
 
@@ -562,14 +560,12 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
  * @wbuf: write-buffer
  * @lnum: logical eraseblock number to seek to
  * @offs: logical eraseblock offset to seek to
- * @dtype: data type
  *
  * This function targets the write-buffer to logical eraseblock @lnum:@offs.
  * The write-buffer has to be empty. Returns zero in case of success and a
  * negative error code in case of failure.
  */
-int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
-                          int dtype)
+int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
 {
        const struct ubifs_info *c = wbuf->c;
 
@@ -592,7 +588,6 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
        wbuf->avail = wbuf->size;
        wbuf->used = 0;
        spin_unlock(&wbuf->lock);
-       wbuf->dtype = dtype;
 
        return 0;
 }
@@ -719,8 +714,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
                        dbg_io("flush jhead %s wbuf to LEB %d:%d",
                               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                        err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
-                                             wbuf->offs, wbuf->size,
-                                             wbuf->dtype);
+                                             wbuf->offs, wbuf->size);
                        if (err)
                                goto out;
 
@@ -756,7 +750,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
                       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
                err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
-                                     wbuf->size, wbuf->dtype);
+                                     wbuf->size);
                if (err)
                        goto out;
 
@@ -775,7 +769,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
                dbg_io("write %d bytes to LEB %d:%d",
                       wbuf->size, wbuf->lnum, wbuf->offs);
                err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
-                                     wbuf->size, wbuf->dtype);
+                                     wbuf->size);
                if (err)
                        goto out;
 
@@ -797,7 +791,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
                       wbuf->offs);
                err = ubifs_leb_write(c, wbuf->lnum, buf + written,
-                                     wbuf->offs, n, wbuf->dtype);
+                                     wbuf->offs, n);
                if (err)
                        goto out;
                wbuf->offs += n;
@@ -841,9 +835,9 @@ exit:
 out:
        ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
                  len, wbuf->lnum, wbuf->offs, err);
-       dbg_dump_node(c, buf);
-       dbg_dump_stack();
-       dbg_dump_leb(c, wbuf->lnum);
+       ubifs_dump_node(c, buf);
+       dump_stack();
+       ubifs_dump_leb(c, wbuf->lnum);
        return err;
 }
 
@@ -854,7 +848,6 @@ out:
  * @len: node length
  * @lnum: logical eraseblock number
  * @offs: offset within the logical eraseblock
- * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
  *
  * This function automatically fills node magic number, assigns sequence
  * number, and calculates node CRC checksum. The length of the @buf buffer has
@@ -863,7 +856,7 @@ out:
  * success and a negative error code in case of failure.
  */
 int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
-                    int offs, int dtype)
+                    int offs)
 {
        int err, buf_len = ALIGN(len, c->min_io_size);
 
@@ -879,9 +872,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
                return -EROFS;
 
        ubifs_prepare_node(c, buf, len, 1);
-       err = ubifs_leb_write(c, lnum, buf, offs, buf_len, dtype);
+       err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
        if (err)
-               dbg_dump_node(c, buf);
+               ubifs_dump_node(c, buf);
 
        return err;
 }
@@ -960,8 +953,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
 
 out:
        ubifs_err("bad node at LEB %d:%d", lnum, offs);
-       dbg_dump_node(c, buf);
-       dbg_dump_stack();
+       ubifs_dump_node(c, buf);
+       dump_stack();
        return -EINVAL;
 }
 
@@ -1017,8 +1010,8 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
 out:
        ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
                  ubi_is_mapped(c->ubi, lnum));
-       dbg_dump_node(c, buf);
-       dbg_dump_stack();
+       ubifs_dump_node(c, buf);
+       dump_stack();
        return -EINVAL;
 }
 
@@ -1056,7 +1049,6 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
         */
        size = c->max_write_size - (c->leb_start % c->max_write_size);
        wbuf->avail = wbuf->size = size;
-       wbuf->dtype = UBI_UNKNOWN;
        wbuf->sync_callback = NULL;
        mutex_init(&wbuf->io_mutex);
        spin_lock_init(&wbuf->lock);
index cef0460..12c0f15 100644 (file)
@@ -214,7 +214,7 @@ out:
        err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
        if (err)
                goto out_return;
-       err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
+       err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
        if (err)
                goto out_unlock;
 
@@ -385,9 +385,9 @@ out:
        if (err == -ENOSPC) {
                /* This are some budgeting problems, print useful information */
                down_write(&c->commit_sem);
-               dbg_dump_stack();
-               dbg_dump_budg(c, &c->bi);
-               dbg_dump_lprops(c);
+               dump_stack();
+               ubifs_dump_budg(c, &c->bi);
+               ubifs_dump_lprops(c);
                cmt_retries = dbg_check_lprops(c);
                up_write(&c->commit_sem);
        }
@@ -697,9 +697,8 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
        int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
        struct ubifs_inode *ui = ubifs_inode(inode);
 
-       dbg_jnl("ino %lu, blk %u, len %d, key %s",
-               (unsigned long)key_inum(c, key), key_block(c, key), len,
-               DBGKEY(key));
+       dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
+               (unsigned long)key_inum(c, key), key_block(c, key), len);
        ubifs_assert(len <= UBIFS_BLOCK_SIZE);
 
        data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
@@ -1177,7 +1176,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
                dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
                blk = new_size >> UBIFS_BLOCK_SHIFT;
                data_key_init(c, &key, inum, blk);
-               dbg_jnl("last block key %s", DBGKEY(&key));
+               dbg_jnlk(&key, "last block key ");
                err = ubifs_tnc_lookup(c, &key, dn);
                if (err == -ENOENT)
                        dlen = 0; /* Not found (so it is a hole) */
@@ -1268,7 +1267,6 @@ out_free:
        return err;
 }
 
-#ifdef CONFIG_UBIFS_FS_XATTR
 
 /**
  * ubifs_jnl_delete_xattr - delete an extended attribute.
@@ -1463,4 +1461,3 @@ out_free:
        return err;
 }
 
-#endif /* CONFIG_UBIFS_FS_XATTR */
index 843beda..06649d2 100644 (file)
 
 #include "ubifs.h"
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
 static int dbg_check_bud_bytes(struct ubifs_info *c);
-#else
-#define dbg_check_bud_bytes(c) 0
-#endif
 
 /**
  * ubifs_search_bud - search bud LEB.
@@ -266,7 +262,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
                 * an unclean reboot, because the target LEB might have been
                 * unmapped, but not yet physically erased.
                 */
-               err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM);
+               err = ubifs_leb_map(c, bud->lnum);
                if (err)
                        goto out_unlock;
        }
@@ -274,7 +270,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
        dbg_log("write ref LEB %d:%d",
                c->lhead_lnum, c->lhead_offs);
        err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
-                              c->lhead_offs, UBI_SHORTTERM);
+                              c->lhead_offs);
        if (err)
                goto out_unlock;
 
@@ -323,17 +319,15 @@ static void remove_buds(struct ubifs_info *c)
                         * heads (non-closed buds).
                         */
                        c->cmt_bud_bytes += wbuf->offs - bud->start;
-                       dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
-                               "cmt_bud_bytes %lld", bud->lnum, bud->start,
-                               dbg_jhead(bud->jhead), wbuf->offs - bud->start,
-                               c->cmt_bud_bytes);
+                       dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
+                               bud->lnum, bud->start, dbg_jhead(bud->jhead),
+                               wbuf->offs - bud->start, c->cmt_bud_bytes);
                        bud->start = wbuf->offs;
                } else {
                        c->cmt_bud_bytes += c->leb_size - bud->start;
-                       dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
-                               "cmt_bud_bytes %lld", bud->lnum, bud->start,
-                               dbg_jhead(bud->jhead), c->leb_size - bud->start,
-                               c->cmt_bud_bytes);
+                       dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
+                               bud->lnum, bud->start, dbg_jhead(bud->jhead),
+                               c->leb_size - bud->start, c->cmt_bud_bytes);
                        rb_erase(p1, &c->buds);
                        /*
                         * If the commit does not finish, the recovery will need
@@ -426,7 +420,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
 
        len = ALIGN(len, c->min_io_size);
        dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
-       err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM);
+       err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
        if (err)
                goto out;
 
@@ -632,7 +626,7 @@ static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
                int sz = ALIGN(*offs, c->min_io_size), err;
 
                ubifs_pad(c, buf + *offs, sz - *offs);
-               err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
+               err = ubifs_leb_change(c, *lnum, buf, sz);
                if (err)
                        return err;
                *lnum = ubifs_next_log_lnum(c, *lnum);
@@ -711,7 +705,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
                int sz = ALIGN(offs, c->min_io_size);
 
                ubifs_pad(c, buf + offs, sz - offs);
-               err = ubifs_leb_change(c, write_lnum, buf, sz, UBI_SHORTTERM);
+               err = ubifs_leb_change(c, write_lnum, buf, sz);
                if (err)
                        goto out_free;
                offs = ALIGN(offs, c->min_io_size);
@@ -743,8 +737,6 @@ out_free:
        return err;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
-
 /**
  * dbg_check_bud_bytes - make sure bud bytes calculation are all right.
  * @c: UBIFS file-system description object
@@ -776,5 +768,3 @@ static int dbg_check_bud_bytes(struct ubifs_info *c)
 
        return err;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index ea9d491..46190a7 100644 (file)
@@ -453,7 +453,7 @@ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops)
        int new_cat = ubifs_categorize_lprops(c, lprops);
 
        if (old_cat == new_cat) {
-               struct ubifs_lpt_heap *heap = &c->lpt_heap[new_cat - 1];
+               struct ubifs_lpt_heap *heap;
 
                /* lprops on a heap now must be moved up or down */
                if (new_cat < 1 || new_cat > LPROPS_HEAP_CNT)
@@ -852,7 +852,9 @@ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c)
        return lprops;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
+/*
+ * Everything below is related to debugging.
+ */
 
 /**
  * dbg_check_cats - check category heaps and lists.
@@ -871,15 +873,15 @@ int dbg_check_cats(struct ubifs_info *c)
 
        list_for_each_entry(lprops, &c->empty_list, list) {
                if (lprops->free != c->leb_size) {
-                       ubifs_err("non-empty LEB %d on empty list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-empty LEB %d on empty list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
-                       ubifs_err("taken LEB %d on empty list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("taken LEB %d on empty list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
        }
@@ -887,15 +889,15 @@ int dbg_check_cats(struct ubifs_info *c)
        i = 0;
        list_for_each_entry(lprops, &c->freeable_list, list) {
                if (lprops->free + lprops->dirty != c->leb_size) {
-                       ubifs_err("non-freeable LEB %d on freeable list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-freeable LEB %d on freeable list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
-                       ubifs_err("taken LEB %d on freeable list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("taken LEB %d on freeable list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                i += 1;
@@ -917,21 +919,21 @@ int dbg_check_cats(struct ubifs_info *c)
 
        list_for_each_entry(lprops, &c->frdi_idx_list, list) {
                if (lprops->free + lprops->dirty != c->leb_size) {
-                       ubifs_err("non-freeable LEB %d on frdi_idx list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
-                       ubifs_err("taken LEB %d on frdi_idx list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("taken LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (!(lprops->flags & LPROPS_INDEX)) {
-                       ubifs_err("non-index LEB %d on frdi_idx list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
        }
@@ -986,9 +988,9 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
                        goto out;
                }
                if (lprops != lp) {
-                       dbg_msg("lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
-                               (size_t)lprops, (size_t)lp, lprops->lnum,
-                               lp->lnum);
+                       ubifs_err("lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
+                                 (size_t)lprops, (size_t)lp, lprops->lnum,
+                                 lp->lnum);
                        err = 4;
                        goto out;
                }
@@ -1006,9 +1008,9 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
        }
 out:
        if (err) {
-               dbg_msg("failed cat %d hpos %d err %d", cat, i, err);
-               dbg_dump_stack();
-               dbg_dump_heap(c, heap, cat);
+               ubifs_err("failed cat %d hpos %d err %d", cat, i, err);
+               dump_stack();
+               ubifs_dump_heap(c, heap, cat);
        }
 }
 
@@ -1115,8 +1117,8 @@ static int scan_check_cb(struct ubifs_info *c,
        if (IS_ERR(sleb)) {
                ret = PTR_ERR(sleb);
                if (ret == -EUCLEAN) {
-                       dbg_dump_lprops(c);
-                       dbg_dump_budg(c, &c->bi);
+                       ubifs_dump_lprops(c);
+                       ubifs_dump_budg(c, &c->bi);
                }
                goto out;
        }
@@ -1157,8 +1159,8 @@ static int scan_check_cb(struct ubifs_info *c,
 
        if (free > c->leb_size || free < 0 || dirty > c->leb_size ||
            dirty < 0) {
-               ubifs_err("bad calculated accounting for LEB %d: "
-                         "free %d, dirty %d", lnum, free, dirty);
+               ubifs_err("bad calculated accounting for LEB %d: free %d, dirty %d",
+                         lnum, free, dirty);
                goto out_destroy;
        }
 
@@ -1204,8 +1206,7 @@ static int scan_check_cb(struct ubifs_info *c,
                        /* Free but not unmapped LEB, it's fine */
                        is_idx = 0;
                else {
-                       ubifs_err("indexing node without indexing "
-                                 "flag");
+                       ubifs_err("indexing node without indexing flag");
                        goto out_print;
                }
        }
@@ -1240,10 +1241,9 @@ static int scan_check_cb(struct ubifs_info *c,
        return LPT_SCAN_CONTINUE;
 
 out_print:
-       ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, "
-                 "should be free %d, dirty %d",
+       ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d",
                  lnum, lp->free, lp->dirty, lp->flags, free, dirty);
-       dbg_dump_leb(c, lnum);
+       ubifs_dump_leb(c, lnum);
 out_destroy:
        ubifs_scan_destroy(sleb);
        ret = -EINVAL;
@@ -1294,12 +1294,10 @@ int dbg_check_lprops(struct ubifs_info *c)
            lst.total_dirty != c->lst.total_dirty ||
            lst.total_used != c->lst.total_used) {
                ubifs_err("bad overall accounting");
-               ubifs_err("calculated: empty_lebs %d, idx_lebs %d, "
-                         "total_free %lld, total_dirty %lld, total_used %lld",
+               ubifs_err("calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
                          lst.empty_lebs, lst.idx_lebs, lst.total_free,
                          lst.total_dirty, lst.total_used);
-               ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, "
-                         "total_free %lld, total_dirty %lld, total_used %lld",
+               ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
                          c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free,
                          c->lst.total_dirty, c->lst.total_used);
                err = -EINVAL;
@@ -1321,5 +1319,3 @@ int dbg_check_lprops(struct ubifs_info *c)
 out:
        return err;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index 6189c74..901d3b6 100644 (file)
@@ -701,8 +701,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
                        alen = ALIGN(len, c->min_io_size);
                        set_ltab(c, lnum, c->leb_size - alen, alen - len);
                        memset(p, 0xff, alen - len);
-                       err = ubifs_leb_change(c, lnum++, buf, alen,
-                                              UBI_SHORTTERM);
+                       err = ubifs_leb_change(c, lnum++, buf, alen);
                        if (err)
                                goto out;
                        p = buf;
@@ -732,8 +731,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
                                set_ltab(c, lnum, c->leb_size - alen,
                                            alen - len);
                                memset(p, 0xff, alen - len);
-                               err = ubifs_leb_change(c, lnum++, buf, alen,
-                                                      UBI_SHORTTERM);
+                               err = ubifs_leb_change(c, lnum++, buf, alen);
                                if (err)
                                        goto out;
                                p = buf;
@@ -780,8 +778,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
                        alen = ALIGN(len, c->min_io_size);
                        set_ltab(c, lnum, c->leb_size - alen, alen - len);
                        memset(p, 0xff, alen - len);
-                       err = ubifs_leb_change(c, lnum++, buf, alen,
-                                              UBI_SHORTTERM);
+                       err = ubifs_leb_change(c, lnum++, buf, alen);
                        if (err)
                                goto out;
                        p = buf;
@@ -806,7 +803,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
                alen = ALIGN(len, c->min_io_size);
                set_ltab(c, lnum, c->leb_size - alen, alen - len);
                memset(p, 0xff, alen - len);
-               err = ubifs_leb_change(c, lnum++, buf, alen, UBI_SHORTTERM);
+               err = ubifs_leb_change(c, lnum++, buf, alen);
                if (err)
                        goto out;
                p = buf;
@@ -826,7 +823,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
 
        /* Write remaining buffer */
        memset(p, 0xff, alen - len);
-       err = ubifs_leb_change(c, lnum, buf, alen, UBI_SHORTTERM);
+       err = ubifs_leb_change(c, lnum, buf, alen);
        if (err)
                goto out;
 
@@ -926,7 +923,7 @@ static int check_lpt_crc(void *buf, int len)
        if (crc != calc_crc) {
                ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc,
                          calc_crc);
-               dbg_dump_stack();
+               dump_stack();
                return -EINVAL;
        }
        return 0;
@@ -949,7 +946,7 @@ static int check_lpt_type(uint8_t **addr, int *pos, int type)
        if (node_type != type) {
                ubifs_err("invalid type (%d) in LPT node type %d", node_type,
                          type);
-               dbg_dump_stack();
+               dump_stack();
                return -EINVAL;
        }
        return 0;
@@ -1247,7 +1244,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
 
 out:
        ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
-       dbg_dump_stack();
+       dump_stack();
        kfree(nnode);
        return err;
 }
@@ -1312,9 +1309,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
 
 out:
        ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
-       dbg_dump_pnode(c, pnode, parent, iip);
-       dbg_dump_stack();
-       dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
+       ubifs_dump_pnode(c, pnode, parent, iip);
+       dump_stack();
+       ubifs_err("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
        kfree(pnode);
        return err;
 }
@@ -1740,16 +1737,20 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
        if (rd) {
                err = lpt_init_rd(c);
                if (err)
-                       return err;
+                       goto out_err;
        }
 
        if (wr) {
                err = lpt_init_wr(c);
                if (err)
-                       return err;
+                       goto out_err;
        }
 
        return 0;
+
+out_err:
+       ubifs_lpt_free(c, 0);
+       return err;
 }
 
 /**
@@ -1986,12 +1987,11 @@ again:
 
                                if (path[h].in_tree)
                                        continue;
-                               nnode = kmalloc(sz, GFP_NOFS);
+                               nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS);
                                if (!nnode) {
                                        err = -ENOMEM;
                                        goto out;
                                }
-                               memcpy(nnode, &path[h].nnode, sz);
                                parent = nnode->parent;
                                parent->nbranch[nnode->iip].nnode = nnode;
                                path[h].ptr.nnode = nnode;
@@ -2004,12 +2004,11 @@ again:
                                const size_t sz = sizeof(struct ubifs_pnode);
                                struct ubifs_nnode *parent;
 
-                               pnode = kmalloc(sz, GFP_NOFS);
+                               pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS);
                                if (!pnode) {
                                        err = -ENOMEM;
                                        goto out;
                                }
-                               memcpy(pnode, &path[h].pnode, sz);
                                parent = pnode->parent;
                                parent->nbranch[pnode->iip].pnode = pnode;
                                path[h].ptr.pnode = pnode;
@@ -2082,8 +2081,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
-
 /**
  * dbg_chk_pnode - check a pnode.
  * @c: the UBIFS file-system description object
@@ -2098,8 +2095,8 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
        int i;
 
        if (pnode->num != col) {
-               dbg_err("pnode num %d expected %d parent num %d iip %d",
-                       pnode->num, col, pnode->parent->num, pnode->iip);
+               ubifs_err("pnode num %d expected %d parent num %d iip %d",
+                         pnode->num, col, pnode->parent->num, pnode->iip);
                return -EINVAL;
        }
        for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
@@ -2113,14 +2110,14 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
                if (lnum >= c->leb_cnt)
                        continue;
                if (lprops->lnum != lnum) {
-                       dbg_err("bad LEB number %d expected %d",
-                               lprops->lnum, lnum);
+                       ubifs_err("bad LEB number %d expected %d",
+                                 lprops->lnum, lnum);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
                        if (cat != LPROPS_UNCAT) {
-                               dbg_err("LEB %d taken but not uncat %d",
-                                       lprops->lnum, cat);
+                               ubifs_err("LEB %d taken but not uncat %d",
+                                         lprops->lnum, cat);
                                return -EINVAL;
                        }
                        continue;
@@ -2132,8 +2129,8 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
                        case LPROPS_FRDI_IDX:
                                break;
                        default:
-                               dbg_err("LEB %d index but cat %d",
-                                       lprops->lnum, cat);
+                               ubifs_err("LEB %d index but cat %d",
+                                         lprops->lnum, cat);
                                return -EINVAL;
                        }
                } else {
@@ -2145,8 +2142,8 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
                        case LPROPS_FREEABLE:
                                break;
                        default:
-                               dbg_err("LEB %d not index but cat %d",
-                                       lprops->lnum, cat);
+                               ubifs_err("LEB %d not index but cat %d",
+                                         lprops->lnum, cat);
                                return -EINVAL;
                        }
                }
@@ -2186,24 +2183,24 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
                        break;
                }
                if (!found) {
-                       dbg_err("LEB %d cat %d not found in cat heap/list",
-                               lprops->lnum, cat);
+                       ubifs_err("LEB %d cat %d not found in cat heap/list",
+                                 lprops->lnum, cat);
                        return -EINVAL;
                }
                switch (cat) {
                case LPROPS_EMPTY:
                        if (lprops->free != c->leb_size) {
-                               dbg_err("LEB %d cat %d free %d dirty %d",
-                                       lprops->lnum, cat, lprops->free,
-                                       lprops->dirty);
+                               ubifs_err("LEB %d cat %d free %d dirty %d",
+                                         lprops->lnum, cat, lprops->free,
+                                         lprops->dirty);
                                return -EINVAL;
                        }
                case LPROPS_FREEABLE:
                case LPROPS_FRDI_IDX:
                        if (lprops->free + lprops->dirty != c->leb_size) {
-                               dbg_err("LEB %d cat %d free %d dirty %d",
-                                       lprops->lnum, cat, lprops->free,
-                                       lprops->dirty);
+                               ubifs_err("LEB %d cat %d free %d dirty %d",
+                                         lprops->lnum, cat, lprops->free,
+                                         lprops->dirty);
                                return -EINVAL;
                        }
                }
@@ -2237,9 +2234,9 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
                        /* cnode is a nnode */
                        num = calc_nnode_num(row, col);
                        if (cnode->num != num) {
-                               dbg_err("nnode num %d expected %d "
-                                       "parent num %d iip %d", cnode->num, num,
-                                       (nnode ? nnode->num : 0), cnode->iip);
+                               ubifs_err("nnode num %d expected %d parent num %d iip %d",
+                                         cnode->num, num,
+                                         (nnode ? nnode->num : 0), cnode->iip);
                                return -EINVAL;
                        }
                        nn = (struct ubifs_nnode *)cnode;
@@ -2276,5 +2273,3 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
        }
        return 0;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index cddd6bd..9daaeef 100644 (file)
 #include <linux/random.h>
 #include "ubifs.h"
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
 static int dbg_populate_lsave(struct ubifs_info *c);
-#else
-#define dbg_populate_lsave(c) 0
-#endif
 
 /**
  * first_dirty_cnode - find first dirty cnode.
@@ -324,11 +320,10 @@ static int layout_cnodes(struct ubifs_info *c)
        return 0;
 
 no_space:
-       ubifs_err("LPT out of space");
-       dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, "
-               "done_lsave %d", lnum, offs, len, done_ltab, done_lsave);
-       dbg_dump_lpt_info(c);
-       dbg_dump_lpt_lebs(c);
+       ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
+                 lnum, offs, len, done_ltab, done_lsave);
+       ubifs_dump_lpt_info(c);
+       ubifs_dump_lpt_lebs(c);
        dump_stack();
        return err;
 }
@@ -421,7 +416,7 @@ static int write_cnodes(struct ubifs_info *c)
                                alen = ALIGN(wlen, c->min_io_size);
                                memset(buf + offs, 0xff, alen - wlen);
                                err = ubifs_leb_write(c, lnum, buf + from, from,
-                                                      alen, UBI_SHORTTERM);
+                                                      alen);
                                if (err)
                                        return err;
                        }
@@ -479,8 +474,7 @@ static int write_cnodes(struct ubifs_info *c)
                        wlen = offs - from;
                        alen = ALIGN(wlen, c->min_io_size);
                        memset(buf + offs, 0xff, alen - wlen);
-                       err = ubifs_leb_write(c, lnum, buf + from, from, alen,
-                                             UBI_SHORTTERM);
+                       err = ubifs_leb_write(c, lnum, buf + from, from, alen);
                        if (err)
                                return err;
                        dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
@@ -506,8 +500,7 @@ static int write_cnodes(struct ubifs_info *c)
                        wlen = offs - from;
                        alen = ALIGN(wlen, c->min_io_size);
                        memset(buf + offs, 0xff, alen - wlen);
-                       err = ubifs_leb_write(c, lnum, buf + from, from, alen,
-                                             UBI_SHORTTERM);
+                       err = ubifs_leb_write(c, lnum, buf + from, from, alen);
                        if (err)
                                return err;
                        dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
@@ -531,7 +524,7 @@ static int write_cnodes(struct ubifs_info *c)
        wlen = offs - from;
        alen = ALIGN(wlen, c->min_io_size);
        memset(buf + offs, 0xff, alen - wlen);
-       err = ubifs_leb_write(c, lnum, buf + from, from, alen, UBI_SHORTTERM);
+       err = ubifs_leb_write(c, lnum, buf + from, from, alen);
        if (err)
                return err;
 
@@ -552,11 +545,10 @@ static int write_cnodes(struct ubifs_info *c)
        return 0;
 
 no_space:
-       ubifs_err("LPT out of space mismatch");
-       dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab "
-               "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave);
-       dbg_dump_lpt_info(c);
-       dbg_dump_lpt_lebs(c);
+       ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
+                 lnum, offs, len, done_ltab, done_lsave);
+       ubifs_dump_lpt_info(c);
+       ubifs_dump_lpt_lebs(c);
        dump_stack();
        return err;
 }
@@ -1497,7 +1489,9 @@ void ubifs_lpt_free(struct ubifs_info *c, int wr_only)
        kfree(c->lpt_nod_buf);
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
+/*
+ * Everything below is related to debugging.
+ */
 
 /**
  * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes.
@@ -1668,21 +1662,19 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
                                continue;
                        }
                        if (!dbg_is_all_ff(p, len)) {
-                               dbg_msg("invalid empty space in LEB %d at %d",
-                                       lnum, c->leb_size - len);
+                               ubifs_err("invalid empty space in LEB %d at %d",
+                                         lnum, c->leb_size - len);
                                err = -EINVAL;
                        }
                        i = lnum - c->lpt_first;
                        if (len != c->ltab[i].free) {
-                               dbg_msg("invalid free space in LEB %d "
-                                       "(free %d, expected %d)",
-                                       lnum, len, c->ltab[i].free);
+                               ubifs_err("invalid free space in LEB %d (free %d, expected %d)",
+                                         lnum, len, c->ltab[i].free);
                                err = -EINVAL;
                        }
                        if (dirty != c->ltab[i].dirty) {
-                               dbg_msg("invalid dirty space in LEB %d "
-                                       "(dirty %d, expected %d)",
-                                       lnum, dirty, c->ltab[i].dirty);
+                               ubifs_err("invalid dirty space in LEB %d (dirty %d, expected %d)",
+                                         lnum, dirty, c->ltab[i].dirty);
                                err = -EINVAL;
                        }
                        goto out;
@@ -1735,7 +1727,7 @@ int dbg_check_ltab(struct ubifs_info *c)
        for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
                err = dbg_check_ltab_lnum(c, lnum);
                if (err) {
-                       dbg_err("failed at LEB %d", lnum);
+                       ubifs_err("failed at LEB %d", lnum);
                        return err;
                }
        }
@@ -1767,10 +1759,10 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c)
                        free += c->leb_size;
        }
        if (free < c->lpt_sz) {
-               dbg_err("LPT space error: free %lld lpt_sz %lld",
-                       free, c->lpt_sz);
-               dbg_dump_lpt_info(c);
-               dbg_dump_lpt_lebs(c);
+               ubifs_err("LPT space error: free %lld lpt_sz %lld",
+                         free, c->lpt_sz);
+               ubifs_dump_lpt_info(c);
+               ubifs_dump_lpt_lebs(c);
                dump_stack();
                return -EINVAL;
        }
@@ -1807,13 +1799,13 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
                d->chk_lpt_lebs = 0;
                d->chk_lpt_wastage = 0;
                if (c->dirty_pn_cnt > c->pnode_cnt) {
-                       dbg_err("dirty pnodes %d exceed max %d",
-                               c->dirty_pn_cnt, c->pnode_cnt);
+                       ubifs_err("dirty pnodes %d exceed max %d",
+                                 c->dirty_pn_cnt, c->pnode_cnt);
                        err = -EINVAL;
                }
                if (c->dirty_nn_cnt > c->nnode_cnt) {
-                       dbg_err("dirty nnodes %d exceed max %d",
-                               c->dirty_nn_cnt, c->nnode_cnt);
+                       ubifs_err("dirty nnodes %d exceed max %d",
+                                 c->dirty_nn_cnt, c->nnode_cnt);
                        err = -EINVAL;
                }
                return err;
@@ -1830,23 +1822,23 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
                chk_lpt_sz *= d->chk_lpt_lebs;
                chk_lpt_sz += len - c->nhead_offs;
                if (d->chk_lpt_sz != chk_lpt_sz) {
-                       dbg_err("LPT wrote %lld but space used was %lld",
-                               d->chk_lpt_sz, chk_lpt_sz);
+                       ubifs_err("LPT wrote %lld but space used was %lld",
+                                 d->chk_lpt_sz, chk_lpt_sz);
                        err = -EINVAL;
                }
                if (d->chk_lpt_sz > c->lpt_sz) {
-                       dbg_err("LPT wrote %lld but lpt_sz is %lld",
-                               d->chk_lpt_sz, c->lpt_sz);
+                       ubifs_err("LPT wrote %lld but lpt_sz is %lld",
+                                 d->chk_lpt_sz, c->lpt_sz);
                        err = -EINVAL;
                }
                if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) {
-                       dbg_err("LPT layout size %lld but wrote %lld",
-                               d->chk_lpt_sz, d->chk_lpt_sz2);
+                       ubifs_err("LPT layout size %lld but wrote %lld",
+                                 d->chk_lpt_sz, d->chk_lpt_sz2);
                        err = -EINVAL;
                }
                if (d->chk_lpt_sz2 && d->new_nhead_offs != len) {
-                       dbg_err("LPT new nhead offs: expected %d was %d",
-                               d->new_nhead_offs, len);
+                       ubifs_err("LPT new nhead offs: expected %d was %d",
+                                 d->new_nhead_offs, len);
                        err = -EINVAL;
                }
                lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
@@ -1855,13 +1847,13 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
                if (c->big_lpt)
                        lpt_sz += c->lsave_sz;
                if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) {
-                       dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
-                               d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz);
+                       ubifs_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
+                                 d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz);
                        err = -EINVAL;
                }
                if (err) {
-                       dbg_dump_lpt_info(c);
-                       dbg_dump_lpt_lebs(c);
+                       ubifs_dump_lpt_info(c);
+                       ubifs_dump_lpt_lebs(c);
                        dump_stack();
                }
                d->chk_lpt_sz2 = d->chk_lpt_sz;
@@ -1880,7 +1872,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
 }
 
 /**
- * dbg_dump_lpt_leb - dump an LPT LEB.
+ * ubifs_dump_lpt_leb - dump an LPT LEB.
  * @c: UBIFS file-system description object
  * @lnum: LEB number to dump
  *
@@ -1894,8 +1886,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
        int err, len = c->leb_size, node_type, node_num, node_len, offs;
        void *buf, *p;
 
-       printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
        buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
        if (!buf) {
                ubifs_err("cannot allocate memory to dump LPT");
@@ -1913,14 +1904,14 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
 
                        pad_len = get_pad_len(c, p, len);
                        if (pad_len) {
-                               printk(KERN_DEBUG "LEB %d:%d, pad %d bytes\n",
+                               pr_err("LEB %d:%d, pad %d bytes\n",
                                       lnum, offs, pad_len);
                                p += pad_len;
                                len -= pad_len;
                                continue;
                        }
                        if (len)
-                               printk(KERN_DEBUG "LEB %d:%d, free %d bytes\n",
+                               pr_err("LEB %d:%d, free %d bytes\n",
                                       lnum, offs, len);
                        break;
                }
@@ -1931,11 +1922,10 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
                {
                        node_len = c->pnode_sz;
                        if (c->big_lpt)
-                               printk(KERN_DEBUG "LEB %d:%d, pnode num %d\n",
+                               pr_err("LEB %d:%d, pnode num %d\n",
                                       lnum, offs, node_num);
                        else
-                               printk(KERN_DEBUG "LEB %d:%d, pnode\n",
-                                      lnum, offs);
+                               pr_err("LEB %d:%d, pnode\n", lnum, offs);
                        break;
                }
                case UBIFS_LPT_NNODE:
@@ -1945,29 +1935,28 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
 
                        node_len = c->nnode_sz;
                        if (c->big_lpt)
-                               printk(KERN_DEBUG "LEB %d:%d, nnode num %d, ",
+                               pr_err("LEB %d:%d, nnode num %d, ",
                                       lnum, offs, node_num);
                        else
-                               printk(KERN_DEBUG "LEB %d:%d, nnode, ",
+                               pr_err("LEB %d:%d, nnode, ",
                                       lnum, offs);
                        err = ubifs_unpack_nnode(c, p, &nnode);
                        for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
-                               printk(KERN_CONT "%d:%d", nnode.nbranch[i].lnum,
+                               pr_cont("%d:%d", nnode.nbranch[i].lnum,
                                       nnode.nbranch[i].offs);
                                if (i != UBIFS_LPT_FANOUT - 1)
-                                       printk(KERN_CONT ", ");
+                                       pr_cont(", ");
                        }
-                       printk(KERN_CONT "\n");
+                       pr_cont("\n");
                        break;
                }
                case UBIFS_LPT_LTAB:
                        node_len = c->ltab_sz;
-                       printk(KERN_DEBUG "LEB %d:%d, ltab\n",
-                              lnum, offs);
+                       pr_err("LEB %d:%d, ltab\n", lnum, offs);
                        break;
                case UBIFS_LPT_LSAVE:
                        node_len = c->lsave_sz;
-                       printk(KERN_DEBUG "LEB %d:%d, lsave len\n", lnum, offs);
+                       pr_err("LEB %d:%d, lsave len\n", lnum, offs);
                        break;
                default:
                        ubifs_err("LPT node type %d not recognized", node_type);
@@ -1978,30 +1967,27 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
                len -= node_len;
        }
 
-       printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
 out:
        vfree(buf);
        return;
 }
 
 /**
- * dbg_dump_lpt_lebs - dump LPT lebs.
+ * ubifs_dump_lpt_lebs - dump LPT lebs.
  * @c: UBIFS file-system description object
  *
  * This function dumps all LPT LEBs. The caller has to make sure the LPT is
  * locked.
  */
-void dbg_dump_lpt_lebs(const struct ubifs_info *c)
+void ubifs_dump_lpt_lebs(const struct ubifs_info *c)
 {
        int i;
 
-       printk(KERN_DEBUG "(pid %d) start dumping all LPT LEBs\n",
-              current->pid);
+       pr_err("(pid %d) start dumping all LPT LEBs\n", current->pid);
        for (i = 0; i < c->lpt_lebs; i++)
                dump_lpt_leb(c, i + c->lpt_first);
-       printk(KERN_DEBUG "(pid %d) finish dumping all LPT LEBs\n",
-              current->pid);
+       pr_err("(pid %d) finish dumping all LPT LEBs\n", current->pid);
 }
 
 /**
@@ -2046,5 +2032,3 @@ static int dbg_populate_lsave(struct ubifs_info *c)
 
        return 1;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index bb9f481..1a4bb9e 100644 (file)
@@ -241,7 +241,7 @@ static int validate_master(const struct ubifs_info *c)
 
 out:
        ubifs_err("bad master node at offset %d error %d", c->mst_offs, err);
-       dbg_dump_node(c, c->mst_node);
+       ubifs_dump_node(c, c->mst_node);
        return -EINVAL;
 }
 
@@ -317,7 +317,7 @@ int ubifs_read_master(struct ubifs_info *c)
                if (c->leb_cnt < old_leb_cnt ||
                    c->leb_cnt < UBIFS_MIN_LEB_CNT) {
                        ubifs_err("bad leb_cnt on master node");
-                       dbg_dump_node(c, c->mst_node);
+                       ubifs_dump_node(c, c->mst_node);
                        return -EINVAL;
                }
 
@@ -378,7 +378,7 @@ int ubifs_write_master(struct ubifs_info *c)
        c->mst_offs = offs;
        c->mst_node->highest_inum = cpu_to_le64(c->highest_inum);
 
-       err = ubifs_write_node(c, c->mst_node, len, lnum, offs, UBI_SHORTTERM);
+       err = ubifs_write_node(c, c->mst_node, len, lnum, offs);
        if (err)
                return err;
 
@@ -389,7 +389,7 @@ int ubifs_write_master(struct ubifs_info *c)
                if (err)
                        return err;
        }
-       err = ubifs_write_node(c, c->mst_node, len, lnum, offs, UBI_SHORTTERM);
+       err = ubifs_write_node(c, c->mst_node, len, lnum, offs);
 
        return err;
 }
index f9c90b5..574d7a0 100644 (file)
  * than the maximum number of orphans allowed.
  */
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
 static int dbg_check_orphans(struct ubifs_info *c);
-#else
-#define dbg_check_orphans(c) 0
-#endif
 
 /**
  * ubifs_add_orphan - add an orphan.
@@ -92,7 +88,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
                else if (inum > o->inum)
                        p = &(*p)->rb_right;
                else {
-                       dbg_err("orphaned twice");
+                       ubifs_err("orphaned twice");
                        spin_unlock(&c->orphan_lock);
                        kfree(orphan);
                        return 0;
@@ -159,8 +155,8 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
                }
        }
        spin_unlock(&c->orphan_lock);
-       dbg_err("missing orphan ino %lu", (unsigned long)inum);
-       dbg_dump_stack();
+       ubifs_err("missing orphan ino %lu", (unsigned long)inum);
+       dump_stack();
 }
 
 /**
@@ -249,8 +245,7 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic)
                ubifs_assert(c->ohead_offs == 0);
                ubifs_prepare_node(c, c->orph_buf, len, 1);
                len = ALIGN(len, c->min_io_size);
-               err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len,
-                                      UBI_SHORTTERM);
+               err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len);
        } else {
                if (c->ohead_offs == 0) {
                        /* Ensure LEB has been unmapped */
@@ -259,7 +254,7 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic)
                                return err;
                }
                err = ubifs_write_node(c, c->orph_buf, len, c->ohead_lnum,
-                                      c->ohead_offs, UBI_SHORTTERM);
+                                      c->ohead_offs);
        }
        return err;
 }
@@ -570,9 +565,9 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 
        list_for_each_entry(snod, &sleb->nodes, list) {
                if (snod->type != UBIFS_ORPH_NODE) {
-                       ubifs_err("invalid node type %d in orphan area at "
-                                 "%d:%d", snod->type, sleb->lnum, snod->offs);
-                       dbg_dump_node(c, snod->node);
+                       ubifs_err("invalid node type %d in orphan area at %d:%d",
+                                 snod->type, sleb->lnum, snod->offs);
+                       ubifs_dump_node(c, snod->node);
                        return -EINVAL;
                }
 
@@ -597,10 +592,9 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                         * number. That makes this orphan node, out of date.
                         */
                        if (!first) {
-                               ubifs_err("out of order commit number %llu in "
-                                         "orphan node at %d:%d",
+                               ubifs_err("out of order commit number %llu in orphan node at %d:%d",
                                          cmt_no, sleb->lnum, snod->offs);
-                               dbg_dump_node(c, snod->node);
+                               ubifs_dump_node(c, snod->node);
                                return -EINVAL;
                        }
                        dbg_rcvry("out of date LEB %d", sleb->lnum);
@@ -728,7 +722,9 @@ int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only)
        return err;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
+/*
+ * Everything below is related to debugging.
+ */
 
 struct check_orphan {
        struct rb_node rb;
@@ -971,5 +967,3 @@ out:
        kfree(ci.node);
        return err;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index ee4f43f..065096e 100644 (file)
@@ -213,10 +213,10 @@ static int write_rcvrd_mst_node(struct ubifs_info *c,
        mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
 
        ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
-       err = ubifs_leb_change(c, lnum, mst, sz, UBI_SHORTTERM);
+       err = ubifs_leb_change(c, lnum, mst, sz);
        if (err)
                goto out;
-       err = ubifs_leb_change(c, lnum + 1, mst, sz, UBI_SHORTTERM);
+       err = ubifs_leb_change(c, lnum + 1, mst, sz);
        if (err)
                goto out;
 out:
@@ -362,12 +362,12 @@ out_err:
 out_free:
        ubifs_err("failed to recover master node");
        if (mst1) {
-               dbg_err("dumping first master node");
-               dbg_dump_node(c, mst1);
+               ubifs_err("dumping first master node");
+               ubifs_dump_node(c, mst1);
        }
        if (mst2) {
-               dbg_err("dumping second master node");
-               dbg_dump_node(c, mst2);
+               ubifs_err("dumping second master node");
+               ubifs_dump_node(c, mst2);
        }
        vfree(buf2);
        vfree(buf1);
@@ -555,8 +555,7 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                                        ubifs_pad(c, buf, pad_len);
                                }
                        }
-                       err = ubifs_leb_change(c, lnum, sleb->buf, len,
-                                              UBI_UNKNOWN);
+                       err = ubifs_leb_change(c, lnum, sleb->buf, len);
                        if (err)
                                return err;
                }
@@ -610,7 +609,8 @@ static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
                snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
                                  list);
 
-               dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
+               dbg_rcvry("dropping last node at %d:%d",
+                         sleb->lnum, snod->offs);
                *offs = snod->offs;
                list_del(&snod->list);
                kfree(snod);
@@ -679,10 +679,11 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                           ret == SCANNED_GARBAGE     ||
                           ret == SCANNED_A_BAD_PAD_NODE ||
                           ret == SCANNED_A_CORRUPT_NODE) {
-                       dbg_rcvry("found corruption - %d", ret);
+                       dbg_rcvry("found corruption (%d) at %d:%d",
+                                 ret, lnum, offs);
                        break;
                } else {
-                       dbg_err("unexpected return value %d", ret);
+                       ubifs_err("unexpected return value %d", ret);
                        err = -EINVAL;
                        goto error;
                }
@@ -702,8 +703,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                         * See header comment for this file for more
                         * explanations about the reasons we have this check.
                         */
-                       ubifs_err("corrupt empty space LEB %d:%d, corruption "
-                                 "starts at %d", lnum, offs, corruption);
+                       ubifs_err("corrupt empty space LEB %d:%d, corruption starts at %d",
+                                 lnum, offs, corruption);
                        /* Make sure we dump interesting non-0xFF data */
                        offs += corruption;
                        buf += corruption;
@@ -788,7 +789,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
 
 corrupted_rescan:
        /* Re-scan the corrupted data with verbose messages */
-       dbg_err("corruptio %d", ret);
+       ubifs_err("corruption %d", ret);
        ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
 corrupted:
        ubifs_scanned_corruption(c, lnum, offs, buf);
@@ -826,17 +827,17 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
                goto out_free;
        ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
        if (ret != SCANNED_A_NODE) {
-               dbg_err("Not a valid node");
+               ubifs_err("Not a valid node");
                goto out_err;
        }
        if (cs_node->ch.node_type != UBIFS_CS_NODE) {
-               dbg_err("Node a CS node, type is %d", cs_node->ch.node_type);
+               ubifs_err("Node a CS node, type is %d", cs_node->ch.node_type);
                goto out_err;
        }
        if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
-               dbg_err("CS node cmt_no %llu != current cmt_no %llu",
-                       (unsigned long long)le64_to_cpu(cs_node->cmt_no),
-                       c->cmt_no);
+               ubifs_err("CS node cmt_no %llu != current cmt_no %llu",
+                         (unsigned long long)le64_to_cpu(cs_node->cmt_no),
+                         c->cmt_no);
                goto out_err;
        }
        *cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
@@ -899,8 +900,8 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
                                }
                        }
                        if (snod->sqnum > cs_sqnum) {
-                               ubifs_err("unrecoverable log corruption "
-                                         "in LEB %d", lnum);
+                               ubifs_err("unrecoverable log corruption in LEB %d",
+                                         lnum);
                                ubifs_scan_destroy(sleb);
                                return ERR_PTR(-EUCLEAN);
                        }
@@ -940,7 +941,7 @@ static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf)
                err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1);
                if (err)
                        return err;
-               return ubifs_leb_change(c, lnum, sbuf, offs, UBI_UNKNOWN);
+               return ubifs_leb_change(c, lnum, sbuf, offs);
        }
 
        return 0;
@@ -1070,7 +1071,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c,
        }
 
        /* Write back the LEB atomically */
-       err = ubifs_leb_change(c, lnum, sbuf, len, UBI_UNKNOWN);
+       err = ubifs_leb_change(c, lnum, sbuf, len);
        if (err)
                return err;
 
@@ -1137,9 +1138,9 @@ static int grab_empty_leb(struct ubifs_info *c)
         */
        lnum = ubifs_find_free_leb_for_idx(c);
        if (lnum < 0) {
-               dbg_err("could not find an empty LEB");
-               dbg_dump_lprops(c);
-               dbg_dump_budg(c, &c->bi);
+               ubifs_err("could not find an empty LEB");
+               ubifs_dump_lprops(c);
+               ubifs_dump_budg(c, &c->bi);
                return lnum;
        }
 
@@ -1217,7 +1218,7 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
        }
        mutex_unlock(&wbuf->io_mutex);
        if (err < 0) {
-               dbg_err("GC failed, error %d", err);
+               ubifs_err("GC failed, error %d", err);
                if (err == -EAGAIN)
                        err = -EINVAL;
                return err;
@@ -1471,7 +1472,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
                len -= 1;
        len = ALIGN(len + 1, c->min_io_size);
        /* Atomically write the fixed LEB back again */
-       err = ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN);
+       err = ubifs_leb_change(c, lnum, c->sbuf, len);
        if (err)
                goto out;
        dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
index ccabaf1..30b5bdc 100644 (file)
@@ -141,9 +141,9 @@ static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
                 * during the replay.
                 */
                if (dirty != 0)
-                       dbg_msg("LEB %d lp: %d free %d dirty "
-                               "replay: %d free %d dirty", b->bud->lnum,
-                               lp->free, lp->dirty, b->free, b->dirty);
+                       dbg_mnt("LEB %d lp: %d free %d dirty replay: %d free %d dirty",
+                               b->bud->lnum, lp->free, lp->dirty, b->free,
+                               b->dirty);
        }
        lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty,
                             lp->flags | LPROPS_TAKEN, 0);
@@ -154,8 +154,7 @@ static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
 
        /* Make sure the journal head points to the latest bud */
        err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf,
-                                    b->bud->lnum, c->leb_size - b->free,
-                                    UBI_SHORTTERM);
+                                    b->bud->lnum, c->leb_size - b->free);
 
 out:
        ubifs_release_lprops(c);
@@ -221,8 +220,8 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
 {
        int err;
 
-       dbg_mnt("LEB %d:%d len %d deletion %d sqnum %llu %s", r->lnum,
-               r->offs, r->len, r->deletion, r->sqnum, DBGKEY(&r->key));
+       dbg_mntk(&r->key, "LEB %d:%d len %d deletion %d sqnum %llu key ",
+                r->lnum, r->offs, r->len, r->deletion, r->sqnum);
 
        /* Set c->replay_sqnum to help deal with dangling branches. */
        c->replay_sqnum = r->sqnum;
@@ -361,7 +360,7 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
 {
        struct replay_entry *r;
 
-       dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
+       dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);
 
        if (key_inum(c, key) >= c->highest_inum)
                c->highest_inum = key_inum(c, key);
@@ -409,7 +408,7 @@ static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
        struct replay_entry *r;
        char *nbuf;
 
-       dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
+       dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);
        if (key_inum(c, key) >= c->highest_inum)
                c->highest_inum = key_inum(c, key);
 
@@ -678,7 +677,8 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
 
        b->dirty = sleb->endpt - offs - used;
        b->free = c->leb_size - sleb->endpt;
-       dbg_mnt("bud LEB %d replied: dirty %d, free %d", lnum, b->dirty, b->free);
+       dbg_mnt("bud LEB %d replied: dirty %d, free %d",
+               lnum, b->dirty, b->free);
 
 out:
        ubifs_scan_destroy(sleb);
@@ -686,7 +686,7 @@ out:
 
 out_dump:
        ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs);
-       dbg_dump_node(c, snod->node);
+       ubifs_dump_node(c, snod->node);
        ubifs_scan_destroy(sleb);
        return -EINVAL;
 }
@@ -861,16 +861,15 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
                 * numbers.
                 */
                if (snod->type != UBIFS_CS_NODE) {
-                       dbg_err("first log node at LEB %d:%d is not CS node",
-                               lnum, offs);
+                       ubifs_err("first log node at LEB %d:%d is not CS node",
+                                 lnum, offs);
                        goto out_dump;
                }
                if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
-                       dbg_err("first CS node at LEB %d:%d has wrong "
-                               "commit number %llu expected %llu",
-                               lnum, offs,
-                               (unsigned long long)le64_to_cpu(node->cmt_no),
-                               c->cmt_no);
+                       ubifs_err("first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
+                                 lnum, offs,
+                                 (unsigned long long)le64_to_cpu(node->cmt_no),
+                                 c->cmt_no);
                        goto out_dump;
                }
 
@@ -892,7 +891,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
 
        /* Make sure the first node sits at offset zero of the LEB */
        if (snod->offs != 0) {
-               dbg_err("first node is not at zero offset");
+               ubifs_err("first node is not at zero offset");
                goto out_dump;
        }
 
@@ -905,8 +904,8 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
                }
 
                if (snod->sqnum < c->cs_sqnum) {
-                       dbg_err("bad sqnum %llu, commit sqnum %llu",
-                               snod->sqnum, c->cs_sqnum);
+                       ubifs_err("bad sqnum %llu, commit sqnum %llu",
+                                 snod->sqnum, c->cs_sqnum);
                        goto out_dump;
                }
 
@@ -958,7 +957,7 @@ out:
 out_dump:
        ubifs_err("log error detected while replaying the log at LEB %d:%d",
                  lnum, offs + snod->offs);
-       dbg_dump_node(c, snod->node);
+       ubifs_dump_node(c, snod->node);
        ubifs_scan_destroy(sleb);
        return -EINVAL;
 }
@@ -1068,8 +1067,8 @@ int ubifs_replay_journal(struct ubifs_info *c)
        c->bi.uncommitted_idx *= c->max_idx_node_sz;
 
        ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
-       dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
-               "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
+       dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu",
+               c->lhead_lnum, c->lhead_offs, c->max_sqnum,
                (unsigned long)c->highest_inum);
 out:
        destroy_replay_list(c);
index b73ecd8..5b7bfa2 100644 (file)
@@ -130,7 +130,6 @@ static int create_default_filesystem(struct ubifs_info *c)
         * orphan node.
         */
        orph_lebs = UBIFS_MIN_ORPH_LEBS;
-#ifdef CONFIG_UBIFS_FS_DEBUG
        if (c->leb_cnt - min_leb_cnt > 1)
                /*
                 * For debugging purposes it is better to have at least 2
@@ -138,7 +137,6 @@ static int create_default_filesystem(struct ubifs_info *c)
                 * consolidations and would be stressed more.
                 */
                orph_lebs += 1;
-#endif
 
        main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs;
        main_lebs -= orph_lebs;
@@ -196,7 +194,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        sup->rp_size = cpu_to_le64(tmp64);
        sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION);
 
-       err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0, UBI_LONGTERM);
+       err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0);
        kfree(sup);
        if (err)
                return err;
@@ -252,14 +250,13 @@ static int create_default_filesystem(struct ubifs_info *c)
 
        mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);
 
-       err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0,
-                              UBI_UNKNOWN);
+       err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0);
        if (err) {
                kfree(mst);
                return err;
        }
-       err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, 0,
-                              UBI_UNKNOWN);
+       err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1,
+                              0);
        kfree(mst);
        if (err)
                return err;
@@ -282,8 +279,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        key_write_idx(c, &key, &br->key);
        br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB);
        br->len  = cpu_to_le32(UBIFS_INO_NODE_SZ);
-       err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0,
-                              UBI_UNKNOWN);
+       err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0);
        kfree(idx);
        if (err)
                return err;
@@ -315,8 +311,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        ino->flags = cpu_to_le32(UBIFS_COMPR_FL);
 
        err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ,
-                              main_first + DEFAULT_DATA_LEB, 0,
-                              UBI_UNKNOWN);
+                              main_first + DEFAULT_DATA_LEB, 0);
        kfree(ino);
        if (err)
                return err;
@@ -335,8 +330,7 @@ static int create_default_filesystem(struct ubifs_info *c)
                return -ENOMEM;
 
        cs->ch.node_type = UBIFS_CS_NODE;
-       err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM,
-                              0, UBI_UNKNOWN);
+       err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0);
        kfree(cs);
 
        ubifs_msg("default file-system created");
@@ -397,9 +391,8 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
        min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;
 
        if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
-               ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, "
-                         "%d minimum required", c->leb_cnt, c->vi.size,
-                         min_leb_cnt);
+               ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, %d minimum required",
+                         c->leb_cnt, c->vi.size, min_leb_cnt);
                goto failed;
        }
 
@@ -410,13 +403,22 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
        }
 
        if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
-               err = 7;
+               ubifs_err("too few main LEBs count %d, must be at least %d",
+                         c->main_lebs, UBIFS_MIN_MAIN_LEBS);
                goto failed;
        }
 
-       if (c->max_bud_bytes < (long long)c->leb_size * UBIFS_MIN_BUD_LEBS ||
-           c->max_bud_bytes > (long long)c->leb_size * c->main_lebs) {
-               err = 8;
+       max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS;
+       if (c->max_bud_bytes < max_bytes) {
+               ubifs_err("too small journal (%lld bytes), must be at least %lld bytes",
+                         c->max_bud_bytes, max_bytes);
+               goto failed;
+       }
+
+       max_bytes = (long long)c->leb_size * c->main_lebs;
+       if (c->max_bud_bytes > max_bytes) {
+               ubifs_err("too large journal size (%lld bytes), only %lld bytes available in the main area",
+                         c->max_bud_bytes, max_bytes);
                goto failed;
        }
 
@@ -450,7 +452,6 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
                goto failed;
        }
 
-       max_bytes = c->main_lebs * (long long)c->leb_size;
        if (c->rp_size < 0 || max_bytes < c->rp_size) {
                err = 14;
                goto failed;
@@ -466,7 +467,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
 
 failed:
        ubifs_err("bad superblock, error %d", err);
-       dbg_dump_node(c, sup);
+       ubifs_dump_node(c, sup);
        return -EINVAL;
 }
 
@@ -509,7 +510,7 @@ int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup)
        int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
 
        ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1);
-       return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len, UBI_LONGTERM);
+       return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len);
 }
 
 /**
@@ -546,10 +547,9 @@ int ubifs_read_superblock(struct ubifs_info *c)
                ubifs_assert(!c->ro_media || c->ro_mount);
                if (!c->ro_mount ||
                    c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
-                       ubifs_err("on-flash format version is w%d/r%d, but "
-                                 "software only supports up to version "
-                                 "w%d/r%d", c->fmt_version,
-                                 c->ro_compat_version, UBIFS_FORMAT_VERSION,
+                       ubifs_err("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
+                                 c->fmt_version, c->ro_compat_version,
+                                 UBIFS_FORMAT_VERSION,
                                  UBIFS_RO_COMPAT_VERSION);
                        if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
                                ubifs_msg("only R/O mounting is possible");
@@ -682,7 +682,7 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len)
        if (err)
                return err;
 
-       return ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN);
+       return ubifs_leb_change(c, lnum, c->sbuf, len);
 }
 
 /**
index 37383e8..58aa05d 100644 (file)
@@ -75,7 +75,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
        magic = le32_to_cpu(ch->magic);
 
        if (magic == 0xFFFFFFFF) {
-               dbg_scan("hit empty space");
+               dbg_scan("hit empty space at LEB %d:%d", lnum, offs);
                return SCANNED_EMPTY_SPACE;
        }
 
@@ -85,7 +85,8 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
        if (len < UBIFS_CH_SZ)
                return SCANNED_GARBAGE;
 
-       dbg_scan("scanning %s", dbg_ntype(ch->node_type));
+       dbg_scan("scanning %s at LEB %d:%d",
+                dbg_ntype(ch->node_type), lnum, offs);
 
        if (ubifs_check_node(c, buf, lnum, offs, quiet, 1))
                return SCANNED_A_CORRUPT_NODE;
@@ -101,7 +102,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
                        if (!quiet) {
                                ubifs_err("bad pad node at LEB %d:%d",
                                          lnum, offs);
-                               dbg_dump_node(c, pad);
+                               ubifs_dump_node(c, pad);
                        }
                        return SCANNED_A_BAD_PAD_NODE;
                }
@@ -109,13 +110,13 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
                /* Make the node pads to 8-byte boundary */
                if ((node_len + pad_len) & 7) {
                        if (!quiet)
-                               dbg_err("bad padding length %d - %d",
-                                       offs, offs + node_len + pad_len);
+                               ubifs_err("bad padding length %d - %d",
+                                         offs, offs + node_len + pad_len);
                        return SCANNED_A_BAD_PAD_NODE;
                }
 
-               dbg_scan("%d bytes padded, offset now %d",
-                        pad_len, ALIGN(offs + node_len + pad_len, 8));
+               dbg_scan("%d bytes padded at LEB %d:%d, offset now %d", pad_len,
+                        lnum, offs, ALIGN(offs + node_len + pad_len, 8));
 
                return node_len + pad_len;
        }
@@ -150,8 +151,8 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
 
        err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0);
        if (err && err != -EBADMSG) {
-               ubifs_err("cannot read %d bytes from LEB %d:%d,"
-                         " error %d", c->leb_size - offs, lnum, offs, err);
+               ubifs_err("cannot read %d bytes from LEB %d:%d, error %d",
+                         c->leb_size - offs, lnum, offs, err);
                kfree(sleb);
                return ERR_PTR(err);
        }
@@ -240,12 +241,10 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
        int len;
 
        ubifs_err("corruption at LEB %d:%d", lnum, offs);
-       if (dbg_is_tst_rcvry(c))
-               return;
        len = c->leb_size - offs;
        if (len > 8192)
                len = 8192;
-       dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs);
+       ubifs_err("first %d bytes from LEB %d:%d", len, lnum, offs);
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1);
 }
 
@@ -300,16 +299,16 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
 
                switch (ret) {
                case SCANNED_GARBAGE:
-                       dbg_err("garbage");
+                       ubifs_err("garbage");
                        goto corrupted;
                case SCANNED_A_NODE:
                        break;
                case SCANNED_A_CORRUPT_NODE:
                case SCANNED_A_BAD_PAD_NODE:
-                       dbg_err("bad node");
+                       ubifs_err("bad node");
                        goto corrupted;
                default:
-                       dbg_err("unknown");
+                       ubifs_err("unknown");
                        err = -EINVAL;
                        goto error;
                }
index 201bcfc..ec8fcfd 100644 (file)
@@ -89,9 +89,8 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
                return 5;
 
        if (!ubifs_compr_present(ui->compr_type)) {
-               ubifs_warn("inode %lu uses '%s' compression, but it was not "
-                          "compiled in", inode->i_ino,
-                          ubifs_compr_name(ui->compr_type));
+               ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in",
+                          inode->i_ino, ubifs_compr_name(ui->compr_type));
        }
 
        err = dbg_check_dir(c, inode);
@@ -246,8 +245,8 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
 
 out_invalid:
        ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err);
-       dbg_dump_node(c, ino);
-       dbg_dump_inode(c, inode);
+       ubifs_dump_node(c, ino);
+       ubifs_dump_inode(c, inode);
        err = -EINVAL;
 out_ino:
        kfree(ino);
@@ -669,8 +668,8 @@ static int init_constants_sb(struct ubifs_info *c)
        tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
        tmp = ALIGN(tmp, c->min_io_size);
        if (tmp > c->leb_size) {
-               dbg_err("too small LEB size %d, at least %d needed",
-                       c->leb_size, tmp);
+               ubifs_err("too small LEB size %d, at least %d needed",
+                         c->leb_size, tmp);
                return -EINVAL;
        }
 
@@ -684,8 +683,8 @@ static int init_constants_sb(struct ubifs_info *c)
        tmp /= c->leb_size;
        tmp += 1;
        if (c->log_lebs < tmp) {
-               dbg_err("too small log %d LEBs, required min. %d LEBs",
-                       c->log_lebs, tmp);
+               ubifs_err("too small log %d LEBs, required min. %d LEBs",
+                         c->log_lebs, tmp);
                return -EINVAL;
        }
 
@@ -814,13 +813,10 @@ static int alloc_wbufs(struct ubifs_info *c)
                c->jheads[i].grouped = 1;
        }
 
-       c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
        /*
-        * Garbage Collector head likely contains long-term data and
-        * does not need to be synchronized by timer. Also GC head nodes are
-        * not grouped.
+        * Garbage Collector head does not need to be synchronized by timer.
+        * Also GC head nodes are not grouped.
         */
-       c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
        c->jheads[GCHD].wbuf.no_timer = 1;
        c->jheads[GCHD].grouped = 0;
 
@@ -864,7 +860,7 @@ static void free_orphans(struct ubifs_info *c)
                orph = list_entry(c->orph_list.next, struct ubifs_orphan, list);
                list_del(&orph->list);
                kfree(orph);
-               dbg_err("orphan list not empty at unmount");
+               ubifs_err("orphan list not empty at unmount");
        }
 
        vfree(c->orph_buf);
@@ -1065,8 +1061,8 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
 
                        flag = parse_standard_option(p);
                        if (!flag) {
-                               ubifs_err("unrecognized mount option \"%s\" "
-                                         "or missing value", p);
+                               ubifs_err("unrecognized mount option \"%s\" or missing value",
+                                         p);
                                return -EINVAL;
                        }
                        sb->s_flags |= flag;
@@ -1128,8 +1124,8 @@ again:
                }
 
                /* Just disable bulk-read */
-               ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
-                          "disabling it", c->max_bu_buf_len);
+               ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it",
+                          c->max_bu_buf_len);
                c->mount_opts.bulk_read = 1;
                c->bulk_read = 0;
                return;
@@ -1148,8 +1144,8 @@ static int check_free_space(struct ubifs_info *c)
        ubifs_assert(c->dark_wm > 0);
        if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
                ubifs_err("insufficient free space to mount in R/W mode");
-               dbg_dump_budg(c, &c->bi);
-               dbg_dump_lprops(c);
+               ubifs_dump_budg(c, &c->bi);
+               ubifs_dump_lprops(c);
                return -ENOSPC;
        }
        return 0;
@@ -1168,7 +1164,7 @@ static int check_free_space(struct ubifs_info *c)
 static int mount_ubifs(struct ubifs_info *c)
 {
        int err;
-       long long x;
+       long long x, y;
        size_t sz;
 
        c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
@@ -1302,7 +1298,7 @@ static int mount_ubifs(struct ubifs_info *c)
        if (!c->ro_mount && c->space_fixup) {
                err = ubifs_fixup_free_space(c);
                if (err)
-                       goto out_master;
+                       goto out_lpt;
        }
 
        if (!c->ro_mount) {
@@ -1418,75 +1414,69 @@ static int mount_ubifs(struct ubifs_info *c)
 
        c->mounting = 0;
 
-       ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
-                 c->vi.ubi_num, c->vi.vol_id, c->vi.name);
-       if (c->ro_mount)
-               ubifs_msg("mounted read-only");
+       ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s",
+                 c->vi.ubi_num, c->vi.vol_id, c->vi.name,
+                 c->ro_mount ? ", R/O mode" : "");
        x = (long long)c->main_lebs * c->leb_size;
-       ubifs_msg("file system size:   %lld bytes (%lld KiB, %lld MiB, %d "
-                 "LEBs)", x, x >> 10, x >> 20, c->main_lebs);
-       x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
-       ubifs_msg("journal size:       %lld bytes (%lld KiB, %lld MiB, %d "
-                 "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
-       ubifs_msg("media format:       w%d/r%d (latest is w%d/r%d)",
+       y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
+       ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
+                 c->leb_size, c->leb_size >> 10, c->min_io_size,
+                 c->max_write_size);
+       ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
+                 x, x >> 20, c->main_lebs,
+                 y, y >> 20, c->log_lebs + c->max_bud_cnt);
+       ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
+                 c->report_rp_size, c->report_rp_size >> 10);
+       ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
                  c->fmt_version, c->ro_compat_version,
-                 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
-       ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
-       ubifs_msg("reserved for root:  %llu bytes (%llu KiB)",
-               c->report_rp_size, c->report_rp_size >> 10);
-
-       dbg_msg("compiled on:         " __DATE__ " at " __TIME__);
-       dbg_msg("min. I/O unit size:  %d bytes", c->min_io_size);
-       dbg_msg("max. write size:     %d bytes", c->max_write_size);
-       dbg_msg("LEB size:            %d bytes (%d KiB)",
-               c->leb_size, c->leb_size >> 10);
-       dbg_msg("data journal heads:  %d",
+                 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid,
+                 c->big_lpt ? ", big LPT model" : ", small LPT model");
+
+       dbg_gen("default compressor:  %s", ubifs_compr_name(c->default_compr));
+       dbg_gen("data journal heads:  %d",
                c->jhead_cnt - NONDATA_JHEADS_CNT);
-       dbg_msg("UUID:                %pUB", c->uuid);
-       dbg_msg("big_lpt              %d", c->big_lpt);
-       dbg_msg("log LEBs:            %d (%d - %d)",
+       dbg_gen("log LEBs:            %d (%d - %d)",
                c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
-       dbg_msg("LPT area LEBs:       %d (%d - %d)",
+       dbg_gen("LPT area LEBs:       %d (%d - %d)",
                c->lpt_lebs, c->lpt_first, c->lpt_last);
-       dbg_msg("orphan area LEBs:    %d (%d - %d)",
+       dbg_gen("orphan area LEBs:    %d (%d - %d)",
                c->orph_lebs, c->orph_first, c->orph_last);
-       dbg_msg("main area LEBs:      %d (%d - %d)",
+       dbg_gen("main area LEBs:      %d (%d - %d)",
                c->main_lebs, c->main_first, c->leb_cnt - 1);
-       dbg_msg("index LEBs:          %d", c->lst.idx_lebs);
-       dbg_msg("total index bytes:   %lld (%lld KiB, %lld MiB)",
+       dbg_gen("index LEBs:          %d", c->lst.idx_lebs);
+       dbg_gen("total index bytes:   %lld (%lld KiB, %lld MiB)",
                c->bi.old_idx_sz, c->bi.old_idx_sz >> 10,
                c->bi.old_idx_sz >> 20);
-       dbg_msg("key hash type:       %d", c->key_hash_type);
-       dbg_msg("tree fanout:         %d", c->fanout);
-       dbg_msg("reserved GC LEB:     %d", c->gc_lnum);
-       dbg_msg("first main LEB:      %d", c->main_first);
-       dbg_msg("max. znode size      %d", c->max_znode_sz);
-       dbg_msg("max. index node size %d", c->max_idx_node_sz);
-       dbg_msg("node sizes:          data %zu, inode %zu, dentry %zu",
+       dbg_gen("key hash type:       %d", c->key_hash_type);
+       dbg_gen("tree fanout:         %d", c->fanout);
+       dbg_gen("reserved GC LEB:     %d", c->gc_lnum);
+       dbg_gen("max. znode size      %d", c->max_znode_sz);
+       dbg_gen("max. index node size %d", c->max_idx_node_sz);
+       dbg_gen("node sizes:          data %zu, inode %zu, dentry %zu",
                UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ);
-       dbg_msg("node sizes:          trun %zu, sb %zu, master %zu",
+       dbg_gen("node sizes:          trun %zu, sb %zu, master %zu",
                UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ);
-       dbg_msg("node sizes:          ref %zu, cmt. start %zu, orph %zu",
+       dbg_gen("node sizes:          ref %zu, cmt. start %zu, orph %zu",
                UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
-       dbg_msg("max. node sizes:     data %zu, inode %zu dentry %zu, idx %d",
+       dbg_gen("max. node sizes:     data %zu, inode %zu dentry %zu, idx %d",
                UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
                UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout));
-       dbg_msg("dead watermark:      %d", c->dead_wm);
-       dbg_msg("dark watermark:      %d", c->dark_wm);
-       dbg_msg("LEB overhead:        %d", c->leb_overhead);
+       dbg_gen("dead watermark:      %d", c->dead_wm);
+       dbg_gen("dark watermark:      %d", c->dark_wm);
+       dbg_gen("LEB overhead:        %d", c->leb_overhead);
        x = (long long)c->main_lebs * c->dark_wm;
-       dbg_msg("max. dark space:     %lld (%lld KiB, %lld MiB)",
+       dbg_gen("max. dark space:     %lld (%lld KiB, %lld MiB)",
                x, x >> 10, x >> 20);
-       dbg_msg("maximum bud bytes:   %lld (%lld KiB, %lld MiB)",
+       dbg_gen("maximum bud bytes:   %lld (%lld KiB, %lld MiB)",
                c->max_bud_bytes, c->max_bud_bytes >> 10,
                c->max_bud_bytes >> 20);
-       dbg_msg("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
+       dbg_gen("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
                c->bg_bud_bytes, c->bg_bud_bytes >> 10,
                c->bg_bud_bytes >> 20);
-       dbg_msg("current bud bytes    %lld (%lld KiB, %lld MiB)",
+       dbg_gen("current bud bytes    %lld (%lld KiB, %lld MiB)",
                c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20);
-       dbg_msg("max. seq. number:    %llu", c->max_sqnum);
-       dbg_msg("commit number:       %llu", c->cmt_no);
+       dbg_gen("max. seq. number:    %llu", c->max_sqnum);
+       dbg_gen("commit number:       %llu", c->cmt_no);
 
        return 0;
 
@@ -1571,10 +1561,9 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 
        if (c->rw_incompat) {
                ubifs_err("the file-system is not R/W-compatible");
-               ubifs_msg("on-flash format version is w%d/r%d, but software "
-                         "only supports up to version w%d/r%d", c->fmt_version,
-                         c->ro_compat_version, UBIFS_FORMAT_VERSION,
-                         UBIFS_RO_COMPAT_VERSION);
+               ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
+                         c->fmt_version, c->ro_compat_version,
+                         UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
                return -EROFS;
        }
 
@@ -1835,8 +1824,8 @@ static void ubifs_put_super(struct super_block *sb)
                                 * next mount, so we just print a message and
                                 * continue to unmount normally.
                                 */
-                               ubifs_err("failed to write master node, "
-                                         "error %d", err);
+                               ubifs_err("failed to write master node, error %d",
+                                         err);
                } else {
                        for (i = 0; i < c->jhead_cnt; i++)
                                /* Make sure write-buffer timers are canceled */
@@ -2128,8 +2117,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
         */
        ubi = open_ubi(name, UBI_READONLY);
        if (IS_ERR(ubi)) {
-               dbg_err("cannot open \"%s\", error %d",
-                       name, (int)PTR_ERR(ubi));
+               ubifs_err("cannot open \"%s\", error %d",
+                         name, (int)PTR_ERR(ubi));
                return ERR_CAST(ubi);
        }
 
@@ -2257,8 +2246,7 @@ static int __init ubifs_init(void)
         * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
         */
        if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
-               ubifs_err("VFS page cache size is %u bytes, but UBIFS requires"
-                         " at least 4096 bytes",
+               ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
                          (unsigned int)PAGE_CACHE_SIZE);
                return -EINVAL;
        }
index 0667386..a50b6bd 100644 (file)
@@ -339,17 +339,16 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 
        err = ubifs_validate_entry(c, dent);
        if (err) {
-               dbg_dump_stack();
-               dbg_dump_node(c, dent);
+               dump_stack();
+               ubifs_dump_node(c, dent);
                return err;
        }
 
-       lnc_node = kmalloc(zbr->len, GFP_NOFS);
+       lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
        if (!lnc_node)
                /* We don't have to have the cache, so no error */
                return 0;
 
-       memcpy(lnc_node, node, zbr->len);
        zbr->leaf = lnc_node;
        return 0;
 }
@@ -373,8 +372,8 @@ static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 
        err = ubifs_validate_entry(c, node);
        if (err) {
-               dbg_dump_stack();
-               dbg_dump_node(c, node);
+               dump_stack();
+               ubifs_dump_node(c, node);
                return err;
        }
 
@@ -506,7 +505,7 @@ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
 {
        int ret;
 
-       dbg_tnc("LEB %d:%d, key %s", zbr->lnum, zbr->offs, DBGKEY(key));
+       dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
 
        ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
                            zbr->offs);
@@ -520,8 +519,8 @@ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
                        ret = 0;
        }
        if (ret == 0 && c->replaying)
-               dbg_mnt("dangling branch LEB %d:%d len %d, key %s",
-                       zbr->lnum, zbr->offs, zbr->len, DBGKEY(key));
+               dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
+                       zbr->lnum, zbr->offs, zbr->len);
        return ret;
 }
 
@@ -996,9 +995,9 @@ static int fallible_resolve_collision(struct ubifs_info *c,
        if (adding || !o_znode)
                return 0;
 
-       dbg_mnt("dangling match LEB %d:%d len %d %s",
+       dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
                o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
-               o_znode->zbranch[o_n].len, DBGKEY(key));
+               o_znode->zbranch[o_n].len);
        *zn = o_znode;
        *n = o_n;
        return 1;
@@ -1180,7 +1179,7 @@ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
        struct ubifs_znode *znode;
        unsigned long time = get_seconds();
 
-       dbg_tnc("search key %s", DBGKEY(key));
+       dbg_tnck(key, "search key ");
        ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
 
        znode = c->zroot.znode;
@@ -1316,7 +1315,7 @@ static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
        struct ubifs_znode *znode;
        unsigned long time = get_seconds();
 
-       dbg_tnc("search and dirty key %s", DBGKEY(key));
+       dbg_tnck(key, "search and dirty key ");
 
        znode = c->zroot.znode;
        if (unlikely(!znode)) {
@@ -1723,8 +1722,8 @@ static int validate_data_node(struct ubifs_info *c, void *buf,
        if (!keys_eq(c, &zbr->key, &key1)) {
                ubifs_err("bad key in node at LEB %d:%d",
                          zbr->lnum, zbr->offs);
-               dbg_tnc("looked for key %s found node's key %s",
-                       DBGKEY(&zbr->key), DBGKEY1(&key1));
+               dbg_tnck(&zbr->key, "looked for key ");
+               dbg_tnck(&key1, "found node's key ");
                goto out_err;
        }
 
@@ -1734,8 +1733,8 @@ out_err:
        err = -EINVAL;
 out:
        ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
-       dbg_dump_node(c, buf);
-       dbg_dump_stack();
+       ubifs_dump_node(c, buf);
+       dump_stack();
        return err;
 }
 
@@ -1776,8 +1775,8 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
        if (err && err != -EBADMSG) {
                ubifs_err("failed to read from LEB %d:%d, error %d",
                          lnum, offs, err);
-               dbg_dump_stack();
-               dbg_tnc("key %s", DBGKEY(&bu->key));
+               dump_stack();
+               dbg_tnck(&bu->key, "key ");
                return err;
        }
 
@@ -1812,7 +1811,7 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
        int found, n, err;
        struct ubifs_znode *znode;
 
-       dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key));
+       dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
        mutex_lock(&c->tnc_mutex);
        found = ubifs_lookup_level0(c, key, &znode, &n);
        if (!found) {
@@ -1986,8 +1985,7 @@ again:
        zp = znode->parent;
        if (znode->child_cnt < c->fanout) {
                ubifs_assert(n != c->fanout);
-               dbg_tnc("inserted at %d level %d, key %s", n, znode->level,
-                       DBGKEY(key));
+               dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
 
                insert_zbranch(znode, zbr, n);
 
@@ -2002,7 +2000,7 @@ again:
         * Unfortunately, @znode does not have more empty slots and we have to
         * split it.
         */
-       dbg_tnc("splitting level %d, key %s", znode->level, DBGKEY(key));
+       dbg_tnck(key, "splitting level %d, key ", znode->level);
 
        if (znode->alt)
                /*
@@ -2096,7 +2094,7 @@ do_split:
        }
 
        /* Insert new key and branch */
-       dbg_tnc("inserting at %d level %d, key %s", n, zn->level, DBGKEY(key));
+       dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
 
        insert_zbranch(zi, zbr, n);
 
@@ -2172,7 +2170,7 @@ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
        struct ubifs_znode *znode;
 
        mutex_lock(&c->tnc_mutex);
-       dbg_tnc("%d:%d, len %d, key %s", lnum, offs, len, DBGKEY(key));
+       dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
        found = lookup_level0_dirty(c, key, &znode, &n);
        if (!found) {
                struct ubifs_zbranch zbr;
@@ -2221,8 +2219,8 @@ int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
        struct ubifs_znode *znode;
 
        mutex_lock(&c->tnc_mutex);
-       dbg_tnc("old LEB %d:%d, new LEB %d:%d, len %d, key %s", old_lnum,
-               old_offs, lnum, offs, len, DBGKEY(key));
+       dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
+                old_offs, lnum, offs, len);
        found = lookup_level0_dirty(c, key, &znode, &n);
        if (found < 0) {
                err = found;
@@ -2304,8 +2302,8 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
        struct ubifs_znode *znode;
 
        mutex_lock(&c->tnc_mutex);
-       dbg_tnc("LEB %d:%d, name '%.*s', key %s", lnum, offs, nm->len, nm->name,
-               DBGKEY(key));
+       dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
+                lnum, offs, nm->len, nm->name);
        found = lookup_level0_dirty(c, key, &znode, &n);
        if (found < 0) {
                err = found;
@@ -2398,14 +2396,14 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
        /* Delete without merge for now */
        ubifs_assert(znode->level == 0);
        ubifs_assert(n >= 0 && n < c->fanout);
-       dbg_tnc("deleting %s", DBGKEY(&znode->zbranch[n].key));
+       dbg_tnck(&znode->zbranch[n].key, "deleting key ");
 
        zbr = &znode->zbranch[n];
        lnc_free(zbr);
 
        err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
        if (err) {
-               dbg_dump_znode(c, znode);
+               ubifs_dump_znode(c, znode);
                return err;
        }
 
@@ -2508,7 +2506,7 @@ int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
        struct ubifs_znode *znode;
 
        mutex_lock(&c->tnc_mutex);
-       dbg_tnc("key %s", DBGKEY(key));
+       dbg_tnck(key, "key ");
        found = lookup_level0_dirty(c, key, &znode, &n);
        if (found < 0) {
                err = found;
@@ -2539,7 +2537,7 @@ int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
        struct ubifs_znode *znode;
 
        mutex_lock(&c->tnc_mutex);
-       dbg_tnc("%.*s, key %s", nm->len, nm->name, DBGKEY(key));
+       dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
        err = lookup_level0_dirty(c, key, &znode, &n);
        if (err < 0)
                goto out_unlock;
@@ -2651,10 +2649,10 @@ int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
                        err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
                                             znode->zbranch[i].len);
                        if (err) {
-                               dbg_dump_znode(c, znode);
+                               ubifs_dump_znode(c, znode);
                                goto out_unlock;
                        }
-                       dbg_tnc("removing %s", DBGKEY(key));
+                       dbg_tnck(key, "removing key ");
                }
                if (k) {
                        for (i = n + 1 + k; i < znode->child_cnt; i++)
@@ -2774,7 +2772,7 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
        struct ubifs_zbranch *zbr;
        union ubifs_key *dkey;
 
-       dbg_tnc("%s %s", nm->name ? (char *)nm->name : "(lowest)", DBGKEY(key));
+       dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
        ubifs_assert(is_hash_key(c, key));
 
        mutex_lock(&c->tnc_mutex);
@@ -3277,8 +3275,6 @@ out_unlock:
        return err;
 }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
-
 /**
  * dbg_check_inode_size - check if inode size is correct.
  * @c: UBIFS file-system description object
@@ -3333,17 +3329,15 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
 
 out_dump:
        block = key_block(c, key);
-       ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
-                 "(data key %s)", (unsigned long)inode->i_ino, size,
-                 ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
+       ubifs_err("inode %lu has size %lld, but there are data at offset %lld",
+                 (unsigned long)inode->i_ino, size,
+                 ((loff_t)block) << UBIFS_BLOCK_SHIFT);
        mutex_unlock(&c->tnc_mutex);
-       dbg_dump_inode(c, inode);
-       dbg_dump_stack();
+       ubifs_dump_inode(c, inode);
+       dump_stack();
        return -EINVAL;
 
 out_unlock:
        mutex_unlock(&c->tnc_mutex);
        return err;
 }
-
-#endif /* CONFIG_UBIFS_FS_DEBUG */
index 4c15f07..523bbad 100644 (file)
@@ -54,18 +54,16 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
                br->len = cpu_to_le32(zbr->len);
                if (!zbr->lnum || !zbr->len) {
                        ubifs_err("bad ref in znode");
-                       dbg_dump_znode(c, znode);
+                       ubifs_dump_znode(c, znode);
                        if (zbr->znode)
-                               dbg_dump_znode(c, zbr->znode);
+                               ubifs_dump_znode(c, zbr->znode);
                }
        }
        ubifs_prepare_node(c, idx, len, 0);
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
        znode->lnum = lnum;
        znode->offs = offs;
        znode->len = len;
-#endif
 
        err = insert_old_idx_znode(c, znode);
 
@@ -322,8 +320,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
                                  0, 0, 0);
        if (err)
                return err;
-       err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len,
-                              UBI_SHORTTERM);
+       err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
        if (err)
                return err;
        dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
@@ -388,8 +385,8 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
                                 * option which forces in-the-gaps is enabled.
                                 */
                                ubifs_warn("out of space");
-                               dbg_dump_budg(c, &c->bi);
-                               dbg_dump_lprops(c);
+                               ubifs_dump_budg(c, &c->bi);
+                               ubifs_dump_lprops(c);
                        }
                        /* Try to commit anyway */
                        err = 0;
@@ -456,11 +453,9 @@ static int layout_in_empty_space(struct ubifs_info *c)
 
                offs = buf_offs + used;
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
                znode->lnum = lnum;
                znode->offs = offs;
                znode->len = len;
-#endif
 
                /* Update the parent */
                zp = znode->parent;
@@ -536,10 +531,8 @@ static int layout_in_empty_space(struct ubifs_info *c)
                break;
        }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
        c->dbg->new_ihead_lnum = lnum;
        c->dbg->new_ihead_offs = buf_offs;
-#endif
 
        return 0;
 }
@@ -864,9 +857,9 @@ static int write_index(struct ubifs_info *c)
                        br->len = cpu_to_le32(zbr->len);
                        if (!zbr->lnum || !zbr->len) {
                                ubifs_err("bad ref in znode");
-                               dbg_dump_znode(c, znode);
+                               ubifs_dump_znode(c, znode);
                                if (zbr->znode)
-                                       dbg_dump_znode(c, zbr->znode);
+                                       ubifs_dump_znode(c, zbr->znode);
                        }
                }
                len = ubifs_idx_node_sz(c, znode->child_cnt);
@@ -881,13 +874,11 @@ static int write_index(struct ubifs_info *c)
                }
                offs = buf_offs + used;
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
                if (lnum != znode->lnum || offs != znode->offs ||
                    len != znode->len) {
                        ubifs_err("inconsistent znode posn");
                        return -EINVAL;
                }
-#endif
 
                /* Grab some stuff from znode while we still can */
                cnext = znode->cnext;
@@ -959,8 +950,7 @@ static int write_index(struct ubifs_info *c)
                }
 
                /* The buffer is full or there are no more znodes to do */
-               err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen,
-                                     UBI_SHORTTERM);
+               err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
                if (err)
                        return err;
                buf_offs += blen;
@@ -982,13 +972,11 @@ static int write_index(struct ubifs_info *c)
                break;
        }
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
        if (lnum != c->dbg->new_ihead_lnum ||
            buf_offs != c->dbg->new_ihead_offs) {
                ubifs_err("inconsistent ihead");
                return -EINVAL;
        }
-#endif
 
        c->ihead_lnum = lnum;
        c->ihead_offs = buf_offs;
index b48db99..f6bf899 100644 (file)
@@ -293,10 +293,10 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
                lnum, offs, znode->level, znode->child_cnt);
 
        if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) {
-               dbg_err("current fanout %d, branch count %d",
-                       c->fanout, znode->child_cnt);
-               dbg_err("max levels %d, znode level %d",
-                       UBIFS_MAX_LEVELS, znode->level);
+               ubifs_err("current fanout %d, branch count %d",
+                         c->fanout, znode->child_cnt);
+               ubifs_err("max levels %d, znode level %d",
+                         UBIFS_MAX_LEVELS, znode->level);
                err = 1;
                goto out_dump;
        }
@@ -316,7 +316,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
                if (zbr->lnum < c->main_first ||
                    zbr->lnum >= c->leb_cnt || zbr->offs < 0 ||
                    zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) {
-                       dbg_err("bad branch %d", i);
+                       ubifs_err("bad branch %d", i);
                        err = 2;
                        goto out_dump;
                }
@@ -328,8 +328,8 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
                case UBIFS_XENT_KEY:
                        break;
                default:
-                       dbg_msg("bad key type at slot %d: %s", i,
-                               DBGKEY(&zbr->key));
+                       ubifs_err("bad key type at slot %d: %d",
+                                 i, key_type(c, &zbr->key));
                        err = 3;
                        goto out_dump;
                }
@@ -340,19 +340,19 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
                type = key_type(c, &zbr->key);
                if (c->ranges[type].max_len == 0) {
                        if (zbr->len != c->ranges[type].len) {
-                               dbg_err("bad target node (type %d) length (%d)",
-                                       type, zbr->len);
-                               dbg_err("have to be %d", c->ranges[type].len);
+                               ubifs_err("bad target node (type %d) length (%d)",
+                                         type, zbr->len);
+                               ubifs_err("have to be %d", c->ranges[type].len);
                                err = 4;
                                goto out_dump;
                        }
                } else if (zbr->len < c->ranges[type].min_len ||
                           zbr->len > c->ranges[type].max_len) {
-                       dbg_err("bad target node (type %d) length (%d)",
-                               type, zbr->len);
-                       dbg_err("have to be in range of %d-%d",
-                               c->ranges[type].min_len,
-                               c->ranges[type].max_len);
+                       ubifs_err("bad target node (type %d) length (%d)",
+                                 type, zbr->len);
+                       ubifs_err("have to be in range of %d-%d",
+                                 c->ranges[type].min_len,
+                                 c->ranges[type].max_len);
                        err = 5;
                        goto out_dump;
                }
@@ -370,13 +370,13 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
 
                cmp = keys_cmp(c, key1, key2);
                if (cmp > 0) {
-                       dbg_err("bad key order (keys %d and %d)", i, i + 1);
+                       ubifs_err("bad key order (keys %d and %d)", i, i + 1);
                        err = 6;
                        goto out_dump;
                } else if (cmp == 0 && !is_hash_key(c, key1)) {
                        /* These can only be keys with colliding hash */
-                       dbg_err("keys %d and %d are not hashed but equivalent",
-                               i, i + 1);
+                       ubifs_err("keys %d and %d are not hashed but equivalent",
+                                 i, i + 1);
                        err = 7;
                        goto out_dump;
                }
@@ -387,7 +387,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
 
 out_dump:
        ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err);
-       dbg_dump_node(c, idx);
+       ubifs_dump_node(c, idx);
        kfree(idx);
        return -EINVAL;
 }
@@ -475,7 +475,7 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                                      zbr->offs);
 
        if (err) {
-               dbg_tnc("key %s", DBGKEY(key));
+               dbg_tnck(key, "key ");
                return err;
        }
 
@@ -484,9 +484,9 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
        if (!keys_eq(c, key, &key1)) {
                ubifs_err("bad key in node at LEB %d:%d",
                          zbr->lnum, zbr->offs);
-               dbg_tnc("looked for key %s found node's key %s",
-                       DBGKEY(key), DBGKEY1(&key1));
-               dbg_dump_node(c, node);
+               dbg_tnck(key, "looked for key ");
+               dbg_tnck(&key1, "but found node's key ");
+               ubifs_dump_node(c, node);
                return -EINVAL;
        }
 
index 223dd42..3ef953e 100644 (file)
 #define UBIFS_VERSION 1
 
 /* Normal UBIFS messages */
-#define ubifs_msg(fmt, ...) \
-               printk(KERN_NOTICE "UBIFS: " fmt "\n", ##__VA_ARGS__)
+#define ubifs_msg(fmt, ...) pr_notice("UBIFS: " fmt "\n", ##__VA_ARGS__)
 /* UBIFS error messages */
-#define ubifs_err(fmt, ...)                                                  \
-       printk(KERN_ERR "UBIFS error (pid %d): %s: " fmt "\n", current->pid, \
+#define ubifs_err(fmt, ...)                                         \
+       pr_err("UBIFS error (pid %d): %s: " fmt "\n", current->pid, \
               __func__, ##__VA_ARGS__)
 /* UBIFS warning messages */
-#define ubifs_warn(fmt, ...)                                         \
-       printk(KERN_WARNING "UBIFS warning (pid %d): %s: " fmt "\n", \
-              current->pid, __func__, ##__VA_ARGS__)
+#define ubifs_warn(fmt, ...)                                        \
+       pr_warn("UBIFS warning (pid %d): %s: " fmt "\n",            \
+               current->pid, __func__, ##__VA_ARGS__)
 
 /* UBIFS file system VFS magic number */
 #define UBIFS_SUPER_MAGIC 0x24051905
@@ -84,9 +83,6 @@
 #define INUM_WARN_WATERMARK 0xFFF00000
 #define INUM_WATERMARK      0xFFFFFF00
 
-/* Largest key size supported in this implementation */
-#define CUR_MAX_KEY_LEN UBIFS_SK_LEN
-
 /* Maximum number of entries in each LPT (LEB category) heap */
 #define LPT_HEAP_SZ 256
 
@@ -277,10 +273,10 @@ struct ubifs_old_idx {
 
 /* The below union makes it easier to deal with keys */
 union ubifs_key {
-       uint8_t u8[CUR_MAX_KEY_LEN];
-       uint32_t u32[CUR_MAX_KEY_LEN/4];
-       uint64_t u64[CUR_MAX_KEY_LEN/8];
-       __le32 j32[CUR_MAX_KEY_LEN/4];
+       uint8_t u8[UBIFS_SK_LEN];
+       uint32_t u32[UBIFS_SK_LEN/4];
+       uint64_t u64[UBIFS_SK_LEN/8];
+       __le32 j32[UBIFS_SK_LEN/4];
 };
 
 /**
@@ -653,8 +649,6 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
  * @avail: number of bytes available in the write-buffer
  * @used:  number of used bytes in the write-buffer
  * @size: write-buffer size (in [@c->min_io_size, @c->max_write_size] range)
- * @dtype: type of data stored in this LEB (%UBI_LONGTERM, %UBI_SHORTTERM,
- * %UBI_UNKNOWN)
  * @jhead: journal head the mutex belongs to (note, needed only to shut lockdep
  *         up by 'mutex_lock_nested()).
  * @sync_callback: write-buffer synchronization callback
@@ -688,7 +682,6 @@ struct ubifs_wbuf {
        int avail;
        int used;
        int size;
-       int dtype;
        int jhead;
        int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad);
        struct mutex io_mutex;
@@ -765,6 +758,9 @@ struct ubifs_zbranch {
  * @offs: offset of the corresponding indexing node
  * @len: length  of the corresponding indexing node
  * @zbranch: array of znode branches (@c->fanout elements)
+ *
+ * Note! The @lnum, @offs, and @len fields are not really needed - we have them
+ * only for internal consistency check. They could be removed to save some RAM.
  */
 struct ubifs_znode {
        struct ubifs_znode *parent;
@@ -775,9 +771,9 @@ struct ubifs_znode {
        int child_cnt;
        int iip;
        int alt;
-#ifdef CONFIG_UBIFS_FS_DEBUG
-       int lnum, offs, len;
-#endif
+       int lnum;
+       int offs;
+       int len;
        struct ubifs_zbranch zbranch[];
 };
 
@@ -1450,9 +1446,7 @@ struct ubifs_info {
        struct rb_root size_tree;
        struct ubifs_mount_opts mount_opts;
 
-#ifdef CONFIG_UBIFS_FS_DEBUG
        struct ubifs_debug_info *dbg;
-#endif
 };
 
 extern struct list_head ubifs_infos;
@@ -1474,22 +1468,20 @@ void ubifs_ro_mode(struct ubifs_info *c, int err);
 int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
                   int len, int even_ebadmsg);
 int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
-                   int len, int dtype);
-int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
-                    int dtype);
+                   int len);
+int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len);
 int ubifs_leb_unmap(struct ubifs_info *c, int lnum);
-int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype);
+int ubifs_leb_map(struct ubifs_info *c, int lnum);
 int ubifs_is_mapped(const struct ubifs_info *c, int lnum);
 int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len);
-int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
-                          int dtype);
+int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs);
 int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf);
 int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
                    int lnum, int offs);
 int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
                         int lnum, int offs);
 int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
-                    int offs, int dtype);
+                    int offs);
 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
                     int offs, int quiet, int must_chk_crc);
 void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad);
index bf18f7a..fd4ac85 100644 (file)
@@ -138,12 +138,11 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
        ui = ubifs_inode(inode);
        ui->xattr = 1;
        ui->flags |= UBIFS_XATTR_FL;
-       ui->data = kmalloc(size, GFP_NOFS);
+       ui->data = kmemdup(value, size, GFP_NOFS);
        if (!ui->data) {
                err = -ENOMEM;
                goto out_free;
        }
-       memcpy(ui->data, value, size);
        inode->i_size = ui->ui_size = size;
        ui->data_len = size;
 
@@ -204,12 +203,11 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
                return err;
 
        kfree(ui->data);
-       ui->data = kmalloc(size, GFP_NOFS);
+       ui->data = kmemdup(value, size, GFP_NOFS);
        if (!ui->data) {
                err = -ENOMEM;
                goto out_free;
        }
-       memcpy(ui->data, value, size);
        inode->i_size = ui->ui_size = size;
        ui->data_len = size;
 
@@ -401,8 +399,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
        if (buf) {
                /* If @buf is %NULL we are supposed to return the length */
                if (ui->data_len > size) {
-                       dbg_err("buffer size %zd, xattr len %d",
-                               size, ui->data_len);
+                       ubifs_err("buffer size %zd, xattr len %d",
+                                 size, ui->data_len);
                        err = -ERANGE;
                        goto out_iput;
                }
index 85a3ffa..abfb268 100644 (file)
@@ -3,13 +3,15 @@
 
 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
 /*
- * These two functions are only for dma allocator.
+ * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret);
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
 
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+                           void *cpu_addr, size_t size, int *ret);
 /*
  * Standard interface
  */
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644 (file)
index 0000000..294b1e7
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+       if (dev && dev->cma_area)
+               return dev->cma_area;
+       return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+       if (dev)
+               dev->cma_area = cma;
+       if (!dev && !dma_contiguous_default_area)
+               dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
index bc00876..08cde0c 100644 (file)
@@ -86,7 +86,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                                       pmd_t *pmdp)
 {
        pmd_t pmd = *pmdp;
-       pmd_clear(mm, address, pmdp);
+       pmd_clear(pmdp);
        return pmd;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -157,9 +157,8 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
-                                 unsigned long address,
-                                 pmd_t *pmdp);
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTE_SAME
index a3ce901..2f89d0e 100644 (file)
@@ -65,6 +65,7 @@ header-y += atmppp.h
 header-y += atmsap.h
 header-y += atmsvc.h
 header-y += audit.h
+header-y += aufs_type.h
 header-y += auto_fs.h
 header-y += auto_fs4.h
 header-y += auxvec.h
diff --git a/include/linux/aufs_type.h b/include/linux/aufs_type.h
new file mode 100644 (file)
index 0000000..83dae81
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __AUFS_TYPE_H__
+#define __AUFS_TYPE_H__
+
+#define AUFS_NAME      "aufs"
+
+#ifdef __KERNEL__
+/*
+ * define it before including all other headers.
+ * sched.h may use pr_* macros before defining "current", so define the
+ * no-current version first, and re-define later.
+ */
+#define pr_fmt(fmt)    AUFS_NAME " %s:%d: " fmt, __func__, __LINE__
+#include <linux/sched.h>
+#undef pr_fmt
+#define pr_fmt(fmt) \
+               AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \
+               (int)sizeof(current->comm), current->comm, current->pid
+#else
+#include <stdint.h>
+#include <sys/types.h>
+#endif /* __KERNEL__ */
+
+#include <linux/limits.h>
+
+#define AUFS_VERSION   "3.2.x-20131104"
+
+/* todo? move this to linux-2.6.19/include/magic.h */
+#define AUFS_SUPER_MAGIC       ('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_BRANCH_MAX_127
+typedef int8_t aufs_bindex_t;
+#define AUFS_BRANCH_MAX 127
+#else
+typedef int16_t aufs_bindex_t;
+#ifdef CONFIG_AUFS_BRANCH_MAX_511
+#define AUFS_BRANCH_MAX 511
+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
+#define AUFS_BRANCH_MAX 1023
+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
+#define AUFS_BRANCH_MAX 32767
+#endif
+#endif
+
+#ifdef __KERNEL__
+#ifndef AUFS_BRANCH_MAX
+#error unknown CONFIG_AUFS_BRANCH_MAX value
+#endif
+#endif /* __KERNEL__ */
+
+/* ---------------------------------------------------------------------- */
+
+#define AUFS_FSTYPE            AUFS_NAME
+
+#define AUFS_ROOT_INO          2
+#define AUFS_FIRST_INO         11
+
+#define AUFS_WH_PFX            ".wh."
+#define AUFS_WH_PFX_LEN                ((int)sizeof(AUFS_WH_PFX) - 1)
+#define AUFS_WH_TMP_LEN                4
+/* a limit for rmdir/rename a dir and copyup */
+#define AUFS_MAX_NAMELEN       (NAME_MAX \
+                               - AUFS_WH_PFX_LEN * 2   /* doubly whiteouted */\
+                               - 1                     /* dot */\
+                               - AUFS_WH_TMP_LEN)      /* hex */
+#define AUFS_XINO_FNAME                "." AUFS_NAME ".xino"
+#define AUFS_XINO_DEFPATH      "/tmp/" AUFS_XINO_FNAME
+#define AUFS_XINO_DEF_SEC      30 /* seconds */
+#define AUFS_XINO_DEF_TRUNC    45 /* percentage */
+#define AUFS_DIRWH_DEF         3
+#define AUFS_RDCACHE_DEF       10 /* seconds */
+#define AUFS_RDCACHE_MAX       3600 /* seconds */
+#define AUFS_RDBLK_DEF         512 /* bytes */
+#define AUFS_RDHASH_DEF                32
+#define AUFS_WKQ_NAME          AUFS_NAME "d"
+#define AUFS_MFS_DEF_SEC       30 /* seconds */
+#define AUFS_MFS_MAX_SEC       3600 /* seconds */
+#define AUFS_PLINK_WARN                50 /* number of plinks in a single bucket */
+
+/* pseudo-link maintenance under /proc */
+#define AUFS_PLINK_MAINT_NAME  "plink_maint"
+#define AUFS_PLINK_MAINT_DIR   "fs/" AUFS_NAME
+#define AUFS_PLINK_MAINT_PATH  AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME
+
+#define AUFS_DIROPQ_NAME       AUFS_WH_PFX ".opq" /* whiteouted doubly */
+#define AUFS_WH_DIROPQ         AUFS_WH_PFX AUFS_DIROPQ_NAME
+
+#define AUFS_BASE_NAME         AUFS_WH_PFX AUFS_NAME
+#define AUFS_PLINKDIR_NAME     AUFS_WH_PFX "plnk"
+#define AUFS_ORPHDIR_NAME      AUFS_WH_PFX "orph"
+
+/* doubly whiteouted */
+#define AUFS_WH_BASE           AUFS_WH_PFX AUFS_BASE_NAME
+#define AUFS_WH_PLINKDIR       AUFS_WH_PFX AUFS_PLINKDIR_NAME
+#define AUFS_WH_ORPHDIR                AUFS_WH_PFX AUFS_ORPHDIR_NAME
+
+/* branch permissions and attributes */
+#define AUFS_BRPERM_RW         "rw"
+#define AUFS_BRPERM_RO         "ro"
+#define AUFS_BRPERM_RR         "rr"
+#define AUFS_BRRATTR_WH                "wh"
+#define AUFS_BRWATTR_NLWH      "nolwh"
+#define AUFS_BRATTR_UNPIN      "unpin"
+
+/* ---------------------------------------------------------------------- */
+
+/* ioctl */
+enum {
+       /* readdir in userspace */
+       AuCtl_RDU,
+       AuCtl_RDU_INO,
+
+       /* pathconf wrapper */
+       AuCtl_WBR_FD,
+
+       /* busy inode */
+       AuCtl_IBUSY
+};
+
+/* borrowed from linux/include/linux/kernel.h */
+#ifndef ALIGN
+#define ALIGN(x, a)            __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask)  (((x)+(mask))&~(mask))
+#endif
+
+/* borrowed from linux/include/linux/compiler-gcc3.h */
+#ifndef __aligned
+#define __aligned(x)                   __attribute__((aligned(x)))
+#endif
+
+#ifdef __KERNEL__
+#ifndef __packed
+#define __packed                       __attribute__((packed))
+#endif
+#endif
+
+struct au_rdu_cookie {
+       uint64_t        h_pos;
+       int16_t         bindex;
+       uint8_t         flags;
+       uint8_t         pad;
+       uint32_t        generation;
+} __aligned(8);
+
+struct au_rdu_ent {
+       uint64_t        ino;
+       int16_t         bindex;
+       uint8_t         type;
+       uint8_t         nlen;
+       uint8_t         wh;
+       char            name[0];
+} __aligned(8);
+
+static inline int au_rdu_len(int nlen)
+{
+       /* include the terminating NULL */
+       return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1,
+                    sizeof(uint64_t));
+}
+
+union au_rdu_ent_ul {
+       struct au_rdu_ent __user        *e;
+       uint64_t                        ul;
+};
+
+enum {
+       AufsCtlRduV_SZ,
+       AufsCtlRduV_End
+};
+
+struct aufs_rdu {
+       /* input */
+       union {
+               uint64_t        sz;     /* AuCtl_RDU */
+               uint64_t        nent;   /* AuCtl_RDU_INO */
+       };
+       union au_rdu_ent_ul     ent;
+       uint16_t                verify[AufsCtlRduV_End];
+
+       /* input/output */
+       uint32_t                blk;
+
+       /* output */
+       union au_rdu_ent_ul     tail;
+       /* number of entries which were added in a single call */
+       uint64_t                rent;
+       uint8_t                 full;
+       uint8_t                 shwh;
+
+       struct au_rdu_cookie    cookie;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_wbr_fd {
+       uint32_t        oflags;
+       int16_t         brid;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_ibusy {
+       uint64_t        ino, h_ino;
+       int16_t         bindex;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+#define AuCtlType              'A'
+#define AUFS_CTL_RDU           _IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu)
+#define AUFS_CTL_RDU_INO       _IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu)
+#define AUFS_CTL_WBR_FD                _IOW(AuCtlType, AuCtl_WBR_FD, \
+                                    struct aufs_wbr_fd)
+#define AUFS_CTL_IBUSY         _IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy)
+
+#endif /* __AUFS_TYPE_H__ */
index c7e834b..664a07c 100644 (file)
@@ -1282,19 +1282,70 @@ queue_max_integrity_segments(struct request_queue *q)
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-#define blk_integrity_rq(rq)                   (0)
-#define blk_rq_count_integrity_sg(a, b)                (0)
-#define blk_rq_map_integrity_sg(a, b, c)       (0)
-#define bdev_get_integrity(a)                  (0)
-#define blk_get_integrity(a)                   (0)
-#define blk_integrity_compare(a, b)            (0)
-#define blk_integrity_register(a, b)           (0)
-#define blk_integrity_unregister(a)            do { } while (0)
-#define blk_queue_max_integrity_segments(a, b) do { } while (0)
-#define queue_max_integrity_segments(a)                (0)
-#define blk_integrity_merge_rq(a, b, c)                (0)
-#define blk_integrity_merge_bio(a, b, c)       (0)
-#define blk_integrity_is_initialized(a)                (0)
+struct bio;
+struct block_device;
+struct gendisk;
+struct blk_integrity;
+
+static inline int blk_integrity_rq(struct request *rq)
+{
+       return 0;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+                                           struct bio *b)
+{
+       return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+                                         struct bio *b,
+                                         struct scatterlist *s)
+{
+       return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+       return 0;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+       return NULL;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+       return 0;
+}
+static inline int blk_integrity_register(struct gendisk *d,
+                                        struct blk_integrity *b)
+{
+       return 0;
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+                                                   unsigned int segs)
+{
+}
+static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+{
+       return 0;
+}
+static inline int blk_integrity_merge_rq(struct request_queue *rq,
+                                        struct request *r1,
+                                        struct request *r2)
+{
+       return 0;
+}
+static inline int blk_integrity_merge_bio(struct request_queue *rq,
+                                         struct request *r,
+                                         struct bio *b)
+{
+       return 0;
+}
+static inline bool blk_integrity_is_initialized(struct gendisk *g)
+{
+       return 0;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
index 7408af8..07eeb25 100644 (file)
@@ -45,6 +45,7 @@ struct cpuidle_state {
        unsigned int    exit_latency; /* in US */
        unsigned int    power_usage; /* in mW */
        unsigned int    target_residency; /* in US */
+       unsigned int    disable;
 
        int (*enter)    (struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
index e7d9b20..d1ac841 100644 (file)
@@ -34,7 +34,7 @@ extern struct dentry *arch_debugfs_dir;
 extern const struct file_operations debugfs_file_operations;
 extern const struct inode_operations debugfs_link_operations;
 
-struct dentry *debugfs_create_file(const char *name, mode_t mode,
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
                                   struct dentry *parent, void *data,
                                   const struct file_operations *fops);
 
@@ -49,28 +49,28 @@ void debugfs_remove_recursive(struct dentry *dentry);
 struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
                 struct dentry *new_dir, const char *new_name);
 
-struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
                                 struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
                                  struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
                                  struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
                                  struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
                                 struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
                                  struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
                                  struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
                                  struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
                                     struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
                                  struct dentry *parent, u32 *value);
 
-struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
                                  struct dentry *parent,
                                  struct debugfs_blob_wrapper *blob);
 
@@ -86,7 +86,7 @@ bool debugfs_initialized(void);
  * want to duplicate the design decision mistakes of procfs and devfs again.
  */
 
-static inline struct dentry *debugfs_create_file(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
                                        struct dentry *parent, void *data,
                                        const struct file_operations *fops)
 {
@@ -118,70 +118,70 @@ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentr
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
                                               struct dentry *parent,
                                               u8 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
                                                struct dentry *parent,
                                                u16 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
                                                struct dentry *parent,
                                                u32 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
                                                struct dentry *parent,
                                                u64 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
                                               struct dentry *parent,
                                               u8 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
                                                struct dentry *parent,
                                                u16 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
                                                struct dentry *parent,
                                                u32 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
                                     struct dentry *parent,
                                     size_t *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
                                                 struct dentry *parent,
                                                 u32 *value)
 {
        return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
                                  struct dentry *parent,
                                  struct debugfs_blob_wrapper *blob)
 {
index a31c5d0..a2d7dbc 100644 (file)
@@ -594,6 +594,10 @@ struct device {
 
        struct dma_coherent_mem *dma_mem; /* internal for coherent mem
                                             override */
+#ifdef CONFIG_CMA
+       struct cma *cma_area;           /* contiguous memory area for dma
+                                          allocations */
+#endif
        /* arch specific additions */
        struct dev_archdata     archdata;
 
index 71ad34e..a37c10c 100644 (file)
@@ -13,6 +13,9 @@
 enum dma_attr {
        DMA_ATTR_WRITE_BARRIER,
        DMA_ATTR_WEAK_ORDERING,
+       DMA_ATTR_WRITE_COMBINE,
+       DMA_ATTR_NON_CONSISTENT,
+       DMA_ATTR_NO_KERNEL_MAPPING,
        DMA_ATTR_MAX,
 };
 
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644 (file)
index 0000000..2f303e4
--- /dev/null
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *     Marek Szyprowski <m.szyprowski@samsung.com>
+ *     Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ *   The Contiguous Memory Allocator (CMA) makes it possible to
+ *   allocate big contiguous chunks of memory after the system has
+ *   booted.
+ *
+ * Why is it needed?
+ *
+ *   Various devices on embedded systems have no scatter-gather and/or
+ *   IO map support and require contiguous blocks of memory to
+ *   operate.  They include devices such as cameras, hardware video
+ *   coders, etc.
+ *
+ *   Such devices often require big memory buffers (a full HD frame
+ *   is, for instance, more than 2 mega pixels large, i.e. more than 6
+ *   MB of memory), which makes mechanisms such as kmalloc() or
+ *   alloc_page() ineffective.
+ *
+ *   At the same time, a solution where a big memory region is
+ *   reserved for a device is suboptimal since often more memory is
+ *   reserved than strictly required and, moreover, the memory is
+ *   inaccessible to page system even if device drivers don't use it.
+ *
+ *   CMA tries to solve this issue by operating on memory regions
+ *   where only movable pages can be allocated from.  This way, kernel
+ *   can use the memory for pagecache and when device driver requests
+ *   it, allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ *   CMA should not be used by the device drivers directly. It is
+ *   only a helper framework for dma-mapping subsystem.
+ *
+ *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least global CMA area and a few optional device
+ * private areas configured in kernel .config.
+ */
+#define MAX_CMA_AREAS  (1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+                          phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+                                      unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count);
+
+#else
+
+#define MAX_CMA_AREAS  (0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+                          phys_addr_t base, phys_addr_t limit)
+{
+       return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+                                      unsigned int order)
+{
+       return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count)
+{
+       return false;
+}
+
+#endif
+
+#endif
+
+#endif
index e13117c..dfc099e 100644 (file)
@@ -9,10 +9,15 @@
 #include <linux/scatterlist.h>
 
 struct dma_map_ops {
-       void* (*alloc_coherent)(struct device *dev, size_t size,
-                               dma_addr_t *dma_handle, gfp_t gfp);
-       void (*free_coherent)(struct device *dev, size_t size,
-                             void *vaddr, dma_addr_t dma_handle);
+       void* (*alloc)(struct device *dev, size_t size,
+                               dma_addr_t *dma_handle, gfp_t gfp,
+                               struct dma_attrs *attrs);
+       void (*free)(struct device *dev, size_t size,
+                             void *vaddr, dma_addr_t dma_handle,
+                             struct dma_attrs *attrs);
+       int (*mmap)(struct device *, struct vm_area_struct *,
+                         void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
@@ -77,7 +82,7 @@ static inline u64 dma_get_mask(struct device *dev)
        return DMA_BIT_MASK(32);
 }
 
-#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 #else
 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
index 82163c4..7675da2 100644 (file)
@@ -38,6 +38,36 @@ struct fdtable {
        struct fdtable *next;
 };
 
+static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
+{
+       FD_SET(fd, fdt->close_on_exec);
+}
+
+static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
+{
+       FD_CLR(fd, fdt->close_on_exec);
+}
+
+static inline bool close_on_exec(int fd, const struct fdtable *fdt)
+{
+       return FD_ISSET(fd, fdt->close_on_exec);
+}
+
+static inline void __set_open_fd(int fd, struct fdtable *fdt)
+{
+       FD_SET(fd, fdt->open_fds);
+}
+
+static inline void __clear_open_fd(int fd, struct fdtable *fdt)
+{
+       FD_CLR(fd, fdt->open_fds);
+}
+
+static inline bool fd_is_open(int fd, const struct fdtable *fdt)
+{
+       return FD_ISSET(fd, fdt->open_fds);
+}
+
 /*
  * Open file table structure
  */
index 3a76faf..2e54200 100644 (file)
@@ -358,6 +358,7 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, int cold);
+extern void free_hot_cold_page_list(struct list_head *list, int cold);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
@@ -367,9 +368,37 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+/*
+ * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
+ * GFP flags are used before interrupts are enabled. Once interrupts are
+ * enabled, it is set to __GFP_BITS_MASK while the system is running. During
+ * hibernation, it is used by PM to avoid I/O during memory allocation while
+ * devices are suspended.
+ */
 extern gfp_t gfp_allowed_mask;
 
 extern void pm_restrict_gfp_mask(void);
 extern void pm_restore_gfp_mask(void);
 
+#ifdef CONFIG_PM_SLEEP
+extern bool pm_suspended_storage(void);
+#else
+static inline bool pm_suspended_storage(void)
+{
+       return false;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+                             unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
 #endif /* __LINUX_GFP_H */
index a9ace9c..031a5b9 100644 (file)
@@ -8,6 +8,10 @@ extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
+extern void huge_pmd_set_accessed(struct mm_struct *mm,
+                                 struct vm_area_struct *vma,
+                                 unsigned long address, pmd_t *pmd,
+                                 pmd_t orig_pmd, int dirty);
 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               pmd_t orig_pmd);
index 114c0f6..ca0aeec 100644 (file)
@@ -178,6 +178,7 @@ TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
  */
 int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
 int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+int twl_i2c_rmw_u8(u8 mod_no, u8 bits_to_clear, u8 bits_to_set, u8 reg);
 
 /*
  * Read and write several 8-bit registers at once.
@@ -557,6 +558,8 @@ struct twl4030_clock_init_data {
 struct twl4030_bci_platform_data {
        int *battery_tmp_tbl;
        unsigned int tblsize;
+       char **supplied_to;
+       size_t num_supplicants;
 };
 
 /* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
@@ -652,10 +655,12 @@ struct twl4030_power_data {
        unsigned num;
        struct twl4030_resconfig *resource_config;
 #define TWL4030_RESCONFIG_UNDEF        ((u8)-1)
+       bool use_poweroff;      /* Board is wired for TWL poweroff */
 };
 
 extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
 extern int twl4030_remove_script(u8 flags);
+extern void twl4030_power_off(void);
 
 struct twl4030_codec_data {
        unsigned int digimic_delay; /* in ms */
@@ -710,6 +715,9 @@ struct twl4030_platform_data {
        struct regulator_init_data              *vaux1;
        struct regulator_init_data              *vaux2;
        struct regulator_init_data              *vaux3;
+       struct regulator_init_data              *vdd1;
+       struct regulator_init_data              *vdd2;
+       struct regulator_init_data              *vdd3;
        /* TWL4030 LDO regulators */
        struct regulator_init_data              *vpll1;
        struct regulator_init_data              *vpll2;
@@ -718,8 +726,6 @@ struct twl4030_platform_data {
        struct regulator_init_data              *vsim;
        struct regulator_init_data              *vaux4;
        struct regulator_init_data              *vio;
-       struct regulator_init_data              *vdd1;
-       struct regulator_init_data              *vdd2;
        struct regulator_init_data              *vintana1;
        struct regulator_init_data              *vintana2;
        struct regulator_init_data              *vintdig;
@@ -747,6 +753,13 @@ struct twl4030_platform_data {
        struct regulator_init_data              *vio6025;
 };
 
+struct twl_regulator_driver_data {
+       int             (*set_voltage)(void *data, int target_uV);
+       int             (*get_voltage)(void *data);
+       void            *data;
+       unsigned long   features;
+};
+
 /*----------------------------------------------------------------------*/
 
 int twl4030_sih_setup(int module);
diff --git a/include/linux/i2c/vsense.h b/include/linux/i2c/vsense.h
new file mode 100644 (file)
index 0000000..3e98276
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+       vsense.h
+
+       This program is free software; you can redistribute it and/or modify
+       it under the terms of the GNU General Public License as published by
+       the Free Software Foundation; version 2 of the License.
+*/
+
+#ifndef _VSENSE_H_
+#define _VSENSE_H_
+
+struct vsense_platform_data {
+       int gpio_irq;
+       int gpio_reset;
+};
+
+#endif
index 5884def..84102cf 100644 (file)
@@ -131,6 +131,8 @@ struct led_trigger {
 
        /* Link to next registered trigger */
        struct list_head  next_trig;
+
+       enum led_brightness prev_brightness;
 };
 
 /* Registration functions for complex triggers */
index ca5bd91..c8cb615 100644 (file)
@@ -46,6 +46,7 @@ extern const struct linux_logo logo_superh_mono;
 extern const struct linux_logo logo_superh_vga16;
 extern const struct linux_logo logo_superh_clut224;
 extern const struct linux_logo logo_m32r_clut224;
+extern const struct linux_logo logo_pandora_clut224;
 extern const struct linux_logo logo_spe_clut224;
 
 extern const struct linux_logo *fb_find_logo(int depth);
index 6bea2c2..ff9a9f8 100644 (file)
@@ -15,7 +15,6 @@
 #ifndef _LINUX_MEMORY_H_
 #define _LINUX_MEMORY_H_
 
-#include <linux/sysdev.h>
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/mutex.h>
@@ -38,7 +37,7 @@ struct memory_block {
        int phys_device;                /* to which fru does this belong? */
        void *hw;                       /* optional pointer to fw/hw data */
        int (*phys_callback)(struct memory_block *);
-       struct sys_device sysdev;
+       struct device dev;
 };
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
index e5ee683..1e320c2 100644 (file)
@@ -1633,5 +1633,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+       return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+       return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
index 87967ee..3b843f1 100644 (file)
@@ -206,6 +206,7 @@ struct mmc_card {
 #define MMC_STATE_HIGHSPEED_DDR (1<<4)         /* card is in high speed mode */
 #define MMC_STATE_ULTRAHIGHSPEED (1<<5)                /* card is in ultra high speed mode */
 #define MMC_CARD_SDXC          (1<<6)          /* card is SDXC */
+#define MMC_CARD_REMOVED       (1<<7)          /* card has been removed */
        unsigned int            quirks;         /* card quirks */
 #define MMC_QUIRK_LENIENT_FN0  (1<<0)          /* allow SDIO FN0 writes outside of the VS CCCR range */
 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1)   /* use func->cur_blksize */
@@ -367,6 +368,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
 #define mmc_card_ddr_mode(c)   ((c)->state & MMC_STATE_HIGHSPEED_DDR)
 #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
+#define mmc_card_removed(c)    ((c) && ((c)->state & MMC_CARD_REMOVED))
 
 #define mmc_card_set_present(c)        ((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -375,6 +377,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
 #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
 #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
 
 /*
  * Quirk add/remove for MMC products.
@@ -456,7 +459,7 @@ struct mmc_driver {
        struct device_driver drv;
        int (*probe)(struct mmc_card *);
        void (*remove)(struct mmc_card *);
-       int (*suspend)(struct mmc_card *, pm_message_t);
+       int (*suspend)(struct mmc_card *);
        int (*resume)(struct mmc_card *);
 };
 
index 174a844..87a976c 100644 (file)
@@ -180,6 +180,8 @@ extern int mmc_try_claim_host(struct mmc_host *host);
 
 extern int mmc_flush_cache(struct mmc_card *);
 
+extern int mmc_detect_card_removed(struct mmc_host *host);
+
 /**
  *     mmc_claim_host - exclusively claim a host
  *     @host: mmc host to claim
index deb6282..ef820f3 100644 (file)
@@ -297,6 +297,7 @@ struct mmc_host {
        int                     claim_cnt;      /* "claim" nesting count */
 
        struct delayed_work     detect;
+       int                     detect_change;  /* card detect flag */
 
        const struct mmc_bus_ops *bus_ops;      /* current bus driver */
        unsigned int            bus_refs;       /* reference counter */
index 25842b6..132da3b 100644 (file)
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+       MIGRATE_UNMOVABLE,
+       MIGRATE_RECLAIMABLE,
+       MIGRATE_MOVABLE,
+       MIGRATE_PCPTYPES,       /* the number of types on the pcp lists */
+       MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+       /*
+        * MIGRATE_CMA migration type is designed to mimic the way
+        * ZONE_MOVABLE works.  Only movable pages can be allocated
+        * from MIGRATE_CMA pageblocks and page allocator never
+        * implicitly changes migration type of MIGRATE_CMA pageblock.
+        *
+        * The way to use it is to change migratetype of a range of
+        * pageblocks to MIGRATE_CMA which can be done by
+        * __free_pageblock_cma() function.  What is important though
+        * is that a range of pageblocks must be aligned to
+        * MAX_ORDER_NR_PAGES should biggest page be bigger than
+        * a single pageblock.
+        */
+       MIGRATE_CMA,
+#endif
+       MIGRATE_ISOLATE,        /* can't allocate from here */
+       MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define cma_wmark_pages(zone)        zone->min_cma_pages
+#else
+#  define is_migrate_cma(migratetype) false
+#  define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
@@ -319,6 +345,12 @@ struct zone {
         */
        unsigned long           lowmem_reserve[MAX_NR_ZONES];
 
+       /*
+        * This is a per-zone reserve of pages that should not be
+        * considered dirtyable memory.
+        */
+       unsigned long           dirty_balance_reserve;
+
 #ifdef CONFIG_NUMA
        int node;
        /*
@@ -336,6 +368,13 @@ struct zone {
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
+#endif
+#ifdef CONFIG_CMA
+       /*
+        * CMA needs to increase watermark levels during the allocation
+        * process to make sure that the system is not starved.
+        */
+       unsigned long           min_cma_pages;
 #endif
        struct free_area        free_area[MAX_ORDER];
 
index 34066e6..f38d4f0 100644 (file)
@@ -101,6 +101,7 @@ struct __fat_dirent {
 /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
 #define FAT_IOCTL_GET_ATTRIBUTES       _IOR('r', 0x10, __u32)
 #define FAT_IOCTL_SET_ATTRIBUTES       _IOW('r', 0x11, __u32)
+#define VFAT_IOCTL_GET_VOLUME_ID       _IOR('r', 0x12, __u32)
 
 struct fat_boot_sector {
        __u8    ignored[3];     /* Boot strap short or near jump */
@@ -138,6 +139,17 @@ struct fat_boot_fsinfo {
        __le32   reserved2[4];
 };
 
+struct fat_boot_bsx {
+       __u8     drive;             /* drive number */
+       __u8     reserved1;
+       __u8     signature;         /* extended boot signature */
+       __u8     vol_id[4];     /* volume ID */
+       __u8     vol_label[11]; /* volume label */
+       __u8     type[8];       /* file system type */
+};
+#define FAT16_BSX_OFFSET       36 /* offset of fat_boot_bsx in FAT12 and FAT16 */
+#define FAT32_BSX_OFFSET       64 /* offset of fat_boot_bsx in FAT32 */
+
 struct msdos_dir_entry {
        __u8    name[MSDOS_NAME];/* name and extension */
        __u8    attr;           /* attribute bits */
index db4836b..c3918a0 100644 (file)
@@ -25,6 +25,9 @@
 #include <linux/types.h>
 #include <mtd/ubi-user.h>
 
+/* All volumes/LEBs */
+#define UBI_ALL -1
+
 /*
  * enum ubi_open_mode - UBI volume open mode constants.
  *
@@ -208,14 +211,15 @@ void ubi_close_volume(struct ubi_volume_desc *desc);
 int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
                 int len, int check);
 int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
-                 int offset, int len, int dtype);
+                 int offset, int len);
 int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
-                  int len, int dtype);
+                  int len);
 int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
 int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype);
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
 int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
 int ubi_sync(int ubi_num);
+int ubi_flush(int ubi_num, int vol_id, int lnum);
 
 /*
  * This function is the same as the 'ubi_leb_read()' function, but it does not
@@ -226,25 +230,4 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
 {
        return ubi_leb_read(desc, lnum, buf, offset, len, 0);
 }
-
-/*
- * This function is the same as the 'ubi_leb_write()' functions, but it does
- * not have the data type argument.
- */
-static inline int ubi_write(struct ubi_volume_desc *desc, int lnum,
-                           const void *buf, int offset, int len)
-{
-       return ubi_leb_write(desc, lnum, buf, offset, len, UBI_UNKNOWN);
-}
-
-/*
- * This function is the same as the 'ubi_leb_change()' functions, but it does
- * not have the data type argument.
- */
-static inline int ubi_change(struct ubi_volume_desc *desc, int lnum,
-                                   const void *buf, int len)
-{
-       return ubi_leb_change(desc, lnum, buf, len, UBI_UNKNOWN);
-}
-
 #endif /* !__LINUX_UBI_H__ */
index ffc0213..ef35a31 100644 (file)
@@ -85,6 +85,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
 extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
                int (*open)(struct inode *, struct file *));
 
+extern struct dentry *lookup_hash(struct nameidata *nd);
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 
 extern int follow_down_one(struct path *);
index 92370e2..624e53c 100644 (file)
 #ifndef _LINUX_NODE_H_
 #define _LINUX_NODE_H_
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpumask.h>
 #include <linux/workqueue.h>
 
 struct node {
-       struct sys_device       sysdev;
+       struct device   dev;
 
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
        struct work_struct      node_work;
@@ -80,6 +80,6 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
 }
 #endif
 
-#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
+#define to_node(device) container_of(device, struct node, dev)
 
 #endif /* _LINUX_NODE_H_ */
index c0b0187..1f260fd 100644 (file)
@@ -59,6 +59,9 @@
 #define OMAPFB_SET_TEARSYNC    OMAP_IOW(62, struct omapfb_tearsync_info)
 #define OMAPFB_GET_DISPLAY_INFO        OMAP_IOR(63, struct omapfb_display_info)
 
+#define OMAPFB_WAITFORVSYNC_FRAME OMAP_IOWR(70, int)
+#define OMAPFB_GET_LINE_STATUS OMAP_IOR(71, int)
+
 #define OMAPFB_CAPS_GENERIC_MASK       0x00000fff
 #define OMAPFB_CAPS_LCDC_MASK          0x00fff000
 #define OMAPFB_CAPS_PANEL_MASK         0xff000000
index ee94b33..192e4ed 100644 (file)
@@ -47,6 +47,12 @@ int opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *opp_get_notifier(struct device *dev);
 
+/* hacks */
+int opp_enable_i(struct device *dev, int index);
+int opp_disable_i(struct device *dev, int index);
+int opp_hack_set_freq(struct device *dev, int index, unsigned long freq);
+int opp_hack_get_freq(struct device *dev, int index, unsigned long *freq);
+
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {
index b0638fd..22691f6 100644 (file)
@@ -13,6 +13,7 @@
 
 enum page_debug_flags {
        PAGE_DEBUG_FLAG_POISON,         /* Page is poisoned */
+       PAGE_DEBUG_FLAG_GUARD,
 };
 
 /*
@@ -21,7 +22,8 @@ enum page_debug_flags {
  */
 
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) \
+#if !defined(CONFIG_PAGE_POISONING) && \
+    !defined(CONFIG_PAGE_GUARD) \
 /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
 #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
 #endif
index 051c1b1..3bdcab3 100644 (file)
@@ -3,7 +3,7 @@
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE,
+ * If specified range includes migrate types other than MOVABLE or CMA,
  * this will fail with -EBUSY.
  *
  * For isolating all pages in the range finally, the caller have to
  * test it.
  */
 extern int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned migratetype);
 
 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
  * target range is [start_pfn, end_pfn)
  */
 extern int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                       unsigned migratetype);
 
 /*
- * test all pages in [start_pfn, end_pfn)are isolated or not.
+ * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
-extern int
-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 
 /*
- * Internal funcs.Changes pageblock's migrate type.
- * Please use make_pagetype_isolated()/make_pagetype_movable().
+ * Internal functions. Changes pageblock's migrate type.
  */
 extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 
 
 #endif
index bab82f4..ed17024 100644 (file)
@@ -21,7 +21,6 @@ struct pagevec {
 };
 
 void __pagevec_release(struct pagevec *pvec);
-void __pagevec_free(struct pagevec *pvec);
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
 void pagevec_strip(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
@@ -67,12 +66,6 @@ static inline void pagevec_release(struct pagevec *pvec)
                __pagevec_release(pvec);
 }
 
-static inline void pagevec_free(struct pagevec *pvec)
-{
-       if (pagevec_count(pvec))
-               __pagevec_free(pvec);
-}
-
 static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
 {
        ____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h
new file mode 100644 (file)
index 0000000..ed56953
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * DRM/KMS platform data for TI OMAP platforms
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __PLATFORM_DATA_OMAP_DRM_H__
+#define __PLATFORM_DATA_OMAP_DRM_H__
+
+/*
+ * Optional platform data to configure the default configuration of which
+ * pipes/overlays/CRTCs are used. If this is not provided, then instead the
+ * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
+ * one manager, with priority given to managers that are connected to
+ * detected devices.  This should be a good default behavior for most cases,
+ * but yet there still might be times when you wish to do something different.
+ */
+struct omap_drm_platform_data {
+       int ovl_cnt;
+       const int *ovl_ids;
+       int mgr_cnt;
+       const int *mgr_ids;
+       int dev_cnt;
+       const char **dev_names;
+};
+
+#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
index 14a86bc..a822fd7 100644 (file)
@@ -144,7 +144,7 @@ struct rchan_callbacks
         */
        struct dentry *(*create_buf_file)(const char *filename,
                                          struct dentry *parent,
-                                         int mode,
+                                         umode_t mode,
                                          struct rchan_buf *buf,
                                          int *is_global);
 
index a32bcfd..f52e850 100644 (file)
@@ -45,7 +45,6 @@ struct kmem_cache_cpu {
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
        struct page *partial;   /* Partially allocated frozen slabs */
-       int node;               /* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
index 09a545a..1ac5727 100644 (file)
@@ -91,4 +91,10 @@ extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+
+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+                          loff_t *ppos, size_t len, unsigned int flags);
+extern long do_splice_to(struct file *in, loff_t *ppos,
+                        struct pipe_inode_info *pipe, size_t len,
+                        unsigned int flags);
 #endif
index 67b3fa3..3e60228 100644 (file)
@@ -207,6 +207,7 @@ struct swap_list_t {
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);
 
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
new file mode 100644 (file)
index 0000000..2739ccb
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+#ifndef __SOC_BUS_H
+#define __SOC_BUS_H
+
+#include <linux/device.h>
+
+struct soc_device_attribute {
+       const char *machine;
+       const char *family;
+       const char *revision;
+       const char *soc_id;
+};
+
+/**
+ * soc_device_register - register SoC as a device
+ * @soc_plat_dev_attr: Attributes passed from platform to be attributed to a SoC
+ */
+struct soc_device *soc_device_register(
+       struct soc_device_attribute *soc_plat_dev_attr);
+
+/**
+ * soc_device_unregister - unregister SoC device
+ * @dev: SoC device to be unregistered
+ */
+void soc_device_unregister(struct soc_device *soc_dev);
+
+/**
+ * soc_device_to_device - helper function to fetch struct device
+ * @soc: Previously registered SoC device container
+ */
+struct device *soc_device_to_device(struct soc_device *soc);
+
+#endif /* __SOC_BUS_H */
index d87f44f..308b699 100644 (file)
@@ -71,6 +71,7 @@ struct otg_transceiver {
        struct usb_bus          *host;
        struct usb_gadget       *gadget;
 
+       struct device                   *io_dev;
        struct otg_io_access_ops        *io_ops;
        void __iomem                    *io_priv;
 
index 4bde182..6071e91 100644 (file)
@@ -32,7 +32,7 @@ struct vm_struct {
        struct page             **pages;
        unsigned int            nr_pages;
        phys_addr_t             phys_addr;
-       void                    *caller;
+       const void              *caller;
 };
 
 /*
@@ -62,7 +62,7 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
-                       pgprot_t prot, int node, void *caller);
+                       pgprot_t prot, int node, const void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -85,14 +85,15 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
 
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *get_vm_area_caller(unsigned long size,
-                                       unsigned long flags, void *caller);
+                                       unsigned long flags, const void *caller);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                        unsigned long start, unsigned long end);
 extern struct vm_struct *__get_vm_area_caller(unsigned long size,
                                        unsigned long flags,
                                        unsigned long start, unsigned long end,
-                                       void *caller);
+                                       const void *caller);
 extern struct vm_struct *remove_vm_area(const void *addr);
+extern struct vm_struct *find_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
                        struct page ***pages);
@@ -131,6 +132,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #ifdef CONFIG_SMP
index 9f149dd..4ad0c2a 100644 (file)
@@ -138,8 +138,6 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
-extern unsigned long determine_dirtyable_memory(void);
-
 extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
index 3c41097..8787349 100644 (file)
 /* Maximum amount of UBI volumes that can be re-named at one go */
 #define UBI_MAX_RNVOL 32
 
-/*
- * UBI data type hint constants.
- *
- * UBI_LONGTERM: long-term data
- * UBI_SHORTTERM: short-term data
- * UBI_UNKNOWN: data persistence is unknown
- *
- * These constants are used when data is written to UBI volumes in order to
- * help the UBI wear-leveling unit to find more appropriate physical
- * eraseblocks.
- */
-enum {
-       UBI_LONGTERM  = 1,
-       UBI_SHORTTERM = 2,
-       UBI_UNKNOWN   = 3,
-};
-
 /*
  * UBI volume type constants.
  *
@@ -375,25 +358,34 @@ struct ubi_rnvol_req {
  *                             requests.
  * @lnum: logical eraseblock number to change
  * @bytes: how many bytes will be written to the logical eraseblock
- * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
+ * @dtype: pass "3" for better compatibility with old kernels
  * @padding: reserved for future, not used, has to be zeroed
+ *
+ * The @dtype field used to inform UBI about what kind of data will be written
+ * to the LEB: long term (value 1), short term (value 2), unknown (value 3).
+ * UBI tried to pick a PEB with lower erase counter for short term data and a
+ * PEB with higher erase counter for long term data. But this was not really
+ * used because users usually do not know this and could easily mislead UBI. We
+ * removed this feature in May 2012. UBI currently just ignores the @dtype
+ * field. But for better compatibility with older kernels it is recommended to
+ * set @dtype to 3 (unknown).
  */
 struct ubi_leb_change_req {
        __s32 lnum;
        __s32 bytes;
-       __s8  dtype;
+       __s8  dtype; /* obsolete, do not use! */
        __s8  padding[7];
 } __packed;
 
 /**
  * struct ubi_map_req - a data structure used in map LEB requests.
+ * @dtype: pass "3" for better compatibility with old kernels
  * @lnum: logical eraseblock number to unmap
- * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
  * @padding: reserved for future, not used, has to be zeroed
  */
 struct ubi_map_req {
        __s32 lnum;
-       __s8  dtype;
+       __s8  dtype; /* obsolete, do not use! */
        __s8  padding[3];
 } __packed;
 
index 3779ea3..b72437f 100644 (file)
@@ -912,7 +912,6 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                                        u16 latency, u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                                                        __u8 ltk[16]);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
 void hci_le_ltk_neg_reply(struct hci_conn *conn);
 
 #endif /* __HCI_CORE_H */
index 0dfb34a..a417c11 100644 (file)
@@ -47,7 +47,7 @@ static void sb_close(struct sbuff *m)
        if (likely(m != &emergency))
                kfree(m);
        else {
-               xchg(&emergency_ptr, m);
+               emergency_ptr = m;
                local_bh_enable();
        }
 }
index 0cf91b2..9b4b888 100644 (file)
@@ -366,6 +366,12 @@ struct snd_pcm_group {             /* keep linked substreams */
 
 struct pid;
 
+struct snd_pnd_hack_params {
+       unsigned int frames_min;
+       unsigned int frames_max;
+       unsigned int reserved[14];
+};
+
 struct snd_pcm_substream {
        struct snd_pcm *pcm;
        struct snd_pcm_str *pstr;
@@ -413,6 +419,8 @@ struct snd_pcm_substream {
 #endif
        /* misc flags */
        unsigned int hw_opened: 1;
+       /* pandora hack */
+       struct snd_pnd_hack_params pnd_hack_params;
 };
 
 #define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
index a9536da..08fa272 100644 (file)
@@ -147,7 +147,7 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,
        TP_ARGS(call_site, ptr)
 );
 
-TRACE_EVENT(mm_page_free_direct,
+TRACE_EVENT(mm_page_free,
 
        TP_PROTO(struct page *page, unsigned int order),
 
@@ -169,7 +169,7 @@ TRACE_EVENT(mm_page_free_direct,
                        __entry->order)
 );
 
-TRACE_EVENT(mm_pagevec_free,
+TRACE_EVENT(mm_page_free_batched,
 
        TP_PROTO(struct page *page, int cold),
 
index 6582c45..50b10ca 100644 (file)
@@ -651,6 +651,8 @@ struct omap_overlay *omap_dss_get_overlay(int num);
 void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres);
 int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings);
 
 typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
 int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -659,6 +661,9 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
 int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
 int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
                unsigned long timeout);
+int omap_dispc_wait_for_vsync_on_frame(u32 *frame,
+               unsigned long timeout, bool force);
+int omap_dispc_get_line_status(void);
 
 #define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
 #define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
@@ -702,4 +707,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
 int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size,
                int data_lines);
 
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
+
 #endif
index fde15f9..fd79f65 100644 (file)
@@ -499,6 +499,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
 
        return files;
 }
+EXPORT_SYMBOL_GPL(get_files_struct);
 
 void put_files_struct(struct files_struct *files)
 {
@@ -520,6 +521,7 @@ void put_files_struct(struct files_struct *files)
                rcu_read_unlock();
        }
 }
+EXPORT_SYMBOL_GPL(put_files_struct);
 
 void reset_files_struct(struct files_struct *files)
 {
index dcd3f97..8e1fa3b 100644 (file)
@@ -45,7 +45,7 @@ static void resume_irqs(bool want_early)
        struct irq_desc *desc;
        int irq;
 
-       for_each_irq_desc(irq, desc) {
+       for_each_irq_desc_reverse(irq, desc) {
                unsigned long flags;
                bool is_early = desc->action &&
                        desc->action->flags & IRQF_EARLY_RESUME;
index a535fc9..e8cd202 100644 (file)
@@ -306,7 +306,7 @@ static void buf_unmapped_default_callback(struct rchan_buf *buf,
  */
 static struct dentry *create_buf_file_default_callback(const char *filename,
                                                       struct dentry *parent,
-                                                      int mode,
+                                                      umode_t mode,
                                                       struct rchan_buf *buf,
                                                       int *is_global)
 {
index 33ac1e3..abc5279 100644 (file)
@@ -5317,6 +5317,7 @@ int can_nice(const struct task_struct *p, const int nice)
        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
 }
+EXPORT_SYMBOL_GPL(can_nice);
 
 #ifdef __ARCH_WANT_SYS_NICE
 
@@ -8513,6 +8514,7 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
        list_add(&root_task_group.list, &task_groups);
        INIT_LIST_HEAD(&root_task_group.children);
+       INIT_LIST_HEAD(&root_task_group.siblings);
        autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
index 3ecf574..6c4cc94 100644 (file)
@@ -1270,6 +1270,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 
        return sighand;
 }
+EXPORT_SYMBOL_GPL(__lock_task_sighand);
 
 /*
  * send signal info to all the members of a group
index 92cac05..2ad29a2 100644 (file)
@@ -402,7 +402,7 @@ static int blk_remove_buf_file_callback(struct dentry *dentry)
 
 static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
-                                                  int mode,
+                                                  umode_t mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
 {
index f4b93a2..25efaf2 100644 (file)
@@ -4439,7 +4439,7 @@ static const struct file_operations trace_options_core_fops = {
 };
 
 struct dentry *trace_create_file(const char *name,
-                                mode_t mode,
+                                umode_t mode,
                                 struct dentry *parent,
                                 void *data,
                                 const struct file_operations *fops)
index c3c3f6b..2f2fd46 100644 (file)
@@ -316,7 +316,7 @@ void tracing_reset_current(int cpu);
 void tracing_reset_current_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *trace_create_file(const char *name,
-                                mode_t mode,
+                                umode_t mode,
                                 struct dentry *parent,
                                 void *data,
                                 const struct file_operations *fops);
index 4f75540..b4801f5 100644 (file)
@@ -149,7 +149,7 @@ static int debugfs_ul_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
 
-static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
+static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
                                struct dentry *parent, unsigned long *value)
 {
        return debugfs_create_file(name, mode, parent, value, &fops_ul);
@@ -169,7 +169,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
                        debugfs_stacktrace_depth_set, "%llu\n");
 
 static struct dentry *debugfs_create_stacktrace_depth(
-       const char *name, mode_t mode,
+       const char *name, umode_t mode,
        struct dentry *parent, unsigned long *value)
 {
        return debugfs_create_file(name, mode, parent, value,
@@ -193,7 +193,7 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
                        debugfs_atomic_t_set, "%lld\n");
 
-static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
+static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
                                struct dentry *parent, atomic_t *value)
 {
        return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
@@ -202,7 +202,7 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
 struct dentry *fault_create_debugfs_attr(const char *name,
                        struct dentry *parent, struct fault_attr *attr)
 {
-       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+       umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;
 
        dir = debugfs_create_dir(name, parent);
index 011b110..cc47f36 100644 (file)
@@ -192,7 +192,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
@@ -307,7 +307,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
 
 config TRANSPARENT_HUGEPAGE
        bool "Transparent Hugepage Support"
-       depends on X86 && MMU
+       depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select COMPACTION
        help
          Transparent Hugepages allows the kernel to use huge pages and
index 8b1a477..4b24432 100644 (file)
@@ -4,6 +4,7 @@ config DEBUG_PAGEALLOC
        depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
        depends on !KMEMCHECK
        select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
+       select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
        ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
@@ -22,3 +23,7 @@ config WANT_PAGE_DEBUG_FLAGS
 config PAGE_POISONING
        bool
        select WANT_PAGE_DEBUG_FLAGS
+
+config PAGE_GUARD
+       bool
+       select WANT_PAGE_DEBUG_FLAGS
index 50ec00e..8aada89 100644 (file)
@@ -13,7 +13,7 @@ obj-y                 := filemap.o mempool.o oom_kill.o fadvise.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
                           page_isolation.o mm_init.o mmu_context.o percpu.o \
-                          $(mmu-y)
+                          compaction.o $(mmu-y)
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
@@ -32,7 +32,6 @@ obj-$(CONFIG_NUMA)    += mempolicy.o
 obj-$(CONFIG_SPARSEMEM)        += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
index 5f8ec82..295ec64 100644 (file)
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-       struct list_head freepages;     /* List of free pages to migrate to */
-       struct list_head migratepages;  /* List of pages being migrated */
-       unsigned long nr_freepages;     /* Number of isolated free pages */
-       unsigned long nr_migratepages;  /* Number of pages to migrate */
-       unsigned long free_pfn;         /* isolate_freepages search base */
-       unsigned long migrate_pfn;      /* isolate_migratepages search base */
-       bool sync;                      /* Synchronous migration */
-
-       unsigned int order;             /* order a direct compactor needs */
-       int migratetype;                /* MOVABLE, RECLAIMABLE etc */
-       struct zone *zone;
-};
-
 static unsigned long release_freepages(struct list_head *freelist)
 {
        struct page *page, *next;
@@ -54,24 +35,35 @@ static unsigned long release_freepages(struct list_head *freelist)
        return count;
 }
 
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
-                               unsigned long blockpfn,
-                               struct list_head *freelist)
+static void map_pages(struct list_head *list)
+{
+       struct page *page;
+
+       list_for_each_entry(page, list, lru) {
+               arch_alloc_page(page, 0);
+               kernel_map_pages(page, 1, 1);
+       }
+}
+
+static inline bool migrate_async_suitable(int migratetype)
+{
+       return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside of the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+                               unsigned long end_pfn,
+                               struct list_head *freelist,
+                               bool strict)
 {
-       unsigned long zone_end_pfn, end_pfn;
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor;
 
-       /* Get the last PFN we should scan for free pages at */
-       zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-       end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
-       /* Find the first usable PFN in the block to initialse page cursor */
-       for (; blockpfn < end_pfn; blockpfn++) {
-               if (pfn_valid_within(blockpfn))
-                       break;
-       }
        cursor = pfn_to_page(blockpfn);
 
        /* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
                int isolated, i;
                struct page *page = cursor;
 
-               if (!pfn_valid_within(blockpfn))
+               if (!pfn_valid_within(blockpfn)) {
+                       if (strict)
+                               return 0;
                        continue;
+               }
                nr_scanned++;
 
-               if (!PageBuddy(page))
+               if (!PageBuddy(page)) {
+                       if (strict)
+                               return 0;
                        continue;
+               }
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated && strict)
+                       return 0;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@ static unsigned long isolate_freepages_block(struct zone *zone,
        return total_isolated;
 }
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-       int migratetype = get_pageblock_migratetype(page);
-
-       /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-       if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return false;
-
-       /* If the page is a large free page, then allow migration */
-       if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return true;
-
-       /* If the block is MIGRATE_MOVABLE, allow migration */
-       if (migratetype == MIGRATE_MOVABLE)
-               return true;
-
-       /* Otherwise skip the block */
-       return false;
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors, cause function to
+ * undo its actions and return zero.
+ *
+ * Otherwise, function returns one-past-the-last PFN of isolated page
+ * (which may be greater than end_pfn if the end fell in the middle of
+ * a free page).
  */
-static void isolate_freepages(struct zone *zone,
-                               struct compact_control *cc)
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
-       struct page *page;
-       unsigned long high_pfn, low_pfn, pfn;
-       unsigned long flags;
-       int nr_freepages = cc->nr_freepages;
-       struct list_head *freelist = &cc->freepages;
-
-       /*
-        * Initialise the free scanner. The starting point is where we last
-        * scanned from (or the end of the zone if starting). The low point
-        * is the end of the pageblock the migration scanner is using.
-        */
-       pfn = cc->free_pfn;
-       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+       unsigned long isolated, pfn, block_end_pfn, flags;
+       struct zone *zone = NULL;
+       LIST_HEAD(freelist);
 
-       /*
-        * Take care that if the migration scanner is at the end of the zone
-        * that the free scanner does not accidentally move to the next zone
-        * in the next isolation cycle.
-        */
-       high_pfn = min(low_pfn, pfn);
-
-       /*
-        * Isolate free pages until enough are available to migrate the
-        * pages on cc->migratepages. We stop searching if the migrate
-        * and free page scanners meet or enough free pages are isolated.
-        */
-       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
-                                       pfn -= pageblock_nr_pages) {
-               unsigned long isolated;
+       if (pfn_valid(start_pfn))
+               zone = page_zone(pfn_to_page(start_pfn));
 
-               if (!pfn_valid(pfn))
-                       continue;
+       for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+               if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+                       break;
 
                /*
-                * Check for overlapping nodes/zones. It's possible on some
-                * configurations to have a setup like
-                * node0 node1 node0
-                * i.e. it's possible that all pages within a zones range of
-                * pages do not belong to a single zone.
+                * On subsequent iterations ALIGN() is actually not needed,
+                * but we keep it so as not to complicate the code.
                 */
-               page = pfn_to_page(pfn);
-               if (page_zone(page) != zone)
-                       continue;
+               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+               block_end_pfn = min(block_end_pfn, end_pfn);
 
-               /* Check the block is suitable for migration */
-               if (!suitable_migration_target(page))
-                       continue;
+               spin_lock_irqsave(&zone->lock, flags);
+               isolated = isolate_freepages_block(pfn, block_end_pfn,
+                                                  &freelist, true);
+               spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
-                * Found a block suitable for isolating free pages from. Now
-                * we disabled interrupts, double check things are ok and
-                * isolate the pages. This is to minimise the time IRQs
-                * are disabled
+                * In strict mode, isolate_freepages_block() returns 0 if
+                * there are any holes in the block (ie. invalid PFNs or
+                * non-free pages).
                 */
-               isolated = 0;
-               spin_lock_irqsave(&zone->lock, flags);
-               if (suitable_migration_target(page)) {
-                       isolated = isolate_freepages_block(zone, pfn, freelist);
-                       nr_freepages += isolated;
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
+               if (!isolated)
+                       break;
 
                /*
-                * Record the highest PFN we isolated pages from. When next
-                * looking for free pages, the search will restart here as
-                * page migration may have returned some pages to the allocator
+                * If we managed to isolate pages, it is always (1 << n) *
+                * pageblock_nr_pages for some non-negative n.  (Max order
+                * page may span two pageblocks).
                 */
-               if (isolated)
-                       high_pfn = max(high_pfn, pfn);
        }
 
        /* split_free_page does not map the pages */
-       list_for_each_entry(page, freelist, lru) {
-               arch_alloc_page(page, 0);
-               kernel_map_pages(page, 1, 1);
+       map_pages(&freelist);
+
+       if (pfn < end_pfn) {
+               /* Loop terminated early, cleanup. */
+               release_freepages(&freelist);
+               return 0;
        }
 
-       cc->free_pfn = high_pfn;
-       cc->nr_freepages = nr_freepages;
+       /* We don't use freelists for anything. */
+       return pfn;
 }
 
 /* Update the number of anon and file isolated pages in the zone */
@@ -243,38 +200,34 @@ static bool too_many_isolated(struct zone *zone)
        return isolated > (inactive + active) / 2;
 }
 
-/* possible outcome of isolate_migratepages */
-typedef enum {
-       ISOLATE_ABORT,          /* Abort compaction now */
-       ISOLATE_NONE,           /* No pages isolated, continue scanning */
-       ISOLATE_SUCCESS,        /* Pages isolated, migrate */
-} isolate_migrate_t;
-
-/*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+/**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone:      Zone pages are in.
+ * @cc:                Compaction control structure.
+ * @low_pfn:   The first PFN of the range.
+ * @end_pfn:   The one-past-the-last PFN of the range.
+ *
+ * Isolate all pages that can be migrated from the range specified by
+ * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
+ * pending, otherwise PFN of the first page that was not scanned
+ * (which may be both less, equal to or more than end_pfn).
+ *
+ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
+ * zero.
+ *
+ * Apart from cc->migratepages and cc->nr_migratepages this function
+ * does not modify any cc's fields, in particular it does not modify
+ * (or read for that matter) cc->migrate_pfn.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-                                       struct compact_control *cc)
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+                          unsigned long low_pfn, unsigned long end_pfn)
 {
-       unsigned long low_pfn, end_pfn;
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
 
-       /* Do not scan outside zone boundaries */
-       low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
-
-       /* Only scan within a pageblock boundary */
-       end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
-
-       /* Do not cross the free scanner or scan within a memory hole */
-       if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-               cc->migrate_pfn = end_pfn;
-               return ISOLATE_NONE;
-       }
-
        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
@@ -283,12 +236,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (!cc->sync)
-                       return ISOLATE_ABORT;
+                       return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                if (fatal_signal_pending(current))
-                       return ISOLATE_ABORT;
+                       return 0;
        }
 
        /* Time to isolate some pages for migration */
@@ -351,7 +304,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                 */
                pageblock_nr = low_pfn >> pageblock_order;
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-                               get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+                   !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
                        last_pageblock_nr = pageblock_nr;
@@ -387,18 +340,133 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                nr_isolated++;
 
                /* Avoid isolating too much */
-               if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+               if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+                       ++low_pfn;
                        break;
+               }
        }
 
        acct_isolated(zone, cc);
 
        spin_unlock_irq(&zone->lru_lock);
-       cc->migrate_pfn = low_pfn;
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-       return ISOLATE_SUCCESS;
+       return low_pfn;
+}
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+       int migratetype = get_pageblock_migratetype(page);
+
+       /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+       if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+               return false;
+
+       /* If the page is a large free page, then allow migration */
+       if (PageBuddy(page) && page_order(page) >= pageblock_order)
+               return true;
+
+       /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+       if (migrate_async_suitable(migratetype))
+               return true;
+
+       /* Otherwise skip the block */
+       return false;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+                               struct compact_control *cc)
+{
+       struct page *page;
+       unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+       unsigned long flags;
+       int nr_freepages = cc->nr_freepages;
+       struct list_head *freelist = &cc->freepages;
+
+       /*
+        * Initialise the free scanner. The starting point is where we last
+        * scanned from (or the end of the zone if starting). The low point
+        * is the end of the pageblock the migration scanner is using.
+        */
+       pfn = cc->free_pfn;
+       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+       /*
+        * Take care that if the migration scanner is at the end of the zone
+        * that the free scanner does not accidentally move to the next zone
+        * in the next isolation cycle.
+        */
+       high_pfn = min(low_pfn, pfn);
+
+       zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+       /*
+        * Isolate free pages until enough are available to migrate the
+        * pages on cc->migratepages. We stop searching if the migrate
+        * and free page scanners meet or enough free pages are isolated.
+        */
+       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+                                       pfn -= pageblock_nr_pages) {
+               unsigned long isolated;
+
+               if (!pfn_valid(pfn))
+                       continue;
+
+               /*
+                * Check for overlapping nodes/zones. It's possible on some
+                * configurations to have a setup like
+                * node0 node1 node0
+                * i.e. it's possible that all pages within a zones range of
+                * pages do not belong to a single zone.
+                */
+               page = pfn_to_page(pfn);
+               if (page_zone(page) != zone)
+                       continue;
+
+               /* Check the block is suitable for migration */
+               if (!suitable_migration_target(page))
+                       continue;
+
+               /*
+                * Found a block suitable for isolating free pages from. Now
+                * we disabled interrupts, double check things are ok and
+                * isolate the pages. This is to minimise the time IRQs
+                * are disabled
+                */
+               isolated = 0;
+               spin_lock_irqsave(&zone->lock, flags);
+               if (suitable_migration_target(page)) {
+                       end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+                       isolated = isolate_freepages_block(pfn, end_pfn,
+                                                          freelist, false);
+                       nr_freepages += isolated;
+               }
+               spin_unlock_irqrestore(&zone->lock, flags);
+
+               /*
+                * Record the highest PFN we isolated pages from. When next
+                * looking for free pages, the search will restart here as
+                * page migration may have returned some pages to the allocator
+                */
+               if (isolated)
+                       high_pfn = max(high_pfn, pfn);
+       }
+
+       /* split_free_page does not map the pages */
+       map_pages(freelist);
+
+       cc->free_pfn = high_pfn;
+       cc->nr_freepages = nr_freepages;
 }
 
 /*
@@ -447,6 +515,44 @@ static void update_nr_listpages(struct compact_control *cc)
        cc->nr_freepages = nr_freepages;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+       ISOLATE_ABORT,          /* Abort compaction now */
+       ISOLATE_NONE,           /* No pages isolated, continue scanning */
+       ISOLATE_SUCCESS,        /* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+                                       struct compact_control *cc)
+{
+       unsigned long low_pfn, end_pfn;
+
+       /* Do not scan outside zone boundaries */
+       low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+       /* Only scan within a pageblock boundary */
+       end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+       /* Do not cross the free scanner or scan within a memory hole */
+       if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+               cc->migrate_pfn = end_pfn;
+               return ISOLATE_NONE;
+       }
+
+       /* Perform the isolation */
+       low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+       if (!low_pfn)
+               return ISOLATE_ABORT;
+
+       cc->migrate_pfn = low_pfn;
+
+       return ISOLATE_SUCCESS;
+}
+
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
@@ -744,23 +850,25 @@ int sysctl_extfrag_handler(struct ctl_table *table, int write,
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
-ssize_t sysfs_compact_node(struct sys_device *dev,
-                       struct sysdev_attribute *attr,
+ssize_t sysfs_compact_node(struct device *dev,
+                       struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        compact_node(dev->id);
 
        return count;
 }
-static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
+static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
 
 int compaction_register_node(struct node *node)
 {
-       return sysdev_create_file(&node->sysdev, &attr_compact);
+       return device_create_file(&node->dev, &dev_attr_compact);
 }
 
 void compaction_unregister_node(struct node *node)
 {
-       return sysdev_remove_file(&node->sysdev, &attr_compact);
+       return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
index 0dd7b8f..fefaaba 100644 (file)
@@ -35,7 +35,7 @@ __setup("failslab=", setup_failslab);
 static int __init failslab_debugfs_init(void)
 {
        struct dentry *dir;
-       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+       umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 
        dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
        if (IS_ERR(dir))
index 2fde516..9d40694 100644 (file)
@@ -65,6 +65,17 @@ static void khugepaged_slab_free(void);
 static struct hlist_head *mm_slots_hash __read_mostly;
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
+#ifdef CONFIG_FB
+extern const struct file_operations fb_fops;
+
+#define is_fb_vma(vma) \
+       (vma->vm_file && vma->vm_file->f_op == &fb_fops)
+#else
+#define is_fb_vma(vma) 0
+#endif
+
+static void split_fb_pmd(struct vm_area_struct *vma, pmd_t *pmd);
+
 /**
  * struct mm_slot - hash lookup from mm to mm_slot
  * @hash: hash collision list
@@ -538,7 +549,7 @@ static int __init hugepage_init(void)
         * where the extra memory used could hurt more than TLB overhead
         * is likely to save.  The admin can still enable it through /sys.
         */
-       if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
+       if (totalram_pages < (200 << (20 - PAGE_SHIFT)))
                transparent_hugepage_flags = 0;
 
        start_khugepaged();
@@ -790,6 +801,28 @@ pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
        return pgtable;
 }
 
+void huge_pmd_set_accessed(struct mm_struct *mm,
+                          struct vm_area_struct *vma,
+                          unsigned long address,
+                          pmd_t *pmd, pmd_t orig_pmd,
+                          int dirty)
+{
+       pmd_t entry;
+       unsigned long haddr;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto unlock;
+
+       entry = pmd_mkyoung(orig_pmd);
+       haddr = address & HPAGE_PMD_MASK;
+       if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+               update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+       spin_unlock(&mm->page_table_lock);
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
@@ -903,7 +936,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache(vma, address, pmd);
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
@@ -953,7 +986,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                pmdp_clear_flush_notify(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, pmd);
                page_remove_rmap(page);
                put_page(page);
                ret |= VM_FAULT_WRITE;
@@ -1007,6 +1040,11 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
        spin_lock(&tlb->mm->page_table_lock);
        if (likely(pmd_trans_huge(*pmd))) {
+               if (is_fb_vma(vma)) {
+                       split_fb_pmd(vma, pmd);
+                       return 0;
+               }
+
                if (unlikely(pmd_trans_splitting(*pmd))) {
                        spin_unlock(&tlb->mm->page_table_lock);
                        wait_split_huge_page(vma->anon_vma,
@@ -1473,6 +1511,157 @@ out:
        return ret;
 }
 
+/* callers must hold mmap_sem (madvise() does) */
+static int collapse_fb_pmd(struct mm_struct *mm, pmd_t *pmd,
+       unsigned long addr, struct vm_area_struct *vma)
+{
+       unsigned long _addr;
+       struct page *page;
+       pgtable_t pgtable;
+       pte_t *pte, *_pte;
+       pmd_t _pmd;
+       u32 pa;
+
+       pte = pte_offset_map(pmd, addr);
+       page = pte_page(*pte);
+       pa = __pfn_to_phys(page_to_pfn(page));
+       _pmd = pmdp_clear_flush_notify(vma, addr, pmd);
+
+       if ((addr | pa) & ~HPAGE_PMD_MASK) {
+               printk(KERN_ERR "collapse_fb: bad alignment: %08lx->%08x\n",
+                       addr, pa);
+               pte_unmap(pte);
+               return -EINVAL;
+       }
+
+       for (_pte = pte, _addr = addr; _pte < pte + HPAGE_PMD_NR; _pte++) {
+               pte_t pteval = *_pte;
+               struct page *src_page;
+
+               if (!pte_none(pteval)) {
+                       src_page = pte_page(pteval);
+
+                       pte_clear(vma->vm_mm, _addr, _pte);
+                       if (pte_present(pteval))
+                               page_remove_rmap(src_page);
+               }
+
+               _addr += PAGE_SIZE;
+       }
+
+       pte_unmap(pte);
+       pgtable = pmd_pgtable(_pmd);
+       VM_BUG_ON(page_count(pgtable) != 1);
+       VM_BUG_ON(page_mapcount(pgtable) != 0);
+
+       _pmd = mk_pmd(page, vma->vm_page_prot);
+       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+       _pmd = pmd_mkhuge(_pmd);
+
+       smp_wmb();
+
+       spin_lock(&mm->page_table_lock);
+       BUG_ON(!pmd_none(*pmd));
+       set_pmd_at(mm, addr, pmd, _pmd);
+       update_mmu_cache(vma, addr, pmd);
+       prepare_pmd_huge_pte(pgtable, mm);
+       spin_unlock(&mm->page_table_lock);
+
+       return 0;
+}
+
+static int try_collapse_fb(struct vm_area_struct *vma)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long hstart, hend, addr;
+       int ret = 0;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+       hend = vma->vm_end & HPAGE_PMD_MASK;
+       if (hstart >= hend)
+               return -EINVAL;
+
+       for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
+               pgd = pgd_offset(mm, addr);
+               if (!pgd_present(*pgd))
+                       return -EINVAL;
+
+               pud = pud_offset(pgd, addr);
+               if (!pud_present(*pud))
+                       return -EINVAL;
+
+               pmd = pmd_offset(pud, addr);
+               if (!pmd_present(*pmd))
+                       return -EINVAL;
+               if (pmd_trans_huge(*pmd))
+                       continue;
+
+               ret = collapse_fb_pmd(mm, pmd, addr, vma);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* undo collapse_fb_pmd(), restore pages so that mm subsys can release them
+ * page_table_lock() should be held */
+static void split_fb_pmd(struct vm_area_struct *vma, pmd_t *pmd)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long addr, haddr, pfn;
+       struct page *page;
+       pgtable_t pgtable;
+       pmd_t _pmd;
+       int i;
+
+       page = pmd_page(*pmd);
+       pgtable = get_pmd_huge_pte(mm);
+       pfn = page_to_pfn(page);
+       addr = pfn << PAGE_SHIFT;
+
+       pmd_populate(mm, &_pmd, pgtable);
+
+       for (i = 0, haddr = addr; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               pte_t *pte, entry;
+               BUG_ON(PageCompound(page + i));
+               entry = mk_pte(page + i, vma->vm_page_prot);
+               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               if (!pmd_young(*pmd))
+                       entry = pte_mkold(entry);
+               atomic_set(&page[i]._mapcount, 0); // hack?
+               pte = pte_offset_map(&_pmd, haddr);
+               BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, haddr, pte, entry);
+               pte_unmap(pte);
+       }
+
+       set_pmd_at(mm, addr, pmd, pmd_mknotpresent(*pmd));
+       flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+       pmd_populate(mm, pmd, pgtable);
+}
+
+#ifndef __arm__
+#error arm only..
+#endif
+static u32 pmd_to_va(struct mm_struct *mm, pmd_t *pmd)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd0;
+       u32 ret;
+
+       pgd = pgd_offset(mm, 0);
+       pud = pud_offset(pgd, 0);
+       pmd0 = pmd_offset(pud, 0);
+
+       ret = (pmd - pmd0) << SECTION_SHIFT;
+       return ret;
+}
+
 #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
                   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
 
@@ -1481,6 +1670,9 @@ int hugepage_madvise(struct vm_area_struct *vma,
 {
        switch (advice) {
        case MADV_HUGEPAGE:
+               if (is_fb_vma(vma))
+                       return try_collapse_fb(vma);
+
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
@@ -1984,7 +2176,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
-       update_mmu_cache(vma, address, _pmd);
+       update_mmu_cache(vma, address, pmd);
        prepare_pmd_huge_pte(pgtable, mm);
        spin_unlock(&mm->page_table_lock);
 
@@ -2359,6 +2551,7 @@ static int khugepaged(void *none)
 
 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 {
+       struct vm_area_struct *vma;
        struct page *page;
 
        spin_lock(&mm->page_table_lock);
@@ -2366,6 +2559,12 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
                spin_unlock(&mm->page_table_lock);
                return;
        }
+       vma = find_vma(mm, pmd_to_va(mm, pmd));
+       if (vma && is_fb_vma(vma)) {
+               split_fb_pmd(vma, pmd);
+               spin_unlock(&mm->page_table_lock);
+               return;
+       }
        page = pmd_page(*pmd);
        VM_BUG_ON(!page_count(page));
        get_page(page);
index c52095c..deda2d3 100644 (file)
@@ -633,6 +633,7 @@ static void free_huge_page(struct page *page)
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
+               arch_clear_hugepage_flags(page);
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
@@ -1713,9 +1714,9 @@ static void __init hugetlb_sysfs_init(void)
 
 /*
  * node_hstate/s - associate per node hstate attributes, via their kobjects,
- * with node sysdevs in node_devices[] using a parallel array.  The array
- * index of a node sysdev or _hstate == node id.
- * This is here to avoid any static dependency of the node sysdev driver, in
+ * with node devices in node_devices[] using a parallel array.  The array
+ * index of a node device or _hstate == node id.
+ * This is here to avoid any static dependency of the node device driver, in
  * the base kernel, on the hugetlb module.
  */
 struct node_hstate {
@@ -1725,7 +1726,7 @@ struct node_hstate {
 struct node_hstate node_hstates[MAX_NUMNODES];
 
 /*
- * A subset of global hstate attributes for node sysdevs
+ * A subset of global hstate attributes for node devices
  */
 static struct attribute *per_node_hstate_attrs[] = {
        &nr_hugepages_attr.attr,
@@ -1739,7 +1740,7 @@ static struct attribute_group per_node_hstate_attr_group = {
 };
 
 /*
- * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  * Returns node id via non-NULL nidp.
  */
 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
@@ -1762,13 +1763,13 @@ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
 }
 
 /*
- * Unregister hstate attributes from a single node sysdev.
+ * Unregister hstate attributes from a single node device.
  * No-op if no hstate attributes attached.
  */
 void hugetlb_unregister_node(struct node *node)
 {
        struct hstate *h;
-       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       struct node_hstate *nhs = &node_hstates[node->dev.id];
 
        if (!nhs->hugepages_kobj)
                return;         /* no hstate attributes */
@@ -1784,7 +1785,7 @@ void hugetlb_unregister_node(struct node *node)
 }
 
 /*
- * hugetlb module exit:  unregister hstate attributes from node sysdevs
+ * hugetlb module exit:  unregister hstate attributes from node devices
  * that have them.
  */
 static void hugetlb_unregister_all_nodes(void)
@@ -1792,7 +1793,7 @@ static void hugetlb_unregister_all_nodes(void)
        int nid;
 
        /*
-        * disable node sysdev registrations.
+        * disable node device registrations.
         */
        register_hugetlbfs_with_node(NULL, NULL);
 
@@ -1804,20 +1805,20 @@ static void hugetlb_unregister_all_nodes(void)
 }
 
 /*
- * Register hstate attributes for a single node sysdev.
+ * Register hstate attributes for a single node device.
  * No-op if attributes already registered.
  */
 void hugetlb_register_node(struct node *node)
 {
        struct hstate *h;
-       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       struct node_hstate *nhs = &node_hstates[node->dev.id];
        int err;
 
        if (nhs->hugepages_kobj)
                return;         /* already allocated */
 
        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
-                                                       &node->sysdev.kobj);
+                                                       &node->dev.kobj);
        if (!nhs->hugepages_kobj)
                return;
 
@@ -1828,7 +1829,7 @@ void hugetlb_register_node(struct node *node)
                if (err) {
                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
                                        " for node %d\n",
-                                               h->name, node->sysdev.id);
+                                               h->name, node->dev.id);
                        hugetlb_unregister_node(node);
                        break;
                }
@@ -1837,8 +1838,8 @@ void hugetlb_register_node(struct node *node)
 
 /*
  * hugetlb init time:  register hstate attributes for all registered node
- * sysdevs of nodes that have memory.  All on-line nodes should have
- * registered their associated sysdev by this time.
+ * devices of nodes that have memory.  All on-line nodes should have
+ * registered their associated device by this time.
  */
 static void hugetlb_register_all_nodes(void)
 {
@@ -1846,12 +1847,12 @@ static void hugetlb_register_all_nodes(void)
 
        for_each_node_state(nid, N_HIGH_MEMORY) {
                struct node *node = &node_devices[nid];
-               if (node->sysdev.id == nid)
+               if (node->dev.id == nid)
                        hugetlb_register_node(node);
        }
 
        /*
-        * Let the node sysdev driver know we're here so it can
+        * Let the node device driver know we're here so it can
         * [un]register hstate attributes on node hotplug.
         */
        register_hugetlbfs_with_node(hugetlb_register_node,
index 0c26b5e..c444510 100644 (file)
@@ -100,6 +100,39 @@ extern void prep_compound_page(struct page *page, unsigned long order);
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+       struct list_head freepages;     /* List of free pages to migrate to */
+       struct list_head migratepages;  /* List of pages being migrated */
+       unsigned long nr_freepages;     /* Number of isolated free pages */
+       unsigned long nr_migratepages;  /* Number of pages to migrate */
+       unsigned long free_pfn;         /* isolate_freepages search base */
+       unsigned long migrate_pfn;      /* isolate_migratepages search base */
+       bool sync;                      /* Synchronous migration */
+
+       unsigned int order;             /* order a direct compactor needs */
+       int migratetype;                /* MOVABLE, RECLAIMABLE etc */
+       struct zone *zone;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+                          unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
 
 /*
  * function for dealing with page's order in buddy system.
index 23d3a6b..11c0225 100644 (file)
@@ -235,6 +235,67 @@ static long madvise_remove(struct vm_area_struct *vma,
        return error;
 }
 
+#ifdef __arm__
+static long madvise_force_cache(struct vm_area_struct *vma,
+                               struct vm_area_struct **prev,
+                               unsigned long start, unsigned long end,
+                               int tex_cb)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long addr, next_pgd, next_pmd;
+       spinlock_t *ptl;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       u32 val;
+
+       *prev = vma;
+
+       if (mm == NULL)
+               return -EINVAL;
+
+       tex_cb &= 7;
+       vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot,
+               L_PTE_MT_MASK, (tex_cb << 2));
+
+       addr = start;
+       pgd = pgd_offset(mm, addr);
+       flush_cache_range(vma, addr, end);
+       do {
+               next_pgd = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               pud = pud_offset(pgd, addr);
+               pmd = pmd_offset(pud, addr);
+               next_pmd = pmd_addr_end(addr, end);
+               if (pmd_trans_huge(*pmd)) {
+                       val = pmd_val(*pmd);
+                       val &= ~0x100c;
+                       val |= (tex_cb << 10) & 0x1000;
+                       val |= (tex_cb << 2)  & 0x000c;
+                       set_pmd_at(mm, addr, pmd, __pmd(val));
+               }
+               else if (pmd_none_or_clear_bad(pmd))
+                       continue;
+
+               pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+               do {
+                       if (!pte_present(*pte))
+                               continue;
+                       val = pte_val(*pte);
+                       val = (val & ~L_PTE_MT_MASK) | (tex_cb << 2);
+                       set_pte_at(mm, addr, pte, __pte(val));
+               } while (pte++, addr += PAGE_SIZE, addr < next_pmd);
+               pte_unmap_unlock(pte - 1, ptl);
+
+       } while (pgd++, addr = next_pgd, addr < end);
+       flush_tlb_range(vma, start, end);
+
+       return 0;
+}
+#endif
+
 #ifdef CONFIG_MEMORY_FAILURE
 /*
  * Error injection support for memory error handling.
@@ -278,6 +339,11 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                return madvise_willneed(vma, prev, start, end);
        case MADV_DONTNEED:
                return madvise_dontneed(vma, prev, start, end);
+#ifdef __arm__
+       case 0x2000 ... 0x2007:
+               return madvise_force_cache(vma, prev, start, end,
+                       behavior & 7);
+#endif
        default:
                return madvise_behavior(vma, prev, start, end, behavior);
        }
@@ -302,6 +368,9 @@ madvise_behavior_valid(int behavior)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
+#endif
+#ifdef __arm__
+       case 0x2000 ... 0x2007:
 #endif
                return 1;
 
index 51901b1..42c1244 100644 (file)
@@ -1403,7 +1403,7 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
                /* Not a free page */
                ret = 1;
        }
-       unset_migratetype_isolate(p);
+       unset_migratetype_isolate(p, MIGRATE_MOVABLE);
        unlock_memory_hotplug();
        return ret;
 }
index 675b211..62a3a54 100644 (file)
@@ -1403,6 +1403,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        tlb_finish_mmu(&tlb, address, end);
        return end;
 }
+EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
@@ -3558,8 +3559,9 @@ retry:
 
                barrier();
                if (pmd_trans_huge(orig_pmd)) {
-                       if (flags & FAULT_FLAG_WRITE &&
-                           !pmd_write(orig_pmd) &&
+                       unsigned int dirty = flags & FAULT_FLAG_WRITE;
+
+                       if (dirty && !pmd_write(orig_pmd) &&
                            !pmd_trans_splitting(orig_pmd)) {
                                ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
                                                          orig_pmd);
@@ -3571,6 +3573,9 @@ retry:
                                if (unlikely(ret & VM_FAULT_OOM))
                                        goto retry;
                                return ret;
+                       } else {
+                               huge_pmd_set_accessed(mm, vma, address, pmd,
+                                                     orig_pmd, dirty);
                        }
                        return 0;
                }
@@ -3885,7 +3890,11 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                        vma = find_vma(mm, addr);
                        if (!vma || vma->vm_start > addr)
                                break;
-                       if (vma->vm_ops && vma->vm_ops->access)
+                       if ((vma->vm_flags & VM_PFNMAP) &&
+                           !(vma->vm_flags & VM_IO))
+                               ret = generic_access_phys(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0 && vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
                                                          len, write);
                        if (ret <= 0)
index 223232a..1243ab5 100644 (file)
@@ -903,7 +903,7 @@ static int __ref offline_pages(unsigned long start_pfn,
        nr_pages = end_pfn - start_pfn;
 
        /* set above range as isolated */
-       ret = start_isolate_page_range(start_pfn, end_pfn);
+       ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        if (ret)
                goto out;
 
@@ -968,7 +968,7 @@ repeat:
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and makes migrate type to be MOVABLE */
-       undo_isolate_page_range(start_pfn, end_pfn);
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -993,7 +993,7 @@ failed_removal:
                start_pfn, end_pfn);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
-       undo_isolate_page_range(start_pfn, end_pfn);
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 
 out:
        unlock_memory_hotplug();
index 4dda948..85e39c6 100644 (file)
@@ -143,6 +143,7 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(find_lock_task_mm);
 
 /* return true if the task is not adequate as candidate victim task. */
 static bool oom_unkillable_task(struct task_struct *p,
index 62bfbd9..40c9a46 100644 (file)
@@ -129,6 +129,67 @@ unsigned long global_dirty_limit;
  */
 static struct prop_descriptor vm_completions;
 
+/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around.  To avoid stressing page reclaim with lots of unreclaimable
+ * pages.  It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+       int node;
+       unsigned long x = 0;
+
+       for_each_node_state(node, N_HIGH_MEMORY) {
+               struct zone *z =
+                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+               x += zone_page_state(z, NR_FREE_PAGES) +
+                    zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+       }
+       /*
+        * Make sure that the number of highmem pages is never larger
+        * than the number of the total dirtyable memory. This can only
+        * occur in very strange VM situations but we want to make sure
+        * that this does not occur.
+        */
+       return min(x, total);
+#else
+       return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+       unsigned long x;
+
+       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+           dirty_balance_reserve;
+
+       if (!vm_highmem_is_dirtyable)
+               x -= highmem_dirtyable_memory(x);
+
+       return x + 1;   /* Ensure that we never return 0 */
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -196,7 +257,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
        return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
@@ -291,67 +351,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around.  To avoid stressing page reclaim with lots of unreclaimable
- * pages.  It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-       int node;
-       unsigned long x = 0;
-
-       for_each_node_state(node, N_HIGH_MEMORY) {
-               struct zone *z =
-                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-               x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z);
-       }
-       /*
-        * Make sure that the number of highmem pages is never larger
-        * than the number of the total dirtyable memory. This can only
-        * occur in very strange VM situations but we want to make sure
-        * that this does not occur.
-        */
-       return min(x, total);
-#else
-       return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the numebr of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-       unsigned long x;
-
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-       if (!vm_highmem_is_dirtyable)
-               x -= highmem_dirtyable_memory(x);
-
-       return x + 1;   /* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
                                           unsigned long bg_thresh)
 {
index 62a7fa2..092c992 100644 (file)
@@ -57,6 +57,8 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/migrate.h>
+#include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -96,6 +98,14 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory.  This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
@@ -127,6 +137,13 @@ void pm_restrict_gfp_mask(void)
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
 }
+
+bool pm_suspended_storage(void)
+{
+       if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+               return false;
+       return true;
+}
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -403,6 +420,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
                clear_highpage(page + i);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+unsigned int _debug_guardpage_minorder;
+
+static int __init debug_guardpage_minorder_setup(char *buf)
+{
+       unsigned long res;
+
+       if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
+               printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
+               return 0;
+       }
+       _debug_guardpage_minorder = res;
+       printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
+       return 0;
+}
+__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+
+static inline void set_page_guard_flag(struct page *page)
+{
+       __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+
+static inline void clear_page_guard_flag(struct page *page)
+{
+       __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline void set_page_guard_flag(struct page *page) { }
+static inline void clear_page_guard_flag(struct page *page) { }
+#endif
+
 static inline void set_page_order(struct page *page, int order)
 {
        set_page_private(page, order);
@@ -460,6 +508,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;
 
+       if (page_is_guard(buddy) && page_order(buddy) == order) {
+               VM_BUG_ON(page_count(buddy) != 0);
+               return 1;
+       }
+
        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
@@ -483,10 +536,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
- * other.  That is, if we allocate a small block, and both were   
- * free, the remainder of the region must be split into blocks.   
+ * other.  That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.            
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
@@ -516,11 +569,19 @@ static inline void __free_one_page(struct page *page,
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;
-
-               /* Our buddy is free, merge with it and move up one order. */
-               list_del(&buddy->lru);
-               zone->free_area[order].nr_free--;
-               rmv_page_order(buddy);
+               /*
+                * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
+                * merge with it and move up one order.
+                */
+               if (page_is_guard(buddy)) {
+                       clear_page_guard_flag(buddy);
+                       set_page_private(page, 0);
+                       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+               } else {
+                       list_del(&buddy->lru);
+                       zone->free_area[order].nr_free--;
+                       rmv_page_order(buddy);
+               }
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
@@ -654,7 +715,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        int i;
        int bad = 0;
 
-       trace_mm_page_free_direct(page, order);
+       trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);
 
        if (PageAnon(page))
@@ -720,6 +781,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
        }
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+       unsigned i = pageblock_nr_pages;
+       struct page *p = page;
+
+       do {
+               __ClearPageReserved(p);
+               set_page_count(p, 0);
+       } while (++p, --i);
+
+       set_page_refcounted(page);
+       set_pageblock_migratetype(page, MIGRATE_CMA);
+       __free_pages(page, pageblock_order);
+       totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -746,6 +825,23 @@ static inline void expand(struct zone *zone, struct page *page,
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+               if (high < debug_guardpage_minorder()) {
+                       /*
+                        * Mark as guard pages (or page), that will allow to
+                        * merge back to allocator when buddy will be freed.
+                        * Corresponding page table entries will not be touched,
+                        * pages will stay not present in virtual address space
+                        */
+                       INIT_LIST_HEAD(&page[size].lru);
+                       set_page_guard_flag(&page[size]);
+                       set_page_private(&page[size], high);
+                       /* Guard pages are not available for any usage */
+                       __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+                       continue;
+               }
+#endif
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
@@ -828,11 +924,17 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-       [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-       [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-       [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
-       [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
+static int fallbacks[MIGRATE_TYPES][4] = {
+       [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+       [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+       [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+       [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
+       [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#endif
+       [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
+       [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
@@ -927,12 +1029,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
-               for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+               for (i = 0;; i++) {
                        migratetype = fallbacks[start_migratetype][i];
 
                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
-                               continue;
+                               break;
 
                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
@@ -947,11 +1049,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
+                        *
+                        * On the other hand, never change migration
+                        * type of MIGRATE_CMA pageblocks nor move CMA
+                        * pages on different free lists. We don't
+                        * want unmovable pages to be allocated from
+                        * MIGRATE_CMA areas.
                         */
-                       if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE ||
-                                       page_group_by_mobility_disabled) {
-                               unsigned long pages;
+                       if (!is_migrate_cma(migratetype) &&
+                           (unlikely(current_order >= pageblock_order / 2) ||
+                            start_migratetype == MIGRATE_RECLAIMABLE ||
+                            page_group_by_mobility_disabled)) {
+                               int pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
@@ -969,11 +1078,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        rmv_page_order(page);
 
                        /* Take ownership for orders >= pageblock_order */
-                       if (current_order >= pageblock_order)
+                       if (current_order >= pageblock_order &&
+                           !is_migrate_cma(migratetype))
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);
 
-                       expand(zone, page, order, current_order, area, migratetype);
+                       expand(zone, page, order, current_order, area,
+                              is_migrate_cma(migratetype)
+                            ? migratetype : start_migratetype);
 
                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);
@@ -1015,17 +1127,17 @@ retry_reserve:
        return page;
 }
 
-/* 
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
 {
-       int i;
-       
+       int mt = migratetype, i;
+
        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
@@ -1045,7 +1157,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
-               set_page_private(page, migratetype);
+               if (IS_ENABLED(CONFIG_CMA)) {
+                       mt = get_pageblock_migratetype(page);
+                       if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+                               mt = migratetype;
+               }
+               set_page_private(page, mt);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1210,6 +1327,19 @@ out:
        local_irq_restore(flags);
 }
 
+/*
+ * Free a list of 0-order pages
+ */
+void free_hot_cold_page_list(struct list_head *list, int cold)
+{
+       struct page *page, *next;
+
+       list_for_each_entry_safe(page, next, list, lru) {
+               trace_mm_page_free_batched(page, cold);
+               free_hot_cold_page(page, cold);
+       }
+}
+
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -1276,8 +1406,12 @@ int split_free_page(struct page *page)
 
        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
-               for (; page < endpage; page += pageblock_nr_pages)
-                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               for (; page < endpage; page += pageblock_nr_pages) {
+                       int mt = get_pageblock_migratetype(page);
+                       if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+                               set_pageblock_migratetype(page,
+                                                         MIGRATE_MOVABLE);
+               }
        }
 
        return 1 << order;
@@ -1408,7 +1542,7 @@ static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 
 static int __init fail_page_alloc_debugfs(void)
 {
-       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+       umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;
 
        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
@@ -1457,7 +1591,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
        long min = mark;
        int o;
 
-       free_pages -= (1 << order) + 1;
+       free_pages -= (1 << order) - 1;
        if (alloc_flags & ALLOC_HIGH)
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
@@ -1756,7 +1890,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 {
        unsigned int filter = SHOW_MEM_FILTER_NODES;
 
-       if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
+       if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
+           debug_guardpage_minorder() > 0)
                return;
 
        /*
@@ -1802,12 +1937,25 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 
 static inline int
 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
+                               unsigned long did_some_progress,
                                unsigned long pages_reclaimed)
 {
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
                return 0;
 
+       /* Always retry if specifically requested */
+       if (gfp_mask & __GFP_NOFAIL)
+               return 1;
+
+       /*
+        * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
+        * making forward progress without invoking OOM. Suspend also disables
+        * storage devices so kswapd will not help. Bail if we are suspending.
+        */
+       if (!did_some_progress && pm_suspended_storage())
+               return 0;
+
        /*
         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
         * means __GFP_NOFAIL, but that may not be true in other
@@ -1826,13 +1974,6 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
                return 1;
 
-       /*
-        * Don't let big-order allocations loop unless the caller
-        * explicitly requests that.
-        */
-       if (gfp_mask & __GFP_NOFAIL)
-               return 1;
-
        return 0;
 }
 
@@ -1960,16 +2101,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 #endif /* CONFIG_COMPACTION */
 
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-       struct zonelist *zonelist, enum zone_type high_zoneidx,
-       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+                 nodemask_t *nodemask)
 {
-       struct page *page = NULL;
        struct reclaim_state reclaim_state;
-       bool drained = false;
+       int progress;
 
        cond_resched();
 
@@ -1980,7 +2118,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        reclaim_state.reclaimed_slab = 0;
        current->reclaim_state = &reclaim_state;
 
-       *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+       progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
        current->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
@@ -1988,6 +2126,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
+       return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
+{
+       struct page *page = NULL;
+       bool drained = false;
+
+       *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+                                              nodemask);
        if (unlikely(!(*did_some_progress)))
                return NULL;
 
@@ -2249,7 +2402,8 @@ rebalance:
 
        /* Check if we should retry the allocation */
        pages_reclaimed += did_some_progress;
-       if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
+       if (should_alloc_retry(gfp_mask, order, did_some_progress,
+                                               pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
                goto rebalance;
@@ -2371,16 +2525,6 @@ unsigned long get_zeroed_page(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(get_zeroed_page);
 
-void __pagevec_free(struct pagevec *pvec)
-{
-       int i = pagevec_count(pvec);
-
-       while (--i >= 0) {
-               trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
-               free_hot_cold_page(pvec->pages[i], pvec->cold);
-       }
-}
-
 void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
@@ -4345,7 +4489,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        init_waitqueue_head(&pgdat->kswapd_wait);
        pgdat->kswapd_max_order = 0;
        pgdat_page_cgroup_init(pgdat);
-       
+
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
                unsigned long size, realsize, memmap_pages;
@@ -5135,8 +5279,19 @@ static void calculate_totalreserve_pages(void)
                        if (max > zone->present_pages)
                                max = zone->present_pages;
                        reserve_pages += max;
+                       /*
+                        * Lowmem reserves are not available to
+                        * GFP_HIGHUSER page cache allocations and
+                        * kswapd tries to balance zones to their high
+                        * watermark.  As a result, neither should be
+                        * regarded as dirtyable memory, to prevent a
+                        * situation where reclaim has to clean pages
+                        * in order to balance the zones.
+                        */
+                       zone->dirty_balance_reserve = max;
                }
        }
+       dirty_balance_reserve = reserve_pages;
        totalreserve_pages = reserve_pages;
 }
 
@@ -5179,14 +5334,7 @@ static void setup_per_zone_lowmem_reserve(void)
        calculate_totalreserve_pages();
 }
 
-/**
- * setup_per_zone_wmarks - called when min_free_kbytes changes
- * or when memory is hot-{added|removed}
- *
- * Ensures that the watermark[min,low,high] values for each zone are set
- * correctly with respect to min_free_kbytes.
- */
-void setup_per_zone_wmarks(void)
+static void __setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -5233,6 +5381,11 @@ void setup_per_zone_wmarks(void)
 
                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+               zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+               zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+               zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
@@ -5241,6 +5394,20 @@ void setup_per_zone_wmarks(void)
        calculate_totalreserve_pages();
 }
 
+/**
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
+ *
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
+ */
+void setup_per_zone_wmarks(void)
+{
+       mutex_lock(&zonelists_mutex);
+       __setup_per_zone_wmarks();
+       mutex_unlock(&zonelists_mutex);
+}
+
 /*
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
@@ -5614,14 +5781,16 @@ static int
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
        unsigned long pfn, iter, found;
+       int mt;
+
        /*
         * For avoiding noise data, lru_add_drain_all() should be called
         * If ZONE_MOVABLE, the zone never contains immobile pages
         */
        if (zone_idx(zone) == ZONE_MOVABLE)
                return true;
-
-       if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+       mt = get_pageblock_migratetype(page);
+       if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
                return true;
 
        pfn = page_to_pfn(page);
@@ -5731,7 +5900,7 @@ out:
        return ret;
 }
 
-void unset_migratetype_isolate(struct page *page)
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
        struct zone *zone;
        unsigned long flags;
@@ -5739,12 +5908,264 @@ void unset_migratetype_isolate(struct page *page)
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
-       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-       move_freepages_block(zone, page, MIGRATE_MOVABLE);
+       set_pageblock_migratetype(page, migratetype);
+       move_freepages_block(zone, page, migratetype);
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+       return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+                            pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+       return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+                               pageblock_nr_pages));
+}
+
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+                            int **resultp)
+{
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+       if (PageHighMem(page))
+               gfp_mask |= __GFP_HIGHMEM;
+
+       return alloc_page(gfp_mask);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+       /* This function is based on compact_zone() from compaction.c. */
+
+       unsigned long pfn = start;
+       unsigned int tries = 0;
+       int ret = 0;
+
+       struct compact_control cc = {
+               .nr_migratepages = 0,
+               .order = -1,
+               .zone = page_zone(pfn_to_page(start)),
+               .sync = true,
+       };
+       INIT_LIST_HEAD(&cc.migratepages);
+
+       migrate_prep_local();
+
+       while (pfn < end || !list_empty(&cc.migratepages)) {
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
+               if (list_empty(&cc.migratepages)) {
+                       cc.nr_migratepages = 0;
+                       pfn = isolate_migratepages_range(cc.zone, &cc,
+                                                        pfn, end);
+                       if (!pfn) {
+                               ret = -EINTR;
+                               break;
+                       }
+                       tries = 0;
+               } else if (++tries == 5) {
+                       ret = ret < 0 ? ret : -EBUSY;
+                       break;
+               }
+
+               ret = migrate_pages(&cc.migratepages,
+                                   __alloc_contig_migrate_alloc,
+                                   0, false, MIGRATE_SYNC);
+       }
+
+       putback_lru_pages(&cc.migratepages);
+       return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->min_cma_pages += count;
+       spin_unlock_irqrestore(&zone->lock, flags);
+       setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does similar work as
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+       int did_some_progress = 0;
+       int order = 1;
+
+       /*
+        * Increase level of watermarks to force kswapd to do its job
+        * to stabilise at new watermark level.
+        */
+       __update_cma_watermarks(zone, count);
+
+       /* Obey watermarks as if the page was being allocated */
+       while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+               wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+               did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+                                                     NULL);
+               if (!did_some_progress) {
+                       /* Exhausted what can be done so it's blamo time */
+                       out_of_memory(zonelist, gfp_mask, order, NULL);
+               }
+       }
+
+       /* Restore original watermark levels. */
+       __update_cma_watermarks(zone, -count);
+
+       return count;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:     start PFN to allocate
+ * @end:       one-past-the-last PFN to allocate
+ * @migratetype:       migratetype of the underlying pageblocks (either
+ *                     #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
+ *                     in range must have the same migratetype and it must
+ *                     be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned, however it's the caller's responsibility to guarantee that
+ * we are the only thread that changes migrate type of pageblocks the
+ * pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or negative error code.  On success all
+ * pages which PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+                      unsigned migratetype)
+{
+       struct zone *zone = page_zone(pfn_to_page(start));
+       unsigned long outer_start, outer_end;
+       int ret = 0, order;
+
+       /*
+        * What we do here is we mark all pageblocks in range as
+        * MIGRATE_ISOLATE.  Because pageblock and max order pages may
+        * have different sizes, and due to the way page allocator
+        * work, we align the range to biggest of the two pages so
+        * that page allocator won't try to merge buddies from
+        * different pageblocks and change MIGRATE_ISOLATE to some
+        * other migration type.
+        *
+        * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+        * migrate the pages from an unaligned range (ie. pages that
+        * we are interested in).  This will put all the pages in
+        * range back to page allocator as MIGRATE_ISOLATE.
+        *
+        * When this is done, we take the pages in range from page
+        * allocator removing them from the buddy system.  This way
+        * page allocator will never consider using them.
+        *
+        * This lets us mark the pageblocks back as
+        * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+        * aligned range but not in the unaligned, original range are
+        * put back to page allocator so that buddy can use them.
+        */
+
+       ret = start_isolate_page_range(pfn_max_align_down(start),
+                                      pfn_max_align_up(end), migratetype);
+       if (ret)
+               goto done;
+
+       ret = __alloc_contig_migrate_range(start, end);
+       if (ret)
+               goto done;
+
+       /*
+        * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+        * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
+        * more, all pages in [start, end) are free in page allocator.
+        * What we are going to do is to allocate all pages from
+        * [start, end) (that is remove them from page allocator).
+        *
+        * The only problem is that pages at the beginning and at the
+        * end of interesting range may be not aligned with pages that
+        * page allocator holds, ie. they can be part of higher order
+        * pages.  Because of this, we reserve the bigger range and
+        * once this is done free the pages we are not interested in.
+        *
+        * We don't have to hold zone->lock here because the pages are
+        * isolated thus they won't get removed from buddy.
+        */
+
+       lru_add_drain_all();
+       drain_all_pages();
+
+       order = 0;
+       outer_start = start;
+       while (!PageBuddy(pfn_to_page(outer_start))) {
+               if (++order >= MAX_ORDER) {
+                       ret = -EBUSY;
+                       goto done;
+               }
+               outer_start &= ~0UL << order;
+       }
+
+       /* Make sure the range is really isolated. */
+       if (test_pages_isolated(outer_start, end)) {
+               pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+                      outer_start, end);
+               ret = -EBUSY;
+               goto done;
+       }
+
+       /*
+        * Reclaim enough pages to make sure that contiguous allocation
+        * will not starve the system.
+        */
+       __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+       /* Grab isolated pages from freelists. */
+       outer_end = isolate_freepages_range(outer_start, end);
+       if (!outer_end) {
+               ret = -EBUSY;
+               goto done;
+       }
+
+       /* Free head and tail (if any) */
+       if (start != outer_start)
+               free_contig_range(outer_start, start - outer_start);
+       if (end != outer_end)
+               free_contig_range(end, outer_end - end);
+
+done:
+       undo_isolate_page_range(pfn_max_align_down(start),
+                               pfn_max_align_up(end), migratetype);
+       return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+       for (; nr_pages--; ++pfn)
+               __free_page(pfn_to_page(pfn));
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.
index 4ae42bb..c9f0477 100644 (file)
@@ -24,6 +24,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * to be MIGRATE_ISOLATE.
  * @start_pfn: The lower PFN of the range to be isolated.
  * @end_pfn: The upper PFN of the range to be isolated.
+ * @migratetype: migrate type to set in error recovery.
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * start_pfn/end_pfn must be aligned to pageblock_order.
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                            unsigned migratetype)
 {
        unsigned long pfn;
        unsigned long undo_pfn;
@@ -56,7 +57,7 @@ undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
-               unset_migratetype_isolate(pfn_to_page(pfn));
+               unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
 
        return -EBUSY;
 }
@@ -64,8 +65,8 @@ undo:
 /*
  * Make isolated pages available again.
  */
-int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                           unsigned migratetype)
 {
        unsigned long pfn;
        struct page *page;
@@ -77,7 +78,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
-               unset_migratetype_isolate(page);
+               unset_migratetype_isolate(page, migratetype);
        }
        return 0;
 }
@@ -86,7 +87,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
  */
 static int
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
index eb663fb..326bb60 100644 (file)
@@ -108,8 +108,8 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-                          pmd_t *pmdp)
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+                         pmd_t *pmdp)
 {
        pmd_t pmd = pmd_mksplitting(*pmdp);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
index cbcbb02..b771c4b 100644 (file)
@@ -399,6 +399,7 @@ ondemand_readahead(struct address_space *mapping,
                   unsigned long req_size)
 {
        unsigned long max = max_sane_readahead(ra->ra_pages);
+       pgoff_t prev_offset;
 
        /*
         * start of file
@@ -450,8 +451,11 @@ ondemand_readahead(struct address_space *mapping,
 
        /*
         * sequential cache miss
+        * trivial case: (offset - prev_offset) == 1
+        * unaligned reads: (offset - prev_offset) == 0
         */
-       if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+       prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
+       if (offset - prev_offset <= 1UL)
                goto initial_readahead;
 
        /*
index 83efac6..1de2441 100644 (file)
@@ -1185,6 +1185,7 @@ int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
        mutex_unlock(&inode->i_mutex);
        return 0;
 }
+EXPORT_SYMBOL_GPL(vmtruncate_range);
 
 #ifdef CONFIG_NUMA
 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
@@ -2713,6 +2714,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
+EXPORT_SYMBOL_GPL(shmem_zero_setup);
 
 /**
  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
index 60c6969..a737852 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -269,6 +269,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
        return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+       prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
        void *p;
@@ -1555,7 +1560,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
                if (!object) {
                        c->page = page;
-                       c->node = page_to_nid(page);
                        stat(s, ALLOC_FROM_PARTIAL);
                        object = t;
                        available =  page->objects - page->inuse;
@@ -2021,7 +2025,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-       if (node != NUMA_NO_NODE && c->node != node)
+       if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
                return 0;
 #endif
        return 1;
@@ -2110,7 +2114,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                page->freelist = NULL;
 
                stat(s, ALLOC_SLAB);
-               c->node = page_to_nid(page);
                c->page = page;
                *pc = c;
        } else
@@ -2212,7 +2215,6 @@ new_slab:
        if (c->partial) {
                c->page = c->partial;
                c->partial = c->page->next;
-               c->node = page_to_nid(c->page);
                stat(s, CPU_PARTIAL_ALLOC);
                c->freelist = NULL;
                goto redo;
@@ -2243,7 +2245,6 @@ new_slab:
 
        c->freelist = get_freepointer(s, object);
        deactivate_slab(s, c);
-       c->node = NUMA_NO_NODE;
        local_irq_restore(flags);
        return object;
 }
@@ -2293,6 +2294,8 @@ redo:
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
+               void *next_object = get_freepointer_safe(s, object);
+
                /*
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
@@ -2308,11 +2311,12 @@ redo:
                if (unlikely(!irqsafe_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                object, tid,
-                               get_freepointer_safe(s, object), next_tid(tid)))) {
+                               next_object, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_alloc", s, tid);
                        goto redo;
                }
+               prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
 
@@ -4446,25 +4450,25 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-                       int node = ACCESS_ONCE(c->node);
+                       int node;
                        struct page *page;
 
-                       if (node < 0)
-                               continue;
                        page = ACCESS_ONCE(c->page);
-                       if (page) {
-                               if (flags & SO_TOTAL)
-                                       x = page->objects;
-                               else if (flags & SO_OBJECTS)
-                                       x = page->inuse;
-                               else
-                                       x = 1;
+                       if (!page)
+                               continue;
 
-                               total += x;
-                               nodes[node] += x;
-                       }
-                       page = c->partial;
+                       node = page_to_nid(page);
+                       if (flags & SO_TOTAL)
+                               x = page->objects;
+                       else if (flags & SO_OBJECTS)
+                               x = page->inuse;
+                       else
+                               x = 1;
 
+                       total += x;
+                       nodes[node] += x;
+
+                       page = ACCESS_ONCE(c->partial);
                        if (page) {
                                node = page_to_nid(page);
                                if (flags & SO_TOTAL)
@@ -4476,6 +4480,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                total += x;
                                nodes[node] += x;
                        }
+
                        per_cpu[node]++;
                }
        }
index a4b9016..2e04ca6 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -661,11 +661,10 @@ int lru_add_drain_all(void)
 void release_pages(struct page **pages, int nr, int cold)
 {
        int i;
-       struct pagevec pages_to_free;
+       LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);
 
-       pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
 
@@ -696,19 +695,12 @@ void release_pages(struct page **pages, int nr, int cold)
                        del_page_from_lru(zone, page);
                }
 
-               if (!pagevec_add(&pages_to_free, page)) {
-                       if (zone) {
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
-                               zone = NULL;
-                       }
-                       __pagevec_free(&pages_to_free);
-                       pagevec_reinit(&pages_to_free);
-               }
+               list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-       pagevec_free(&pages_to_free);
+       free_hot_cold_page_list(&pages_to_free, cold);
 }
 EXPORT_SYMBOL(release_pages);
 
index dbd2b67..3579afb 100644 (file)
@@ -667,10 +667,10 @@ int try_to_free_swap(struct page *page)
         * original page might be freed under memory pressure, then
         * later read back in from swap, now with the wrong data.
         *
-        * Hibernation clears bits from gfp_allowed_mask to prevent
-        * memory reclaim from writing to disk, so check that here.
+        * Hibernation suspends storage while it is writing the image
+        * to disk so check that here.
         */
-       if (!(gfp_allowed_mask & __GFP_IO))
+       if (pm_suspended_storage())
                return 0;
 
        delete_from_swap_cache(page);
index 1431458..5368aae 100644 (file)
@@ -1123,6 +1123,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_area_add_early - add vmap area early during boot
+ * @vm: vm_struct to add
+ *
+ * This function is used to add fixed kernel vm area to vmlist before
+ * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
+ * should contain proper values and the other fields should be zero.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_add_early(struct vm_struct *vm)
+{
+       struct vm_struct *tmp, **p;
+
+       BUG_ON(vmap_initialized);
+       for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+               if (tmp->addr >= vm->addr) {
+                       BUG_ON(tmp->addr < vm->addr + vm->size);
+                       break;
+               } else
+                       BUG_ON(tmp->addr + tmp->size > vm->addr);
+       }
+       vm->next = *p;
+       *p = vm;
+}
+
 /**
  * vm_area_register_early - register vmap area early during boot
  * @vm: vm_struct to register
@@ -1145,8 +1171,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 
        vm->addr = (void *)addr;
 
-       vm->next = vmlist;
-       vmlist = vm;
+       vm_area_add_early(vm);
 }
 
 void __init vmalloc_init(void)
@@ -1239,6 +1264,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
        vunmap_page_range(addr, end);
        flush_tlb_kernel_range(addr, end);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
@@ -1261,7 +1287,7 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, void *caller)
+                             unsigned long flags, const void *caller)
 {
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
@@ -1287,7 +1313,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
 }
 
 static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, void *caller)
+                             unsigned long flags, const void *caller)
 {
        setup_vmalloc_vm(vm, va, flags, caller);
        insert_vmalloc_vmlist(vm);
@@ -1295,7 +1321,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
-               unsigned long end, int node, gfp_t gfp_mask, void *caller)
+               unsigned long end, int node, gfp_t gfp_mask, const void *caller)
 {
        struct vmap_area *va;
        struct vm_struct *area;
@@ -1356,7 +1382,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
 
 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
                                       unsigned long start, unsigned long end,
-                                      void *caller)
+                                      const void *caller)
 {
        return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
                                  caller);
@@ -1376,15 +1402,24 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                -1, GFP_KERNEL, __builtin_return_address(0));
 }
+EXPORT_SYMBOL_GPL(get_vm_area);
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
-                               void *caller)
+                               const void *caller)
 {
        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                                -1, GFP_KERNEL, caller);
 }
 
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ *     find_vm_area  -  find a continuous kernel virtual area
+ *     @addr:          base address
+ *
+ *     Search for the kernel VM area starting at @addr, and return it.
+ *     It is up to the caller to do all required locking to keep the returned
+ *     pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
 {
        struct vmap_area *va;
 
@@ -1549,9 +1584,9 @@ EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_node(unsigned long size, unsigned long align,
                            gfp_t gfp_mask, pgprot_t prot,
-                           int node, void *caller);
+                           int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-                                pgprot_t prot, int node, void *caller)
+                                pgprot_t prot, int node, const void *caller)
 {
        const int order = 0;
        struct page **pages;
@@ -1624,7 +1659,7 @@ fail:
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
-                       pgprot_t prot, int node, void *caller)
+                       pgprot_t prot, int node, const void *caller)
 {
        struct vm_struct *area;
        void *addr;
@@ -1680,7 +1715,7 @@ fail:
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
                            gfp_t gfp_mask, pgprot_t prot,
-                           int node, void *caller)
+                           int node, const void *caller)
 {
        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                                gfp_mask, prot, node, caller);
index ab98dc6..a3b1578 100644 (file)
@@ -734,24 +734,6 @@ static enum page_references page_check_references(struct page *page,
        return PAGEREF_RECLAIM;
 }
 
-static noinline_for_stack void free_page_list(struct list_head *free_pages)
-{
-       struct pagevec freed_pvec;
-       struct page *page, *tmp;
-
-       pagevec_init(&freed_pvec, 1);
-
-       list_for_each_entry_safe(page, tmp, free_pages, lru) {
-               list_del(&page->lru);
-               if (!pagevec_add(&freed_pvec, page)) {
-                       __pagevec_free(&freed_pvec);
-                       pagevec_reinit(&freed_pvec);
-               }
-       }
-
-       pagevec_free(&freed_pvec);
-}
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1015,7 +997,7 @@ keep_lumpy:
        if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
                zone_set_flag(zone, ZONE_CONGESTED);
 
-       free_page_list(&free_pages);
+       free_hot_cold_page_list(&free_pages, 1);
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
@@ -3524,16 +3506,16 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
  * a specified node's per zone unevictable lists for evictable pages.
  */
 
-static ssize_t read_scan_unevictable_node(struct sys_device *dev,
-                                         struct sysdev_attribute *attr,
+static ssize_t read_scan_unevictable_node(struct device *dev,
+                                         struct device_attribute *attr,
                                          char *buf)
 {
        warn_scan_unevictable_pages();
        return sprintf(buf, "0\n");     /* always zero; should fit... */
 }
 
-static ssize_t write_scan_unevictable_node(struct sys_device *dev,
-                                          struct sysdev_attribute *attr,
+static ssize_t write_scan_unevictable_node(struct device *dev,
+                                          struct device_attribute *attr,
                                        const char *buf, size_t count)
 {
        warn_scan_unevictable_pages();
@@ -3541,17 +3523,17 @@ static ssize_t write_scan_unevictable_node(struct sys_device *dev,
 }
 
 
-static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
                        read_scan_unevictable_node,
                        write_scan_unevictable_node);
 
 int scan_unevictable_register_node(struct node *node)
 {
-       return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+       return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 
 void scan_unevictable_unregister_node(struct node *node)
 {
-       sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+       device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 #endif
index 887e83f..901e631 100644 (file)
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
        "Reclaimable",
        "Movable",
        "Reserve",
+#ifdef CONFIG_CMA
+       "CMA",
+#endif
        "Isolate",
 };
 
index f456645..b2b4404 100644 (file)
@@ -225,22 +225,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
 }
 EXPORT_SYMBOL(hci_le_start_enc);
 
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
-{
-       struct hci_dev *hdev = conn->hdev;
-       struct hci_cp_le_ltk_reply cp;
-
-       BT_DBG("%p", conn);
-
-       memset(&cp, 0, sizeof(cp));
-
-       cp.handle = cpu_to_le16(conn->handle);
-       memcpy(cp.ltk, ltk, sizeof(ltk));
-
-       hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_ltk_reply);
-
 void hci_le_ltk_neg_reply(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
index 58a8955..05c8a5c 100644 (file)
@@ -154,6 +154,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
                    struct ieee80211_sta *sta, void *priv_sta,
                   struct sk_buff *skb)
 {
+       struct minstrel_priv *mp = priv;
        struct minstrel_sta_info *mi = priv_sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *ar = info->status.rates;
@@ -181,6 +182,10 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
 
        if (mi->sample_deferred > 0)
                mi->sample_deferred--;
+
+       if (time_after(jiffies, mi->stats_update +
+                               (mp->update_interval * HZ) / 1000))
+               minstrel_update_stats(mp, mi);
 }
 
 
@@ -235,10 +240,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 
        mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
 
-       if (time_after(jiffies, mi->stats_update + (mp->update_interval *
-                       HZ) / 1000))
-               minstrel_update_stats(mp, mi);
-
        ndx = mi->max_tp_rate;
 
        if (mrr)
@@ -334,8 +335,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 
 
 static void
-calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
-                    struct minstrel_rate *d, struct ieee80211_rate *rate)
+calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+                   struct ieee80211_rate *rate)
 {
        int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
 
@@ -402,8 +403,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
 
                mr->rix = i;
                mr->bitrate = sband->bitrates[i].bitrate / 5;
-               calc_rate_durations(mi, local, mr,
-                               &sband->bitrates[i]);
+               calc_rate_durations(local, mr, &sband->bitrates[i]);
 
                /* calculate maximum number of retransmissions before
                 * fallback (based on maximum segment size) */
index d5a5622..55d257d 100644 (file)
@@ -68,7 +68,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
 
        file->private_data = ms;
        p = ms->buf;
-       p += sprintf(p, "rate     throughput  ewma prob   this prob  "
+       p += sprintf(p, "rate      throughput  ewma prob  this prob  "
                        "this succ/attempt   success    attempts\n");
        for (i = 0; i < mi->n_rates; i++) {
                struct minstrel_rate *mr = &mi->r[i];
@@ -84,7 +84,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
                eprob = mr->probability / 18;
 
                p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u        "
-                               "%3u(%3u)   %8llu    %8llu\n",
+                               "   %3u(%3u)  %8llu    %8llu\n",
                                tp / 10, tp % 10,
                                eprob / 10, eprob % 10,
                                prob / 10, prob % 10,
index 2fe20c9..11f4803 100644 (file)
 /* Transmit duration for the raw data part of an average sized packet */
 #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
 
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define GROUP_IDX(_streams, _sgi, _ht40)       \
+       MINSTREL_MAX_STREAMS * 2 * _ht40 +      \
+       MINSTREL_MAX_STREAMS * _sgi +           \
+       _streams - 1
+
 /* MCS rate information for an MCS group */
-#define MCS_GROUP(_streams, _sgi, _ht40) {                             \
+#define MCS_GROUP(_streams, _sgi, _ht40)                               \
+       [GROUP_IDX(_streams, _sgi, _ht40)] = {                          \
        .streams = _streams,                                            \
        .flags =                                                        \
                (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |                 \
@@ -58,6 +67,9 @@
  * To enable sufficiently targeted rate sampling, MCS rates are divided into
  * groups, based on the number of streams and flags (HT40, SGI) that they
  * use.
+ *
+ * Sortorder has to be fixed for GROUP_IDX macro to be applicable:
+ * HT40 -> SGI -> #streams
  */
 const struct mcs_group minstrel_mcs_groups[] = {
        MCS_GROUP(1, 0, 0),
@@ -102,21 +114,9 @@ minstrel_ewma(int old, int new, int weight)
 static int
 minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
 {
-       int streams = (rate->idx / MCS_GROUP_RATES) + 1;
-       u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
-               if (minstrel_mcs_groups[i].streams != streams)
-                       continue;
-               if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
-                       continue;
-
-               return i;
-       }
-
-       WARN_ON(1);
-       return 0;
+       return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
+                        !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
+                        !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
 }
 
 static inline struct minstrel_rate_stats *
@@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
  * Recalculate success probabilities and counters for a rate using EWMA
  */
 static void
-minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
+minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
 {
        if (unlikely(mr->attempts > 0)) {
                mr->sample_skipped = 0;
@@ -156,8 +156,7 @@ minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr
  * the expected number of retransmissions and their expected length
  */
 static void
-minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
-                    int group, int rate)
+minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
 {
        struct minstrel_rate_stats *mr;
        unsigned int usecs;
@@ -226,8 +225,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
                        mr = &mg->rates[i];
                        mr->retry_updated = false;
                        index = MCS_GROUP_RATES * group + i;
-                       minstrel_calc_rate_ewma(mp, mr);
-                       minstrel_ht_calc_tp(mp, mi, group, i);
+                       minstrel_calc_rate_ewma(mr);
+                       minstrel_ht_calc_tp(mi, group, i);
 
                        if (!mr->cur_tp)
                                continue;
@@ -300,10 +299,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 static bool
 minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
 {
-       if (!rate->count)
+       if (rate->idx < 0)
                return false;
 
-       if (rate->idx < 0)
+       if (!rate->count)
                return false;
 
        return !!(rate->flags & IEEE80211_TX_RC_MCS);
@@ -357,7 +356,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
 }
 
 static void
-minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
+minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -455,7 +454,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
        if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
                minstrel_ht_update_stats(mp, mi);
                if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
-                       minstrel_aggr_check(mp, sta, skb);
+                       minstrel_aggr_check(sta, skb);
        }
 }
 
@@ -515,7 +514,6 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 static void
 minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                      struct ieee80211_tx_rate *rate, int index,
-                     struct ieee80211_tx_rate_control *txrc,
                      bool sample, bool rtscts)
 {
        const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@@ -569,6 +567,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        sample_idx += mi->sample_group * MCS_GROUP_RATES;
        minstrel_next_sample_idx(mi);
 
+       /*
+        * Sampling might add some overhead (RTS, no aggregation)
+        * to the frame. Hence, don't use sampling for the currently
+        * used max TP rate.
+        */
+       if (sample_idx == mi->max_tp_rate)
+               return -1;
        /*
         * When not using MRR, do not sample if the probability is already
         * higher than 95% to avoid wasting airtime
@@ -628,11 +633,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        if (sample_idx >= 0) {
                sample = true;
                minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
-                       txrc, true, false);
+                       true, false);
                info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
                minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
-                       txrc, false, false);
+                       false, false);
        }
 
        if (mp->hw->max_rates >= 3) {
@@ -643,13 +648,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                 */
                if (sample_idx >= 0)
                        minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
-                               txrc, false, false);
+                               false, false);
                else
                        minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
-                               txrc, false, true);
+                               false, true);
 
                minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
-                                    txrc, false, !sample);
+                                    false, !sample);
 
                ar[3].count = 0;
                ar[3].idx = -1;
@@ -660,7 +665,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                 * max_tp_rate -> max_prob_rate by default.
                 */
                minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
-                                    txrc, false, !sample);
+                                    false, !sample);
 
                ar[2].count = 0;
                ar[2].idx = -1;
@@ -694,6 +699,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
        int ack_dur;
        int stbc;
        int i;
+       unsigned int smps;
 
        /* fall back to the old minstrel for legacy stations */
        if (!sta->ht_cap.ht_supported)
@@ -733,6 +739,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
            oper_chan_type != NL80211_CHAN_HT40PLUS)
                sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 
+       smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
+               IEEE80211_HT_CAP_SM_PS_SHIFT;
+
        for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
                u16 req = 0;
 
@@ -750,6 +759,11 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
                if ((sta_cap & req) != req)
                        continue;
 
+               /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
+               if (smps == WLAN_HT_CAP_SM_PS_STATIC &&
+                   minstrel_mcs_groups[i].streams > 1)
+                       continue;
+
                mi->groups[i].supported =
                        mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
 
@@ -803,7 +817,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
                        max_rates = sband->n_bitrates;
        }
 
-       msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+       msp = kzalloc(sizeof(*msp), gfp);
        if (!msp)
                return NULL;
 
index cbc0193..fc83355 100644 (file)
@@ -99,7 +99,7 @@ int main(int argc, char *argv[])
        const char *inform = "dts";
        const char *outform = "dts";
        const char *outname = "-";
-       int force = 0, check = 0, sort = 0;
+       int force = 0, sort = 0;
        const char *arg;
        int opt;
        FILE *outf = NULL;
@@ -137,9 +137,6 @@ int main(int argc, char *argv[])
                case 'f':
                        force = 1;
                        break;
-               case 'c':
-                       check = 1;
-                       break;
                case 'q':
                        quiet++;
                        break;
index ead0332..28d0b23 100644 (file)
@@ -697,7 +697,6 @@ static struct reserve_info *flat_read_mem_reserve(struct inbuf *inb)
 {
        struct reserve_info *reservelist = NULL;
        struct reserve_info *new;
-       const char *p;
        struct fdt_reserve_entry re;
 
        /*
@@ -706,7 +705,6 @@ static struct reserve_info *flat_read_mem_reserve(struct inbuf *inb)
         *
         * First pass, count entries.
         */
-       p = inb->ptr;
        while (1) {
                flat_read_chunk(inb, &re, sizeof(re));
                re.address  = fdt64_to_cpu(re.address);
index 12440ee..f90d82f 100644 (file)
@@ -981,3 +981,4 @@ int cap_file_mmap(struct file *file, unsigned long reqprot,
        }
        return ret;
 }
+EXPORT_SYMBOL(cap_file_mmap);
index 4450fbe..bc94175 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/device_cgroup.h>
 #include <linux/cgroup.h>
 #include <linux/ctype.h>
+#include <linux/export.h>
 #include <linux/list.h>
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
@@ -500,6 +501,7 @@ found:
 
        return -EPERM;
 }
+EXPORT_SYMBOL(__devcgroup_inode_permission);
 
 int devcgroup_inode_mknod(int mode, dev_t dev)
 {
index e2f684a..892000c 100644 (file)
@@ -411,6 +411,7 @@ int security_path_rmdir(struct path *dir, struct dentry *dentry)
                return 0;
        return security_ops->path_rmdir(dir, dentry);
 }
+EXPORT_SYMBOL(security_path_rmdir);
 
 int security_path_unlink(struct path *dir, struct dentry *dentry)
 {
@@ -427,6 +428,7 @@ int security_path_symlink(struct path *dir, struct dentry *dentry,
                return 0;
        return security_ops->path_symlink(dir, dentry, old_name);
 }
+EXPORT_SYMBOL(security_path_symlink);
 
 int security_path_link(struct dentry *old_dentry, struct path *new_dir,
                       struct dentry *new_dentry)
@@ -435,6 +437,7 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
                return 0;
        return security_ops->path_link(old_dentry, new_dir, new_dentry);
 }
+EXPORT_SYMBOL(security_path_link);
 
 int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
                         struct path *new_dir, struct dentry *new_dentry)
@@ -453,6 +456,7 @@ int security_path_truncate(struct path *path)
                return 0;
        return security_ops->path_truncate(path);
 }
+EXPORT_SYMBOL(security_path_truncate);
 
 int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
                        mode_t mode)
@@ -461,6 +465,7 @@ int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
                return 0;
        return security_ops->path_chmod(dentry, mnt, mode);
 }
+EXPORT_SYMBOL(security_path_chmod);
 
 int security_path_chown(struct path *path, uid_t uid, gid_t gid)
 {
@@ -468,6 +473,7 @@ int security_path_chown(struct path *path, uid_t uid, gid_t gid)
                return 0;
        return security_ops->path_chown(path, uid, gid);
 }
+EXPORT_SYMBOL(security_path_chown);
 
 int security_path_chroot(struct path *path)
 {
@@ -544,6 +550,7 @@ int security_inode_readlink(struct dentry *dentry)
                return 0;
        return security_ops->inode_readlink(dentry);
 }
+EXPORT_SYMBOL(security_inode_readlink);
 
 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
@@ -558,6 +565,7 @@ int security_inode_permission(struct inode *inode, int mask)
                return 0;
        return security_ops->inode_permission(inode, mask);
 }
+EXPORT_SYMBOL(security_inode_permission);
 
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
 {
@@ -673,6 +681,7 @@ int security_file_permission(struct file *file, int mask)
 
        return fsnotify_perm(file, mask);
 }
+EXPORT_SYMBOL(security_file_permission);
 
 int security_file_alloc(struct file *file)
 {
@@ -700,6 +709,7 @@ int security_file_mmap(struct file *file, unsigned long reqprot,
                return ret;
        return ima_file_mmap(file, prot);
 }
+EXPORT_SYMBOL(security_file_mmap);
 
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
                            unsigned long prot)
index 8a00555..5b3c19a 100644 (file)
@@ -2587,6 +2587,15 @@ static int snd_pcm_common_ioctl1(struct file *file,
                snd_pcm_stream_unlock_irq(substream);
                return res;
        }
+       case _IOW('A', 0xfe, struct snd_pnd_hack_params):
+       {
+               /* pandora HACK */
+               if (copy_from_user(&substream->pnd_hack_params, arg,
+                   sizeof(substream->pnd_hack_params)))
+                       return -EFAULT;
+               printk(KERN_INFO "%s hack set\n", __func__);
+               return 0;
+       }
        }
        snd_printd("unknown ioctl = 0x%x\n", cmd);
        return -ENOTTY;
index a2c7842..a90bbaa 100644 (file)
@@ -90,6 +90,8 @@ snd-soc-wm9713-objs := wm9713.o
 snd-soc-wm-hubs-objs := wm_hubs.o
 snd-soc-jz4740-codec-objs := jz4740.o
 
+CFLAGS_twl4030.o += -Wno-unused-variable -Wno-unused-function
+
 # Amp
 snd-soc-lm4857-objs := lm4857.o
 snd-soc-max9877-objs := max9877.o
index f798247..321a0b9 100644 (file)
@@ -55,7 +55,7 @@ static const u8 twl4030_reg[TWL4030_CACHEREGNUM] = {
        0x00, /* REG_OPTION             (0x2)   */
        0x00, /* REG_UNKNOWN            (0x3)   */
        0x00, /* REG_MICBIAS_CTL        (0x4)   */
-       0x00, /* REG_ANAMICL            (0x5)   */
+       0x01, /* REG_ANAMICL            (0x5)   */
        0x00, /* REG_ANAMICR            (0x6)   */
        0x00, /* REG_AVADC_CTL          (0x7)   */
        0x00, /* REG_ADCMICSEL          (0x8)   */
@@ -136,6 +136,8 @@ struct twl4030_priv {
        /* reference counts of AIF/APLL users */
        unsigned int apll_enabled;
 
+       unsigned int using_256fs;
+
        struct snd_pcm_substream *master_substream;
        struct snd_pcm_substream *slave_substream;
 
@@ -501,12 +503,16 @@ SOC_DAPM_ENUM("Route", twl4030_vibrapath_enum);
 static const struct snd_kcontrol_new twl4030_dapm_analoglmic_controls[] = {
        SOC_DAPM_SINGLE("Main Mic Capture Switch",
                        TWL4030_REG_ANAMICL, 0, 1, 0),
+#if 0
        SOC_DAPM_SINGLE("Headset Mic Capture Switch",
                        TWL4030_REG_ANAMICL, 1, 1, 0),
+#endif
        SOC_DAPM_SINGLE("AUXL Capture Switch",
                        TWL4030_REG_ANAMICL, 2, 1, 0),
+#if 0
        SOC_DAPM_SINGLE("Carkit Mic Capture Switch",
                        TWL4030_REG_ANAMICL, 3, 1, 0),
+#endif
 };
 
 /* Right analog microphone selection */
@@ -1125,6 +1131,7 @@ static const struct soc_enum twl4030_digimicswap_enum =
                        twl4030_digimicswap_texts);
 
 static const struct snd_kcontrol_new twl4030_snd_controls[] = {
+#if 0
        /* Codec operation mode control */
        SOC_ENUM_EXT("Codec Operation Mode", twl4030_op_modes_enum,
                snd_soc_get_enum_double,
@@ -1194,10 +1201,10 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
        SOC_DOUBLE_R_TLV("TX2 Digital Capture Volume",
                TWL4030_REG_AVTXL2PGA, TWL4030_REG_AVTXR2PGA,
                0, 0x1f, 0, digital_capture_tlv),
-
+#endif
        SOC_DOUBLE_TLV("Analog Capture Volume", TWL4030_REG_ANAMIC_GAIN,
                0, 3, 5, 0, input_gain_tlv),
-
+#if 0
        SOC_ENUM("AVADC Clock Priority", twl4030_avadc_clk_priority_enum),
 
        SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum),
@@ -1206,6 +1213,7 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
        SOC_ENUM("Vibra H-bridge direction", twl4030_vibradir_enum),
 
        SOC_ENUM("Digimic LR Swap", twl4030_digimicswap_enum),
+#endif
 };
 
 static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
@@ -1234,8 +1242,9 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
        SND_SOC_DAPM_OUTPUT("VIBRA"),
 
        /* AIF and APLL clocks for running DAIs (including loopback) */
-       SND_SOC_DAPM_OUTPUT("Virtual HiFi OUT"),
        SND_SOC_DAPM_INPUT("Virtual HiFi IN"),
+#if 0
+       SND_SOC_DAPM_OUTPUT("Virtual HiFi OUT"),
        SND_SOC_DAPM_OUTPUT("Virtual Voice OUT"),
 
        /* DACs */
@@ -1297,13 +1306,13 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
                        TWL4030_REG_ARXL2_APGA_CTL, 0, 0, NULL, 0),
        SND_SOC_DAPM_MIXER("Analog Voice Playback Mixer",
                        TWL4030_REG_VDL_APGA_CTL, 0, 0, NULL, 0),
-
+#endif
        SND_SOC_DAPM_SUPPLY("APLL Enable", SND_SOC_NOPM, 0, 0, apll_event,
                            SND_SOC_DAPM_PRE_PMU|SND_SOC_DAPM_POST_PMD),
 
        SND_SOC_DAPM_SUPPLY("AIF Enable", SND_SOC_NOPM, 0, 0, aif_event,
                            SND_SOC_DAPM_PRE_PMU|SND_SOC_DAPM_POST_PMD),
-
+#if 0
        /* Output MIXER controls */
        /* Earpiece */
        SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0,
@@ -1374,13 +1383,14 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
                           SND_SOC_DAPM_PRE_PMU),
        SND_SOC_DAPM_MUX("Vibra Route", SND_SOC_NOPM, 0, 0,
                &twl4030_dapm_vibrapath_control),
-
+#endif
        /* Introducing four virtual ADC, since TWL4030 have four channel for
           capture */
        SND_SOC_DAPM_ADC("ADC Virtual Left1", "Left Front Capture",
                SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_ADC("ADC Virtual Right1", "Right Front Capture",
                SND_SOC_NOPM, 0, 0),
+#if 0
        SND_SOC_DAPM_ADC("ADC Virtual Left2", "Left Rear Capture",
                SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_ADC("ADC Virtual Right2", "Right Rear Capture",
@@ -1393,7 +1403,7 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
                &twl4030_dapm_micpathtx1_control),
        SND_SOC_DAPM_MUX("TX2 Capture Route", SND_SOC_NOPM, 0, 0,
                &twl4030_dapm_micpathtx2_control),
-
+#endif
        /* Analog input mixers for the capture amplifiers */
        SND_SOC_DAPM_MIXER("Analog Left",
                TWL4030_REG_ANAMICL, 4, 0,
@@ -1408,7 +1418,7 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
                TWL4030_REG_AVADC_CTL, 3, 0, NULL, 0),
        SND_SOC_DAPM_PGA("ADC Physical Right",
                TWL4030_REG_AVADC_CTL, 1, 0, NULL, 0),
-
+#if 0
        SND_SOC_DAPM_PGA_E("Digimic0 Enable",
                TWL4030_REG_ADCMICSEL, 1, 0, NULL, 0,
                digimic_event, SND_SOC_DAPM_POST_PMU),
@@ -1420,14 +1430,16 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
                            NULL, 0),
        SND_SOC_DAPM_SUPPLY("micbias2 select", TWL4030_REG_MICBIAS_CTL, 6, 0,
                            NULL, 0),
-
+#endif
        SND_SOC_DAPM_MICBIAS("Mic Bias 1", TWL4030_REG_MICBIAS_CTL, 0, 0),
        SND_SOC_DAPM_MICBIAS("Mic Bias 2", TWL4030_REG_MICBIAS_CTL, 1, 0),
+#if 0
        SND_SOC_DAPM_MICBIAS("Headset Mic Bias", TWL4030_REG_MICBIAS_CTL, 2, 0),
-
+#endif
 };
 
 static const struct snd_soc_dapm_route intercon[] = {
+#if 0
        {"Digital L1 Playback Mixer", NULL, "DAC Left1"},
        {"Digital R1 Playback Mixer", NULL, "DAC Right1"},
        {"Digital L2 Playback Mixer", NULL, "DAC Left2"},
@@ -1530,25 +1542,25 @@ static const struct snd_soc_dapm_route intercon[] = {
        {"HFR", NULL, "HandsfreeR PGA"},
        {"Vibra Route", "Audio", "Vibra Mux"},
        {"VIBRA", NULL, "Vibra Route"},
-
+#endif
        /* Capture path */
        /* Must be always connected (for AIF and APLL) */
        {"ADC Virtual Left1", NULL, "Virtual HiFi IN"},
        {"ADC Virtual Right1", NULL, "Virtual HiFi IN"},
-       {"ADC Virtual Left2", NULL, "Virtual HiFi IN"},
-       {"ADC Virtual Right2", NULL, "Virtual HiFi IN"},
+//     {"ADC Virtual Left2", NULL, "Virtual HiFi IN"},
+//     {"ADC Virtual Right2", NULL, "Virtual HiFi IN"},
        /* Physical inputs */
        {"Analog Left", "Main Mic Capture Switch", "MAINMIC"},
-       {"Analog Left", "Headset Mic Capture Switch", "HSMIC"},
+//     {"Analog Left", "Headset Mic Capture Switch", "HSMIC"},
        {"Analog Left", "AUXL Capture Switch", "AUXL"},
-       {"Analog Left", "Carkit Mic Capture Switch", "CARKITMIC"},
+//     {"Analog Left", "Carkit Mic Capture Switch", "CARKITMIC"},
 
        {"Analog Right", "Sub Mic Capture Switch", "SUBMIC"},
        {"Analog Right", "AUXR Capture Switch", "AUXR"},
 
        {"ADC Physical Left", NULL, "Analog Left"},
        {"ADC Physical Right", NULL, "Analog Right"},
-
+#if 0
        {"Digimic0 Enable", NULL, "DIGIMIC0"},
        {"Digimic1 Enable", NULL, "DIGIMIC1"},
 
@@ -1567,14 +1579,15 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* TX2 Right capture path */
        {"TX2 Capture Route", "Analog", "ADC Physical Right"},
        {"TX2 Capture Route", "Digimic1", "Digimic1 Enable"},
-
-       {"ADC Virtual Left1", NULL, "TX1 Capture Route"},
-       {"ADC Virtual Right1", NULL, "TX1 Capture Route"},
-       {"ADC Virtual Left2", NULL, "TX2 Capture Route"},
-       {"ADC Virtual Right2", NULL, "TX2 Capture Route"},
+#endif
+       {"ADC Virtual Left1", NULL, "ADC Physical Left"},
+       {"ADC Virtual Right1", NULL, "ADC Physical Right"},
+//     {"ADC Virtual Left2", NULL, "TX2 Capture Route"},
+//     {"ADC Virtual Right2", NULL, "TX2 Capture Route"},
 
        {"ADC Virtual Left1", NULL, "AIF Enable"},
        {"ADC Virtual Right1", NULL, "AIF Enable"},
+#if 0
        {"ADC Virtual Left2", NULL, "AIF Enable"},
        {"ADC Virtual Right2", NULL, "AIF Enable"},
 
@@ -1606,7 +1619,7 @@ static const struct snd_soc_dapm_route intercon[] = {
        {"Digital R2 Playback Mixer", NULL, "Right Digital Loopback"},
        {"Digital L2 Playback Mixer", NULL, "Left Digital Loopback"},
        {"Digital Voice Playback Mixer", NULL, "Voice Digital Loopback"},
-
+#endif
 };
 
 static int twl4030_set_bias_level(struct snd_soc_codec *codec,
@@ -1688,6 +1701,7 @@ static int twl4030_startup(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_codec *codec = rtd->codec;
        struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+       u8 format;
 
        snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
        if (twl4030->master_substream) {
@@ -1710,6 +1724,12 @@ static int twl4030_startup(struct snd_pcm_substream *substream,
                twl4030->master_substream = substream;
        }
 
+       format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
+       if (twl4030->using_256fs && !(format & TWL4030_CLK256FS_EN)) {
+               format |= TWL4030_CLK256FS_EN;
+               twl4030_write(codec, TWL4030_REG_AUDIO_IF, format);
+       }
+
        return 0;
 }
 
@@ -1719,6 +1739,7 @@ static void twl4030_shutdown(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_codec *codec = rtd->codec;
        struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+       u8 format;
 
        if (twl4030->master_substream == substream)
                twl4030->master_substream = twl4030->slave_substream;
@@ -1735,6 +1756,13 @@ static void twl4030_shutdown(struct snd_pcm_substream *substream,
         /* If the closing substream had 4 channel, do the necessary cleanup */
        if (substream->runtime->channels == 4)
                twl4030_tdm_enable(codec, substream->stream, 0);
+
+       /* Disable 256fs clock to avoid noisy output in power save modes */
+       format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
+       if (format & TWL4030_CLK256FS_EN) {
+               format &= ~TWL4030_CLK256FS_EN;
+               twl4030_write(codec, TWL4030_REG_AUDIO_IF, format);
+       }
 }
 
 static int twl4030_hw_params(struct snd_pcm_substream *substream,
@@ -1760,9 +1788,11 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
                        return -EINVAL;
        }
 
+#if 0
        if (twl4030->configured)
                /* Ignoring hw_params for already configured DAI */
                return 0;
+#endif
 
        /* bit rate */
        old_mode = twl4030_read_reg_cache(codec,
@@ -1898,10 +1928,12 @@ static int twl4030_set_dai_fmt(struct snd_soc_dai *codec_dai,
        case SND_SOC_DAIFMT_CBM_CFM:
                format &= ~(TWL4030_AIF_SLAVE_EN);
                format &= ~(TWL4030_CLK256FS_EN);
+               twl4030->using_256fs = 0;
                break;
        case SND_SOC_DAIFMT_CBS_CFS:
                format |= TWL4030_AIF_SLAVE_EN;
                format |= TWL4030_CLK256FS_EN;
+               twl4030->using_256fs = 1;
                break;
        default:
                return -EINVAL;
@@ -2174,7 +2206,7 @@ static struct snd_soc_dai_driver twl4030_dai[] = {
                .stream_name = "HiFi Playback",
                .channels_min = 2,
                .channels_max = 4,
-               .rates = TWL4030_RATES | SNDRV_PCM_RATE_96000,
+               .rates = TWL4030_RATES, // | SNDRV_PCM_RATE_96000,
                .formats = TWL4030_FORMATS,},
        .capture = {
                .stream_name = "Capture",
index 4314647..308339f 100644 (file)
@@ -59,6 +59,8 @@ struct omap_mcbsp_data {
        unsigned int                    in_freq;
        int                             clk_div;
        int                             wlen;
+       /* pandora hack */
+       struct snd_pcm_substream        *substream;
 };
 
 static struct omap_mcbsp_data mcbsp_data[NUM_LINKS];
@@ -110,6 +112,7 @@ static int omap_mcbsp_hwrule_min_buffersize(struct snd_pcm_hw_params *params,
        struct snd_interval *channels = hw_param_interval(params,
                                        SNDRV_PCM_HW_PARAM_CHANNELS);
        struct omap_mcbsp_data *mcbsp_data = rule->private;
+       struct snd_pcm_substream *substream = mcbsp_data->substream;
        struct snd_interval frames;
        int size;
 
@@ -118,6 +121,15 @@ static int omap_mcbsp_hwrule_min_buffersize(struct snd_pcm_hw_params *params,
 
        frames.min = size / channels->min;
        frames.integer = 1;
+
+       if (substream && substream->pnd_hack_params.frames_min > 0
+           && substream->pnd_hack_params.frames_max
+              >= substream->pnd_hack_params.frames_min)
+       {
+               frames.min = substream->pnd_hack_params.frames_min;
+               frames.max = substream->pnd_hack_params.frames_max;
+       }
+
        return snd_interval_refine(buffer_size, &frames);
 }
 
@@ -128,8 +140,10 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
        int bus_id = mcbsp_data->bus_id;
        int err = 0;
 
-       if (!cpu_dai->active)
+       if (!cpu_dai->active) {
                err = omap_mcbsp_request(bus_id);
+               mcbsp_data->substream = substream;
+       }
 
        /*
         * OMAP3 McBSP FIFO is word structured.
@@ -152,10 +166,10 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
                * smaller buffer than the FIFO size to avoid underruns
                */
                snd_pcm_hw_rule_add(substream->runtime, 0,
-                                   SNDRV_PCM_HW_PARAM_CHANNELS,
+                                   SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                    omap_mcbsp_hwrule_min_buffersize,
                                    mcbsp_data,
-                                   SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
+                                   SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 
                /* Make sure, that the period size is always even */
                snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -173,6 +187,11 @@ static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream,
        if (!cpu_dai->active) {
                omap_mcbsp_free(mcbsp_data->bus_id);
                mcbsp_data->configured = 0;
+
+               /* undo pandora hack */
+               mcbsp_data->substream = NULL;
+               substream->pnd_hack_params.frames_min = 0;
+               substream->pnd_hack_params.frames_max = 0;
        }
 }
 
@@ -312,10 +331,12 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
 
        snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
 
+#if 0
        if (mcbsp_data->configured) {
                /* McBSP already configured by another stream */
                return 0;
        }
+#endif
 
        regs->rcr2      &= ~(RPHASE | RFRLEN2(0x7f) | RWDLEN2(7));
        regs->xcr2      &= ~(RPHASE | XFRLEN2(0x7f) | XWDLEN2(7));
@@ -599,6 +620,36 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
        return err;
 }
 
+/* 
+ * We have to be sure there is more than FIFO size worth of data ready
+ * before starting, or else we get underflow right after start.
+ * XXX: To make realtime streaming work, setting this to fifo+period
+ * as DMA uses period boundaries, there must be enough data at those.
+ */
+static int omap_mcbsp_dai_prepare(struct snd_pcm_substream *substream,
+                                 struct snd_soc_dai *cpu_dai)
+{
+       struct omap_mcbsp_data *mcbsp_data = snd_soc_dai_get_drvdata(cpu_dai);
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       int size;
+
+       if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+               return 0;
+
+       size = omap_mcbsp_get_fifo_size(mcbsp_data->bus_id);
+       size /= substream->runtime->channels;
+
+       size += bytes_to_frames(runtime, snd_pcm_lib_period_bytes(substream));
+
+       if (runtime->start_threshold < size) {
+               runtime->start_threshold = size;
+               if (runtime->start_threshold > runtime->buffer_size)
+                       runtime->start_threshold = runtime->buffer_size;
+       }
+
+       return 0;
+}
+
 static struct snd_soc_dai_ops mcbsp_dai_ops = {
        .startup        = omap_mcbsp_dai_startup,
        .shutdown       = omap_mcbsp_dai_shutdown,
@@ -608,6 +659,7 @@ static struct snd_soc_dai_ops mcbsp_dai_ops = {
        .set_fmt        = omap_mcbsp_dai_set_dai_fmt,
        .set_clkdiv     = omap_mcbsp_dai_set_clkdiv,
        .set_sysclk     = omap_mcbsp_dai_set_dai_sysclk,
+       .prepare        = omap_mcbsp_dai_prepare,
 };
 
 static int mcbsp_dai_probe(struct snd_soc_dai *dai)
index 9563728..63d66da 100644 (file)
@@ -402,6 +402,10 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
        }
 
 out:
+       /* free preallocated buffers in case of error */
+       if (ret)
+               omap_pcm_free_dma_buffers(pcm);
+
        return ret;
 }
 
index 7605c37..796484d 100644 (file)
@@ -29,6 +29,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
+#include <sound/pcm_params.h>
 
 #include <asm/mach-types.h>
 #include <plat/mcbsp.h>
@@ -49,6 +50,7 @@ static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_dai *codec_dai = rtd->codec_dai;
        struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+       int divider;
        int ret;
 
        /* Set the codec system clock for DAC and ADC */
@@ -68,9 +70,30 @@ static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, OMAP_MCBSP_CLKGDV, 8);
+       divider = 8;
+       if (snd_pcm_format_physical_width(params_format(params)) > 16)
+               divider = 4;
+
+       ret = snd_soc_dai_set_clkdiv(cpu_dai, OMAP_MCBSP_CLKGDV, divider);
+       if (ret < 0) {
+               pr_err(PREFIX "can't set SRG clock divider to %d\n", divider);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int omap3pandora_hw_free(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+       int ret;
+
+       /* Set McBSP clock back to internal for power saving to work */
+       ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_SYSCLK_CLKS_FCLK,
+                                    0, SND_SOC_CLOCK_IN);
        if (ret < 0) {
-               pr_err(PREFIX "can't set SRG clock divider\n");
+               pr_err(PREFIX "can't set cpu system clock\n");
                return ret;
        }
 
@@ -201,6 +224,7 @@ static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 
 static struct snd_soc_ops omap3pandora_ops = {
        .hw_params = omap3pandora_hw_params,
+       .hw_free = omap3pandora_hw_free,
 };
 
 /* Digital audio interface glue - connects codec <--> CPU */
index 3eb605b..88e3f81 100644 (file)
@@ -485,7 +485,7 @@ static int __devinit snd_probe(struct usb_interface *intf,
                     const struct usb_device_id *id)
 {
        int ret;
-       struct snd_card *card;
+       struct snd_card *card = NULL;
        struct usb_device *device = interface_to_usbdev(intf);
 
        ret = create_card(device, intf, &card);