Merge branch 'for-linux-next' of git://people.freedesktop.org/~danvet/drm-intel into...
author Dave Airlie <airlied@redhat.com>
Fri, 10 May 2013 04:35:48 +0000 (14:35 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 10 May 2013 04:35:48 +0000 (14:35 +1000)
Daniel writes:
A few Intel fixes for smaller issues, and one revert for an SDV hack which
we've wanted to kill anyway. Plus two drm patches included for your
convenience, both regression fixes for my own screw-ups.

Also included are both fixes for stolen memory handling.

* 'for-linux-next' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: clear the stolen fb before resuming
  Revert "drm/i915: Calculate correct stolen size for GEN7+"
  drm/i915: hsw: fix link training for eDP on port-A
  Revert "drm/i915: revert eDP bpp clamping code changes"
  drm: don't check modeset locks in panic handler
  drm/i915: Fix pipe enabled mask for pipe C in WM calculations
  drm/mm: fix dump table BUG
  drm/i915: Always normalize return timeout for wait_timeout_ioctl

532 files changed:
Documentation/EDID/1600x1200.S [new file with mode: 0644]
Documentation/EDID/HOWTO.txt
Documentation/devicetree/bindings/drm/exynos/g2d.txt [deleted file]
MAINTAINERS
arch/arm/Kconfig
arch/arm/boot/dts/armada-370-mirabox.dts
arch/arm/boot/dts/armada-370.dtsi
arch/arm/boot/dts/dbx5x0.dtsi
arch/arm/boot/dts/kirkwood-goflexnet.dts
arch/arm/boot/dts/orion5x.dtsi
arch/arm/include/asm/delay.h
arch/arm/include/asm/highmem.h
arch/arm/include/asm/mmu_context.h
arch/arm/include/asm/tlbflush.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_tlb.c
arch/arm/kvm/vgic.c
arch/arm/lib/delay.c
arch/arm/mach-cns3xxx/core.c
arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
arch/arm/mach-ep93xx/include/mach/uncompress.h
arch/arm/mach-imx/common.h
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/src.c
arch/arm/mach-kirkwood/guruplug-setup.c
arch/arm/mach-kirkwood/openrd-setup.c
arch/arm/mach-kirkwood/rd88f6281-setup.c
arch/arm/mach-msm/timer.c
arch/arm/mach-mvebu/irq-armada-370-xp.c
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-am3517evm.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/dss-common.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-ux500/board-mop500-sdi.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/board-mop500.h
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/context.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/mips/Kconfig
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/nvram.c
arch/mips/bcm63xx/setup.c
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/signal.h
arch/mips/kernel/Makefile
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/linux32.c
arch/mips/kernel/proc.c
arch/mips/lib/bitops.c
arch/mips/lib/csum_partial.S
arch/s390/include/asm/pgtable.h
arch/s390/lib/uaccess_pt.c
arch/tile/kernel/setup.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regmap.c
drivers/block/aoe/aoecmd.c
drivers/block/loop.c
drivers/char/hw_random/core.c
drivers/char/virtio_console.c
drivers/clk/tegra/clk-tegra20.c
drivers/dma/Kconfig
drivers/gpio/gpio-ich.c
drivers/gpio/gpio-stmpe.c
drivers/gpu/Makefile
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_connector.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-fimc.h
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/gma500/cdv_intel_crt.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/gma500/gtt.h
drivers/gpu/drm/gma500/intel_bios.c
drivers/gpu/drm/gma500/intel_bios.h
drivers/gpu/drm/gma500/mdfld_dsi_output.c
drivers/gpu/drm/gma500/power.c
drivers/gpu/drm/gma500/power.h
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_intel_display.c
drivers/gpu/drm/gma500/psb_intel_display.h
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/gma500/psb_intel_reg.h
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/gma500/psb_irq.c
drivers/gpu/drm/gma500/psb_irq.h
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/core/client.c
drivers/gpu/drm/nouveau/core/core/engine.c
drivers/gpu/drm/nouveau/core/core/event.c
drivers/gpu/drm/nouveau/core/core/object.c
drivers/gpu/drm/nouveau/core/core/parent.c
drivers/gpu/drm/nouveau/core/engine/device/base.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/base.c with 89% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nv04.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nv04.c with 99% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nv10.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nv10.c with 99% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nv20.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nv20.c with 99% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nv30.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nv30.c with 99% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nv40.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nv40.c with 99% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nv50.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nv50.c with 97% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c with 90% similarity]
drivers/gpu/drm/nouveau/core/engine/device/nve0.c [moved from drivers/gpu/drm/nouveau/core/subdev/device/nve0.c with 79% similarity]
drivers/gpu/drm/nouveau/core/engine/disp/dport.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
drivers/gpu/drm/nouveau/core/engine/fifo/base.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
drivers/gpu/drm/nouveau/core/include/core/class.h
drivers/gpu/drm/nouveau/core/include/core/device.h
drivers/gpu/drm/nouveau/core/include/core/parent.h
drivers/gpu/drm/nouveau/core/include/engine/device.h [moved from drivers/gpu/drm/nouveau/core/include/subdev/device.h with 93% similarity]
drivers/gpu/drm/nouveau/core/include/engine/disp.h
drivers/gpu/drm/nouveau/core/include/engine/fifo.h
drivers/gpu/drm/nouveau/core/include/engine/graph.h
drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
drivers/gpu/drm/nouveau/core/include/subdev/mc.h
drivers/gpu/drm/nouveau/core/include/subdev/therm.h
drivers/gpu/drm/nouveau/core/os.h
drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
drivers/gpu/drm/nouveau/dispnv04/Makefile [new file with mode: 0644]
drivers/gpu/drm/nouveau/dispnv04/arb.c [moved from drivers/gpu/drm/nouveau/nouveau_calc.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/crtc.c [moved from drivers/gpu/drm/nouveau/nv04_crtc.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/cursor.c [moved from drivers/gpu/drm/nouveau/nv04_cursor.c with 98% similarity]
drivers/gpu/drm/nouveau/dispnv04/dac.c [moved from drivers/gpu/drm/nouveau/nv04_dac.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/dfp.c [moved from drivers/gpu/drm/nouveau/nv04_dfp.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/disp.c [moved from drivers/gpu/drm/nouveau/nv04_display.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/disp.h [moved from drivers/gpu/drm/nouveau/nv04_display.h with 100% similarity]
drivers/gpu/drm/nouveau/dispnv04/hw.c [moved from drivers/gpu/drm/nouveau/nouveau_hw.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/hw.h [moved from drivers/gpu/drm/nouveau/nouveau_hw.h with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/nvreg.h [moved from drivers/gpu/drm/nouveau/nvreg.h with 100% similarity]
drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c [moved from drivers/gpu/drm/nouveau/nv17_tv_modes.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/tvnv04.c [moved from drivers/gpu/drm/nouveau/nv04_tv.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c [moved from drivers/gpu/drm/nouveau/nv17_tv.c with 99% similarity]
drivers/gpu/drm/nouveau/dispnv04/tvnv17.h [moved from drivers/gpu/drm/nouveau/nv17_tv.h with 100% similarity]
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_irq.c [deleted file]
drivers/gpu/drm/nouveau/nouveau_irq.h [deleted file]
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nv04_pm.c
drivers/gpu/drm/nouveau/nv40_pm.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_pm.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/omapdrm/omap_irq.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/qxl/Kconfig [new file with mode: 0644]
drivers/gpu/drm/qxl/Makefile [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_cmd.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_dev.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_display.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_draw.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_drv.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_drv.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_dumb.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_fb.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_fence.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_gem.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_image.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_ioctl.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_irq.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_kms.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_object.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_object.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_release.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_ttm.c [new file with mode: 0644]
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_uvd.c [new file with mode: 0644]
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rs690d.h
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/tegra/Makefile [deleted file]
drivers/gpu/drm/tegra/drm.c [deleted file]
drivers/gpu/drm/tegra/fb.c [deleted file]
drivers/gpu/drm/tegra/host1x.c [deleted file]
drivers/gpu/drm/tilcdc/Makefile
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tilcdc/tilcdc_slave.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/host1x/Kconfig [new file with mode: 0644]
drivers/gpu/host1x/Makefile [new file with mode: 0644]
drivers/gpu/host1x/cdma.c [new file with mode: 0644]
drivers/gpu/host1x/cdma.h [new file with mode: 0644]
drivers/gpu/host1x/channel.c [new file with mode: 0644]
drivers/gpu/host1x/channel.h [new file with mode: 0644]
drivers/gpu/host1x/debug.c [new file with mode: 0644]
drivers/gpu/host1x/debug.h [new file with mode: 0644]
drivers/gpu/host1x/dev.c [new file with mode: 0644]
drivers/gpu/host1x/dev.h [new file with mode: 0644]
drivers/gpu/host1x/drm/Kconfig [moved from drivers/gpu/drm/tegra/Kconfig with 53% similarity]
drivers/gpu/host1x/drm/dc.c [moved from drivers/gpu/drm/tegra/dc.c with 97% similarity]
drivers/gpu/host1x/drm/dc.h [moved from drivers/gpu/drm/tegra/dc.h with 100% similarity]
drivers/gpu/host1x/drm/drm.c [new file with mode: 0644]
drivers/gpu/host1x/drm/drm.h [moved from drivers/gpu/drm/tegra/drm.h with 77% similarity]
drivers/gpu/host1x/drm/fb.c [new file with mode: 0644]
drivers/gpu/host1x/drm/gem.c [new file with mode: 0644]
drivers/gpu/host1x/drm/gem.h [new file with mode: 0644]
drivers/gpu/host1x/drm/gr2d.c [new file with mode: 0644]
drivers/gpu/host1x/drm/hdmi.c [moved from drivers/gpu/drm/tegra/hdmi.c with 99% similarity]
drivers/gpu/host1x/drm/hdmi.h [moved from drivers/gpu/drm/tegra/hdmi.h with 100% similarity]
drivers/gpu/host1x/drm/output.c [moved from drivers/gpu/drm/tegra/output.c with 100% similarity]
drivers/gpu/host1x/drm/rgb.c [moved from drivers/gpu/drm/tegra/rgb.c with 100% similarity]
drivers/gpu/host1x/host1x.h [new file with mode: 0644]
drivers/gpu/host1x/host1x_bo.h [new file with mode: 0644]
drivers/gpu/host1x/host1x_client.h [new file with mode: 0644]
drivers/gpu/host1x/hw/Makefile [new file with mode: 0644]
drivers/gpu/host1x/hw/cdma_hw.c [new file with mode: 0644]
drivers/gpu/host1x/hw/channel_hw.c [new file with mode: 0644]
drivers/gpu/host1x/hw/debug_hw.c [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x01.c [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x01.h [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x01_hardware.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x01_channel.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x01_sync.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x01_uclass.h [new file with mode: 0644]
drivers/gpu/host1x/hw/intr_hw.c [new file with mode: 0644]
drivers/gpu/host1x/hw/syncpt_hw.c [new file with mode: 0644]
drivers/gpu/host1x/intr.c [new file with mode: 0644]
drivers/gpu/host1x/intr.h [new file with mode: 0644]
drivers/gpu/host1x/job.c [new file with mode: 0644]
drivers/gpu/host1x/job.h [new file with mode: 0644]
drivers/gpu/host1x/syncpt.c [new file with mode: 0644]
drivers/gpu/host1x/syncpt.h [new file with mode: 0644]
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/media/platform/Kconfig
drivers/media/radio/radio-ma901.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sja1000/sja1000.h
drivers/net/ethernet/atheros/atl1e/atl1e.h
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/davicom/dm9000.h
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/marvell/sky2.h
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/usb/smsc75xx.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/pcie.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_drv.c
drivers/s390/char/tty3270.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/libfc/fc_disc.c
drivers/spi/Kconfig
drivers/video/Kconfig
drivers/video/console/fbcon.c
drivers/video/hdmi.c
drivers/video/omap2/Makefile
drivers/video/omap2/displays/panel-acx565akm.c
drivers/video/omap2/displays/panel-generic-dpi.c
drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
drivers/video/omap2/displays/panel-n8x0.c
drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
drivers/video/omap2/displays/panel-picodlp.c
drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/displays/panel-tfp410.c
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
drivers/video/omap2/dss/apply.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dispc.h
drivers/video/omap2/dss/dpi.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/dss.h
drivers/video/omap2/dss/dss_features.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/output.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/sdi.c
drivers/video/omap2/dss/venc.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/sh_mobile_lcdcfb.c
fs/block_dev.c
fs/ext4/extents.c
fs/ext4/indirect.c
fs/nfsd/nfs4xdr.c
fs/reiserfs/xattr.c
fs/ubifs/super.c
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_edid.h
include/drm/drm_fb_helper.h
include/drm/drm_pciids.h
include/drm/ttm/ttm_bo_driver.h
include/linux/compat.h
include/linux/netdevice.h
include/linux/signal.h
include/scsi/libfc.h
include/trace/events/host1x.h [new file with mode: 0644]
include/uapi/drm/Kbuild
include/uapi/drm/drm.h
include/uapi/drm/drm_mode.h
include/uapi/drm/qxl_drm.h [new file with mode: 0644]
include/uapi/drm/radeon_drm.h
include/uapi/drm/tegra_drm.h [new file with mode: 0644]
include/video/omap-panel-data.h [new file with mode: 0644]
include/video/omap-panel-generic-dpi.h [deleted file]
include/video/omap-panel-n8x0.h [deleted file]
include/video/omap-panel-nokia-dsi.h [deleted file]
include/video/omap-panel-picodlp.h [deleted file]
include/video/omap-panel-tfp410.h [deleted file]
include/video/omapdss.h
ipc/msg.c
mm/mmap.c
mm/nommu.c
net/core/dev.c
net/core/flow.c
net/core/rtnetlink.c
net/ipv6/addrconf.c
net/ipv6/ip6_input.c
net/key/af_key.c
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/sched/sch_cbq.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/vmw_vsock/vsock_addr.c
net/vmw_vsock/vsock_addr.h
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/wext-sme.c
net/xfrm/xfrm_replay.c

diff --git a/Documentation/EDID/1600x1200.S b/Documentation/EDID/1600x1200.S
new file mode 100644 (file)
index 0000000..0ded64c
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+   1600x1200.S: EDID data set for standard 1600x1200 60 Hz monitor
+
+   Copyright (C) 2013 Carsten Emde <C.Emde@osadl.org>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either version 2
+   of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+*/
+
+/* EDID */
+#define VERSION 1
+#define REVISION 3
+
+/* Display */
+#define CLOCK 162000 /* kHz */
+#define XPIX 1600
+#define YPIX 1200
+#define XY_RATIO XY_RATIO_4_3
+#define XBLANK 560
+#define YBLANK 50
+#define XOFFSET 64
+#define XPULSE 192
+#define YOFFSET (63+1)
+#define YPULSE (63+3)
+#define DPI 72
+#define VFREQ 60 /* Hz */
+#define TIMING_NAME "Linux UXGA"
+#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
+#define HSYNC_POL 1
+#define VSYNC_POL 1
+#define CRC 0x9d
+
+#include "edid.S"
index 2d0a8f0..7146db1 100644 (file)
@@ -18,12 +18,12 @@ CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows to provide an
 individually prepared or corrected EDID data set in the /lib/firmware
 directory from where it is loaded via the firmware interface. The code
 (see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for
-commonly used screen resolutions (1024x768, 1280x1024, 1680x1050,
-1920x1080) as binary blobs, but the kernel source tree does not contain
-code to create these data. In order to elucidate the origin of the
-built-in binary EDID blobs and to facilitate the creation of individual
-data for a specific misbehaving monitor, commented sources and a
-Makefile environment are given here.
+commonly used screen resolutions (1024x768, 1280x1024, 1600x1200,
+1680x1050, 1920x1080) as binary blobs, but the kernel source tree does
+not contain code to create these data. In order to elucidate the origin
+of the built-in binary EDID blobs and to facilitate the creation of
+individual data for a specific misbehaving monitor, commented sources
+and a Makefile environment are given here.
 
 To create binary EDID and C source code files from the existing data
 material, simply type "make".
diff --git a/Documentation/devicetree/bindings/drm/exynos/g2d.txt b/Documentation/devicetree/bindings/drm/exynos/g2d.txt
deleted file mode 100644 (file)
index 1eb124d..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Samsung 2D Graphic Accelerator using DRM frame work
-
-Samsung FIMG2D is a graphics 2D accelerator which supports Bit Block Transfer.
-We set the drawing-context registers for configuring rendering parameters and
-then start rendering.
-This driver is for SOCs which contain G2D IPs with version 4.1.
-
-Required properties:
-       -compatible:
-               should be "samsung,exynos-g2d-41".
-       -reg:
-               physical base address of the controller and length
-               of memory mapped region.
-       -interrupts:
-               interrupt combiner values.
-
-Example:
-       g2d {
-               compatible = "samsung,exynos-g2d-41";
-               reg = <0x10850000 0x1000>;
-               interrupts = <0 91 0>;
-       };
index 74e58a4..836a618 100644 (file)
@@ -5065,9 +5065,8 @@ S:        Maintained
 F:     drivers/net/ethernet/marvell/sk*
 
 MARVELL LIBERTAS WIRELESS DRIVER
-M:     Dan Williams <dcbw@redhat.com>
 L:     libertas-dev@lists.infradead.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
@@ -5569,6 +5568,7 @@ F:        include/uapi/linux/if_*
 F:     include/uapi/linux/netdevice.h
 
 NETXEN (1/10) GbE SUPPORT
+M:     Manish Chopra <manish.chopra@qlogic.com>
 M:     Sony Chacko <sony.chacko@qlogic.com>
 M:     Rajesh Borundia <rajesh.borundia@qlogic.com>
 L:     netdev@vger.kernel.org
index 13b7394..1cacda4 100644 (file)
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
        default 8
 
 config IWMMXT
-       bool "Enable iWMMXt support"
+       bool "Enable iWMMXt support" if !CPU_PJ4
        depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-       default y if PXA27x || PXA3xx || ARCH_MMP
+       default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
        help
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
         to deadlock. This workaround puts DSB before executing ISB if
         an abort may occur on cache maintenance.
 
+config ARM_ERRATA_798181
+       bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+       depends on CPU_V7 && SMP
+       help
+         On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+         adequately shooting down all use of the old entries. This
+         option enables the Linux kernel workaround for this erratum
+         which sends an IPI to the CPUs that are running the same ASID
+         as the one being invalidated.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
index dd0c57d..3234875 100644 (file)
@@ -54,7 +54,7 @@
                };
 
                mvsdio@d00d4000 {
-                       pinctrl-0 = <&sdio_pins2>;
+                       pinctrl-0 = <&sdio_pins3>;
                        pinctrl-names = "default";
                        status = "okay";
                        /*
index 8188d13..a195deb 100644 (file)
                                             "mpp50", "mpp51", "mpp52";
                              marvell,function = "sd0";
                        };
+
+                       sdio_pins3: sdio-pins3 {
+                             marvell,pins = "mpp48", "mpp49", "mpp50",
+                                            "mpp51", "mpp52", "mpp53";
+                             marvell,function = "sd0";
+                       };
                };
 
                gpio0: gpio@d0018100 {
index 9de9309..aaa63d0 100644 (file)
 
                prcmu: prcmu@80157000 {
                        compatible = "stericsson,db8500-prcmu";
-                       reg = <0x80157000 0x1000>;
-                       reg-names = "prcmu";
+                       reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
+                       reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
                        interrupts = <0 47 0x4>;
                        #address-cells = <1>;
                        #size-cells = <1>;
index bd83b8f..c3573be 100644 (file)
@@ -77,6 +77,7 @@
                };
 
                nand@3000000 {
+                       chip-delay = <40>;
                        status = "okay";
 
                        partition@0 {
index 8aad00f..f7bec3b 100644 (file)
@@ -13,6 +13,9 @@
        compatible = "marvell,orion5x";
        interrupt-parent = <&intc>;
 
+       aliases {
+               gpio0 = &gpio0;
+       };
        intc: interrupt-controller {
                compatible = "marvell,orion-intc", "marvell,intc";
                interrupt-controller;
@@ -32,7 +35,9 @@
                        #gpio-cells = <2>;
                        gpio-controller;
                        reg = <0x10100 0x40>;
-                       ngpio = <32>;
+                       ngpios = <32>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <6>, <7>, <8>, <9>;
                };
 
@@ -91,7 +96,7 @@
                        reg = <0x90000 0x10000>,
                              <0xf2200000 0x800>;
                        reg-names = "regs", "sram";
-                       interrupts = <22>;
+                       interrupts = <28>;
                        status = "okay";
                };
        };
index 720799f..dff714d 100644 (file)
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
        void (*delay)(unsigned long);
        void (*const_udelay)(unsigned long);
        void (*udelay)(unsigned long);
-       bool const_clock;
+       unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)             arm_delay_ops.delay(n)
index 8c5e828..91b99ab 100644 (file)
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
index 863a661..a7b85e0 100644 (file)
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else  /* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
index 4db8c88..9e9c041 100644 (file)
@@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void)
                isb();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+       /*
+        * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+        */
+       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+       dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  *     flush_pmd_entry
  *
index 3248cde..fefd7f9 100644 (file)
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
  */
 
 .macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site.  Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad   #4)
        stmdb   sp!, {r0-r3, lr}
+ UNWIND(.save  {r0-r3, lr})
 .endm
 
 .macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
 .endm
 
 ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
        mov     ip, lr
        ldmia   sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
 #else
        __mcount
 #endif
+UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
+UNWIND(.fnstart)
        __ftrace_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_caller)
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
        __ftrace_graph_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
 #endif
 
index e0eb9a1..8bac553 100644 (file)
@@ -267,7 +267,7 @@ __create_page_tables:
        addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
 
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
        sub     r4, r4, #4                      @ Fixup page table pointer
                                                @ for 64-bit descriptors
 #endif
index 96093b7..5dc1aa6 100644 (file)
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
        }
 
        if (err) {
-               pr_warning("CPU %d debug is powered down!\n", cpu);
+               pr_warn_once("CPU %d debug is powered down!\n", cpu);
                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
                return;
        }
@@ -987,7 +987,7 @@ clear_vcr:
        isb();
 
        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-               pr_warning("CPU %d failed to disable vector catch\n", cpu);
+               pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
                return;
        }
 
@@ -1007,7 +1007,7 @@ clear_vcr:
        }
 
        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-               pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+               pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
                return;
        }
 
index 3f6cbb2..d343a6c 100644 (file)
@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...)
        printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+       unsigned int divide_instrs;
+
+       if (cpu_architecture() < CPU_ARCH_ARMv7)
+               return;
+
+       divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+       switch (divide_instrs) {
+       case 2:
+               elf_hwcap |= HWCAP_IDIVA;
+       case 1:
+               elf_hwcap |= HWCAP_IDIVT;
+       }
+}
+
 static void __init feat_v6_fixup(void)
 {
        int id = read_cpuid_id();
@@ -483,8 +500,11 @@ static void __init setup_processor(void)
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
+
+       cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-       elf_hwcap &= ~HWCAP_THUMB;
+       elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
        feat_v6_fixup();
@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
index 79078ed..1f2cccc 100644 (file)
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;
 
-       if (arm_delay_ops.const_clock)
-               return NOTIFY_OK;
-
        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
index bd03005..e82e1d2 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
        local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+       unsigned int midr = read_cpuid_id();
+
+       /* Cortex-A15 r0p0..r3p2 affected */
+       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+               return 0;
+       return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+       return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+       dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+       if (!erratum_a15_798181())
+               return;
+
+       dummy_flush_tlb_a15_erratum();
+       smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+                              NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+       int cpu;
+       cpumask_t mask = { CPU_BITS_NONE };
+
+       if (!erratum_a15_798181())
+               return;
+
+       dummy_flush_tlb_a15_erratum();
+       for_each_online_cpu(cpu) {
+               if (cpu == smp_processor_id())
+                       continue;
+               /*
+                * We only need to send an IPI if the other CPUs are running
+                * the same ASID as the one being invalidated. There is no
+                * need for locking around the active_asids check since the
+                * switch_mm() function has at least one dmb() (as required by
+                * this workaround) in case a context switch happens on
+                * another CPU after the condition below.
+                */
+               if (atomic64_read(&mm->context.id) ==
+                   atomic64_read(&per_cpu(active_asids, cpu)))
+                       cpumask_set_cpu(cpu, &mask);
+       }
+       smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
+       broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
+       broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                                        &ta, 1);
        } else
                local_flush_tlb_page(vma, uaddr);
+       broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
+       broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                                        &ta, 1);
        } else
                local_flush_tlb_range(vma, start, end);
+       broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
+       broadcast_tlb_a15_erratum();
 }
 
 void flush_bp_all(void)
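
For reference, the MIDR screen in erratum_a15_798181() above matches implementer 0x41 (ARM) and part number 0xC0F (Cortex-A15) while masking out the variant (bits 23:20) and revision (bits 3:0) fields, and the upper bound 0x413fc0f2 then excludes anything newer than r3p2. A standalone sketch of the same check, with a couple of example MIDR values:

/* Standalone illustration of the MIDR check used by erratum_a15_798181():
 * the mask keeps implementer/architecture/part number and ignores
 * variant and revision, then the comparison caps the range at r3p2. */
#include <stdio.h>

static int is_affected_a15(unsigned int midr)
{
	/* Cortex-A15 r0p0..r3p2 affected */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", is_affected_a15(0x412fc0f2)); /* r2p2: 1, affected     */
	printf("%d\n", is_affected_a15(0x414fc0f0)); /* r4p0: 0, out of range */
	return 0;
}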
index c9a1731..0e4cfe1 100644 (file)
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
                          lr, irq, vgic_cpu->vgic_lr[lr]);
                BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-               goto out;
+               return true;
        }
 
        /* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
        vgic_cpu->vgic_irq_lr_map[irq] = lr;
        set_bit(lr, vgic_cpu->lr_used);
 
-out:
        if (!vgic_irq_is_edge(vcpu, irq))
                vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
 
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
        kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
 
-       /*
-        * We do not need to take the distributor lock here, since the only
-        * action we perform is clearing the irq_active_bit for an EOIed
-        * level interrupt.  There is a potential race with
-        * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-        * check if the interrupt is already active. Two possibilities:
-        *
-        * - The queuing is occurring on the same vcpu: cannot happen,
-        *   as we're already in the context of this vcpu, and
-        *   executing the handler
-        * - The interrupt has been migrated to another vcpu, and we
-        *   ignore this interrupt for this run. Big deal. It is still
-        *   pending though, and will get considered when this vcpu
-        *   exits.
-        */
        if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
                /*
                 * Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
                        } else {
                                vgic_cpu_irq_clear(vcpu, irq);
                        }
+
+                       /*
+                        * Despite being EOIed, the LR may not have
+                        * been marked as empty.
+                        */
+                       set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+                       vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
                }
        }
 
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
  */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
        if (!irqchip_in_kernel(vcpu->kvm))
                return;
 
+       spin_lock(&dist->lock);
        __kvm_vgic_sync_hwstate(vcpu);
+       spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
index 6b93f6a..64dbfa5 100644 (file)
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
 static void __timer_const_udelay(unsigned long xloops)
 {
        unsigned long long loops = xloops;
-       loops *= loops_per_jiffy;
+       loops *= arm_delay_ops.ticks_per_jiffy;
        __timer_delay(loops >> UDELAY_SHIFT);
 }
 
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
                pr_info("Switching to timer-based delay loop\n");
                delay_timer                     = timer;
                lpj_fine                        = timer->freq / HZ;
-               loops_per_jiffy                 = lpj_fine;
+
+               /* cpufreq may scale loops_per_jiffy, so keep a private copy */
+               arm_delay_ops.ticks_per_jiffy   = lpj_fine;
                arm_delay_ops.delay             = __timer_delay;
                arm_delay_ops.const_udelay      = __timer_const_udelay;
                arm_delay_ops.udelay            = __timer_udelay;
-               arm_delay_ops.const_clock       = true;
+
                delay_calibrated                = true;
        } else {
                pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
index e698f26..52e4bb5 100644 (file)
 
 static struct map_desc cns3xxx_io_desc[] __initdata = {
        {
-               .virtual        = CNS3XXX_TC11MP_TWD_BASE_VIRT,
-               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
-               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
-               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
-               .length         = SZ_4K,
+               .virtual        = CNS3XXX_TC11MP_SCU_BASE_VIRT,
+               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
+               .length         = SZ_8K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = CNS3XXX_TIMER1_2_3_BASE_VIRT,
index 191c8e5..b1021aa 100644 (file)
 #define RTC_INTR_STS_OFFSET                    0x34
 
 #define CNS3XXX_MISC_BASE                      0x76000000      /* Misc Control */
-#define CNS3XXX_MISC_BASE_VIRT                 0xFFF07000      /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT                 0xFB000000      /* Misc Control */
 
 #define CNS3XXX_PM_BASE                                0x77000000      /* Power Management Control */
-#define CNS3XXX_PM_BASE_VIRT                   0xFFF08000
+#define CNS3XXX_PM_BASE_VIRT                   0xFB001000
 
 #define PM_CLK_GATE_OFFSET                     0x00
 #define PM_SOFT_RST_OFFSET                     0x04
 #define PM_PLL_HM_PD_OFFSET                    0x1C
 
 #define CNS3XXX_UART0_BASE                     0x78000000      /* UART 0 */
-#define CNS3XXX_UART0_BASE_VIRT                        0xFFF09000
+#define CNS3XXX_UART0_BASE_VIRT                        0xFB002000
 
 #define CNS3XXX_UART1_BASE                     0x78400000      /* UART 1 */
 #define CNS3XXX_UART1_BASE_VIRT                        0xFFF0A000
 #define CNS3XXX_I2S_BASE_VIRT                  0xFFF10000
 
 #define CNS3XXX_TIMER1_2_3_BASE                        0x7C800000      /* Timer */
-#define CNS3XXX_TIMER1_2_3_BASE_VIRT           0xFFF10800
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT           0xFB003000
 
 #define TIMER1_COUNTER_OFFSET                  0x00
 #define TIMER1_AUTO_RELOAD_OFFSET              0x04
  * Testchip peripheral and fpga gic regions
  */
 #define CNS3XXX_TC11MP_SCU_BASE                        0x90000000      /* IRQ, Test chip */
-#define CNS3XXX_TC11MP_SCU_BASE_VIRT           0xFF000000
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT           0xFB004000
 
 #define CNS3XXX_TC11MP_GIC_CPU_BASE            0x90000100      /* Test chip interrupt controller CPU interface */
-#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT       0xFF000100
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT       (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
 
 #define CNS3XXX_TC11MP_TWD_BASE                        0x90000600
-#define CNS3XXX_TC11MP_TWD_BASE_VIRT           0xFF000600
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT           (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
 
 #define CNS3XXX_TC11MP_GIC_DIST_BASE           0x90001000      /* Test chip interrupt controller distributor */
-#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT      0xFF001000
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT      (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
 
 #define CNS3XXX_TC11MP_L220_BASE               0x92002000      /* L220 registers */
 #define CNS3XXX_TC11MP_L220_BASE_VIRT          0xFF002000
index d2afb4d..b5cc77d 100644 (file)
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
 
 static inline void putc(int c)
 {
-       /* Transmit fifo not full?  */
-       while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
-               ;
+       int i;
+
+       for (i = 0; i < 10000; i++) {
+               /* Transmit fifo not full? */
+               if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
+                       break;
+       }
 
        __raw_writeb(c, PHYS_UART_DATA);
 }
index 5a800bf..5bf4a97 100644 (file)
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
 
 extern void imx_enable_cpu(int cpu, bool enable);
 extern void imx_set_cpu_jump(int cpu, void *jump_addr);
+extern u32 imx_get_cpu_arg(int cpu);
+extern void imx_set_cpu_arg(int cpu, u32 arg);
 extern void v7_cpu_resume(void);
 extern u32 *pl310_get_save_ptr(void);
 #ifdef CONFIG_SMP
index 7bc5fe1..361a253 100644 (file)
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
 void imx_cpu_die(unsigned int cpu)
 {
        cpu_enter_lowpower();
+       /*
+        * We use the cpu jumping argument register to sync with
+        * imx_cpu_kill() which is running on cpu0 and waiting for
+        * the register being cleared to kill the cpu.
+        */
+       imx_set_cpu_arg(cpu, ~0);
        cpu_do_idle();
 }
 
 int imx_cpu_kill(unsigned int cpu)
 {
+       unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+       while (imx_get_cpu_arg(cpu) == 0)
+               if (time_after(jiffies, timeout))
+                       return 0;
        imx_enable_cpu(cpu, false);
+       imx_set_cpu_arg(cpu, 0);
        return 1;
 }
index e15f155..09a742f 100644 (file)
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
                       src_base + SRC_GPR1 + cpu * 8);
 }
 
+u32 imx_get_cpu_arg(int cpu)
+{
+       cpu = cpu_logical_map(cpu);
+       return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
+void imx_set_cpu_arg(int cpu, u32 arg)
+{
+       cpu = cpu_logical_map(cpu);
+       writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
 void imx_src_prepare_restart(void)
 {
        u32 val;
index 1c6e736..08dd739 100644 (file)
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
 
 static struct mvsdio_platform_data guruplug_mvsdio_data = {
        /* unfortunately the CD signal has not been connected */
+       .gpio_card_detect = -1,
+       .gpio_write_protect = -1,
 };
 
 static struct gpio_led guruplug_led_pins[] = {
index 8ddd69f..6a6eb54 100644 (file)
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
 
 static struct mvsdio_platform_data openrd_mvsdio_data = {
        .gpio_card_detect = 29, /* MPP29 used as SD card detect */
+       .gpio_write_protect = -1,
 };
 
 static unsigned int openrd_mpp_config[] __initdata = {
index c7d93b4..d242231 100644 (file)
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
 
 static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
        .gpio_card_detect = 28,
+       .gpio_write_protect = -1,
 };
 
 static unsigned int rd88f6281_mpp_config[] __initdata = {
index 2969027..f9fd77e 100644 (file)
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
 {
        u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 
-       writel_relaxed(0, event_base + TIMER_CLEAR);
+       ctrl &= ~TIMER_ENABLE_EN;
+       writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+
+       writel_relaxed(ctrl, event_base + TIMER_CLEAR);
        writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
        writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
        return 0;
index 274ff58..6a9195e 100644 (file)
@@ -44,6 +44,8 @@
 
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS         (28)
 
+#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ       (5)
+
 #define ACTIVE_DOORBELLS                       (8)
 
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -62,7 +64,7 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
 #ifdef CONFIG_SMP
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+       if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hwirq, main_int_base +
                                ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
        else
@@ -79,7 +81,7 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
 #ifdef CONFIG_SMP
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+       if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hwirq, main_int_base +
                                ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        else
@@ -147,7 +149,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
        writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        irq_set_status_flags(virq, IRQ_LEVEL);
 
-       if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+       if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
                irq_set_percpu_devid(virq);
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                        handle_percpu_devid_irq);
index cb7c6ae..6c4f766 100644 (file)
@@ -538,15 +538,6 @@ static struct clk usb_hhc_ck16xx = {
 };
 
 static struct clk usb_dc_ck = {
-       .name           = "usb_dc_ck",
-       .ops            = &clkops_generic,
-       /* Direct from ULPD, no parent */
-       .rate           = 48000000,
-       .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
-       .enable_bit     = USB_REQ_EN_SHIFT,
-};
-
-static struct clk usb_dc_ck7xx = {
        .name           = "usb_dc_ck",
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
        CLK(NULL,       "usb_clko",     &usb_clko,      CK_16XX | CK_1510 | CK_310),
        CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck1510, CK_1510 | CK_310),
        CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck16xx, CK_16XX),
-       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck,     CK_16XX),
-       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck7xx,  CK_7XX),
+       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck,     CK_16XX | CK_7XX),
        CLK(NULL,       "mclk",         &mclk_1510,     CK_1510 | CK_310),
        CLK(NULL,       "mclk",         &mclk_16xx,     CK_16XX),
        CLK(NULL,       "bclk",         &bclk_1510,     CK_1510 | CK_310),
index a3e0aaa..1322ed7 100644 (file)
@@ -38,7 +38,7 @@
 #include "gpmc-smc91x.h"
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 #include "mux.h"
 #include "hsmmc.h"
index ce812de..2612eea 100644 (file)
@@ -35,7 +35,7 @@
 #include "common.h"
 #include <linux/omap-dma.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "gpmc.h"
 #include "gpmc-smc91x.h"
index 9fb8590..1d6c288 100644 (file)
@@ -35,8 +35,7 @@
 
 #include "common.h"
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "am35xx-emac.h"
 #include "mux.h"
index af2bb21..bccd3e5 100644 (file)
@@ -41,8 +41,7 @@
 
 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
 
 #include "common.h"
index 53056c3..12d2126 100644 (file)
@@ -43,8 +43,7 @@
 #include "gpmc.h"
 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/input/matrix_keypad.h>
index 812c829..0c1bdd7 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/mach/map.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 #include "common.h"
 #include "mux.h"
index bf92678..e979d48 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/mach/arch.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 #include <linux/platform_data/mtd-onenand-omap2.h>
 
 #include "common.h"
index b12fe96..8a8e505 100644 (file)
@@ -41,7 +41,7 @@
 #include "gpmc-smsc911x.h"
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 #include "board-flash.h"
 #include "mux.h"
index c3558f9..0ce91af 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/mach/flash.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 #include <linux/platform_data/mtd-nand-omap2.h>
 
 #include "common.h"
index 48789e0..233a0d5 100644 (file)
@@ -51,7 +51,7 @@
 #include "common.h"
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "soc.h"
 #include "mux.h"
index 95c10b3..495b989 100644 (file)
@@ -44,8 +44,7 @@
 #include "gpmc.h"
 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
 
index 86bab51..6308332 100644 (file)
@@ -47,8 +47,7 @@
 #include <asm/mach/map.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "common.h"
 #include "mux.h"
index 3d58f33..0c6834a 100644 (file)
  */
 #define OMAP4_DPLL_ABE_DEFFREQ                         98304000
 
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ                         960000000
+
 /* Root clocks */
 
 DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
                    OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
                    hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
 
+DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
+               OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+               OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
+
 DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
                OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
                OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "per_mcbsp4_gfclk",                     &per_mcbsp4_gfclk,      CK_443X),
        CLK(NULL,       "hsmmc1_fclk",                  &hsmmc1_fclk,   CK_443X),
        CLK(NULL,       "hsmmc2_fclk",                  &hsmmc2_fclk,   CK_443X),
+       CLK(NULL,       "ocp2scp_usb_phy_phy_48m",      &ocp2scp_usb_phy_phy_48m,       CK_443X),
        CLK(NULL,       "sha2md5_fck",                  &sha2md5_fck,   CK_443X),
        CLK(NULL,       "slimbus1_fclk_1",              &slimbus1_fclk_1,       CK_443X),
        CLK(NULL,       "slimbus1_fclk_0",              &slimbus1_fclk_0,       CK_443X),
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
        if (rc)
                pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
+       /*
+        * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+        * domain can transition to retention state when not in use.
+        */
+       rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
        return 0;
 }
index 40f4a03..d6ba13e 100644 (file)
@@ -293,5 +293,8 @@ extern void omap_reserve(void);
 struct omap_hwmod;
 extern int omap_dss_reset(struct omap_hwmod *);
 
+/* SoC specific clock initializer */
+extern int (*omap_clk_init)(void);
+
 #endif /* __ASSEMBLER__ */
 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
index 4be5cfc..9c49bbe 100644 (file)
@@ -27,9 +27,7 @@
 #include <linux/gpio.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
-#include <video/omap-panel-nokia-dsi.h>
-#include <video/omap-panel-picodlp.h>
+#include <video/omap-panel-data.h>
 
 #include "soc.h"
 #include "dss-common.h"
index 2c3fdd6..5c445ca 100644 (file)
 #include "prm3xxx.h"
 #include "prm44xx.h"
 
+/*
+ * omap_clk_init: points to a function that does the SoC-specific
+ * clock initializations
+ */
+int (*omap_clk_init)(void);
+
 /*
  * The machine specific code may provide the extra mapping besides the
  * default mapping provided here.
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
        omap242x_clockdomains_init();
        omap2420_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap2420_clk_init();
+       omap_clk_init = omap2420_clk_init;
 }
 
 void __init omap2420_init_late(void)
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
        omap243x_clockdomains_init();
        omap2430_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap2430_clk_init();
+       omap_clk_init = omap2430_clk_init;
 }
 
 void __init omap2430_init_late(void)
@@ -462,7 +468,7 @@ void __init omap3_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap3xxx_clk_init();
+       omap_clk_init = omap3xxx_clk_init;
 }
 
 void __init omap3430_init_early(void)
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap3xxx_clk_init();
+       omap_clk_init = omap3xxx_clk_init;
 }
 
 void __init omap3_init_late(void)
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
        am33xx_clockdomains_init();
        am33xx_hwmod_init();
        omap_hwmod_init_postsetup();
-       am33xx_clk_init();
+       omap_clk_init = am33xx_clk_init;
 }
 #endif
 
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
        omap44xx_clockdomains_init();
        omap44xx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap4xxx_clk_init();
+       omap_clk_init = omap4xxx_clk_init;
 }
 
 void __init omap4430_init_late(void)
index c2c798c..a202a47 100644 (file)
@@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+               if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
                        idlemode = HWMOD_IDLEMODE_NO;
                } else {
                        if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+               if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
+                   (oh->flags & HWMOD_FORCE_MSTANDBY)) {
                        idlemode = HWMOD_IDLEMODE_FORCE;
                } else {
                        if (sf & SYSC_HAS_ENAWAKEUP)
index d43d9b6..d5dc935 100644 (file)
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
  *
  * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
  *     of idle, rather than relying on module smart-idle
- * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
- *     of standby, rather than relying on module smart-standby
+ * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
+ *     out of standby, rather than relying on module smart-standby
  * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
  *     SDRAM controller, etc. XXX probably belongs outside the main hwmod file
  *     XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
  *     correctly, or this is being abused to deal with some PM latency
  *     issues -- but we're currently suffering from a shortage of
  *     folks who are able to track these issues down properly.
+ * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that the device
+ *     is kept in force-standby mode. Failing to do so causes PM problems
+ *     with musb on OMAP3630 at least. Note that musb has a dedicated register
+ *     to control MSTANDBY signal when MIDLEMODE is set to force-standby.
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_16BIT_REG                                (1 << 8)
 #define HWMOD_EXT_OPT_MAIN_CLK                 (1 << 9)
 #define HWMOD_BLOCK_WFI                                (1 << 10)
+#define HWMOD_FORCE_MSTANDBY                   (1 << 11)
 
 /*
  * omap_hwmod._int_flags definitions
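
The comment block above documents the new HWMOD_FORCE_MSTANDBY flag, and the earlier _enable_sysc()/_idle_sysc() hunks give it priority over HWMOD_SWSUP_MSTANDBY when a MIDLEMODE is chosen. A small standalone sketch of that flag-priority selection follows; the enum and the helper name are illustrative, not the hwmod code, and only the two flag values come from the patch.

#include <stdio.h>

#define HWMOD_SWSUP_MSTANDBY   (1 << 1)
#define HWMOD_FORCE_MSTANDBY   (1 << 11)

enum idlemode { IDLEMODE_FORCE, IDLEMODE_NO, IDLEMODE_SMART };

/* Mirror of the priority the hunk introduces on enable: force-standby
 * wins over software-supervised standby, which wins over smart-standby. */
static enum idlemode pick_enable_midlemode(unsigned int flags)
{
	if (flags & HWMOD_FORCE_MSTANDBY)
		return IDLEMODE_FORCE;
	if (flags & HWMOD_SWSUP_MSTANDBY)
		return IDLEMODE_NO;
	return IDLEMODE_SMART;
}

int main(void)
{
	static const char *names[] = { "force", "no", "smart" };

	printf("musb-style module: %s\n",
	       names[pick_enable_midlemode(HWMOD_FORCE_MSTANDBY)]);
	printf("swsup module:      %s\n",
	       names[pick_enable_midlemode(HWMOD_SWSUP_MSTANDBY)]);
	printf("default module:    %s\n", names[pick_enable_midlemode(0)]);
	return 0;
}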
index ac7e03e..5112d04 100644 (file)
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
         * Erratum ID: i479  idle_req / idle_ack mechanism potentially
         * broken when autoidle is enabled
         * workaround is to disable the autoidle bit at module level.
+        *
+        * Enabling the device in any other MIDLEMODE setting but force-idle
+        * causes core_pwrdm to not enter idle states at least on OMAP3630.
+        * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY
+        * signal when MIDLEMODE is set to force-idle.
         */
        .flags          = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
-                               | HWMOD_SWSUP_MSTANDBY,
+                               | HWMOD_FORCE_MSTANDBY,
 };
 
 /* usb_otg_hs */
index 0e47d2e..9e05765 100644 (file)
@@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
        { }
 };
 
+static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = {
+       { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" },
+};
+
 /* ocp2scp_usb_phy */
 static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
        .name           = "ocp2scp_usb_phy",
@@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
                },
        },
        .dev_attr       = ocp2scp_dev_attr,
+       .opt_clks       = ocp2scp_usb_phy_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),
 };
 
 /*
index 2bdd4cf..f62b509 100644 (file)
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
                               clksrc_nr, clksrc_src)                   \
 void __init omap##name##_gptimer_timer_init(void)                      \
 {                                                                      \
+       if (omap_clk_init)                                              \
+               omap_clk_init();                                        \
        omap_dmtimer_init();                                            \
        omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);    \
        omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src);        \
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void)                   \
                                clksrc_nr, clksrc_src)                  \
 void __init omap##name##_sync32k_timer_init(void)              \
 {                                                                      \
+       if (omap_clk_init)                                              \
+               omap_clk_init();                                        \
        omap_dmtimer_init();                                            \
        omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);    \
        /* Enable the use of clocksource="gp_timer" kernel parameter */ \
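
The io.c hunks above record the SoC-specific clock initializer in the omap_clk_init function pointer instead of calling it during early init, and the timer.c hunk invokes it later from the timer setup macros. Here is a minimal standalone C sketch of that deferred-initializer pattern; the function names are placeholders for illustration, not the OMAP ones.

#include <stdio.h>

/* Set during early, SoC-specific setup; consumed later by common code. */
static int (*soc_clk_init)(void);

static int fake_omap4_clk_init(void)
{
	puts("registering OMAP4-style clocks");
	return 0;
}

static void soc_early_init(void)
{
	/* Record which initializer to run instead of calling it now, so
	 * the common timer code controls when the clocks actually exist. */
	soc_clk_init = fake_omap4_clk_init;
}

static void common_timer_init(void)
{
	if (soc_clk_init)
		soc_clk_init();
	puts("timer init can now look up its clocks");
}

int main(void)
{
	soc_early_init();
	common_timer_init();
	return 0;
}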
index 051b62c..7f2cb6c 100644 (file)
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
 #endif
 
 struct mmci_platform_data mop500_sdi0_data = {
-       .ios_handler    = mop500_sdi0_ios_handler,
        .ocr_mask       = MMC_VDD_29_30,
        .f_max          = 50000000,
        .capabilities   = MMC_CAP_4_BIT_DATA |
index b034578..87d2d7b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
 #include <linux/platform_data/i2c-nomadik.h>
@@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev)
        regulator_put(prox_regulator);
 }
 
+void mop500_snowball_ethernet_clock_enable(void)
+{
+       struct clk *clk;
+
+       clk = clk_get_sys("fsmc", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
 static struct cryp_platform_data u8500_cryp1_platform_data = {
                .mem_to_engine = {
                                .dir = STEDMA40_MEM_TO_PERIPH,
@@ -683,6 +693,8 @@ static void __init snowball_init_machine(void)
        mop500_audio_init(parent);
        mop500_uart_init(parent);
 
+       mop500_snowball_ethernet_clock_enable();
+
        /* This board has full regulator constraints */
        regulator_has_full_constraints();
 }
index eaa605f..d38951b 100644 (file)
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
 void __init snowball_pinmaps_init(void);
 void __init hrefv60_pinmaps_init(void);
 void mop500_audio_init(struct device *parent);
+void mop500_snowball_ethernet_clock_enable(void);
 
 int __init mop500_uib_init(void);
 void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
index 19235cf..f1a5818 100644 (file)
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
        /* Pinmaps must be in place before devices register */
        if (of_machine_is_compatible("st-ericsson,mop500"))
                mop500_pinmaps_init();
-       else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
+       else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
                snowball_pinmaps_init();
-       else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
+               mop500_snowball_ethernet_clock_enable();
+       } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
                hrefv60_pinmaps_init();
        else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
                /* TODO: Add pinmaps for ccu9540 board. */
index c2f3739..c465fac 100644 (file)
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
        int lockregs;
        int i;
 
-       switch (cache_id) {
+       switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                lockregs = 8;
                break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
        if (cache_id_part_number_from_dt)
                cache_id = cache_id_part_number_from_dt;
        else
-               cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
-                       & L2X0_CACHE_ID_PART_MASK;
+               cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
        aux &= aux_mask;
        aux |= aux_val;
 
        /* Determine the number of ways */
-       switch (cache_id) {
+       switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
                        ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
                .flush_all   = l2x0_flush_all,
                .inv_all     = l2x0_inv_all,
                .disable     = l2x0_disable,
-               .set_debug   = pl310_set_debug,
        },
 };
 
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
                data->save();
 
        of_init = true;
-       l2x0_init(l2x0_base, aux_val, aux_mask);
-
        memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+       l2x0_init(l2x0_base, aux_val, aux_mask);
 
        return 0;
 }
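
The l2x0 hunks above keep the full CACHE_ID register value and apply L2X0_CACHE_ID_PART_MASK only where the part number is actually compared, so the revision bits remain available to other code. A standalone sketch of masking a composite ID at the point of comparison; the constant names and field widths below are illustrative stand-ins, not the L2X0 definitions.

#include <stdint.h>
#include <stdio.h>

#define CACHE_ID_PART_MASK  (0xf << 6)
#define CACHE_ID_PART_L310  (3 << 6)
#define CACHE_ID_RTL_MASK   0x3f

static int lockdown_regs_for(uint32_t cache_id)
{
	/* Compare only the part-number field; leave the rest of the ID
	 * (e.g. the RTL revision) intact for callers that need it. */
	switch (cache_id & CACHE_ID_PART_MASK) {
	case CACHE_ID_PART_L310:
		return 8;
	default:
		return 1;
	}
}

int main(void)
{
	uint32_t cache_id = CACHE_ID_PART_L310 | 0x9;   /* part number plus a revision */

	printf("lock regs: %d, rtl rev: %u\n",
	       lockdown_regs_for(cache_id),
	       (unsigned)(cache_id & CACHE_ID_RTL_MASK));
	return 0;
}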
index a5a4b2b..2ac3737 100644 (file)
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
+               dummy_flush_tlb_a15_erratum();
        }
 
        atomic64_set(&per_cpu(active_asids, cpu), asid);
index e95a996..7897894 100644 (file)
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-                                     unsigned long end, phys_addr_t phys,
-                                     const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+                       unsigned long end, phys_addr_t phys,
+                       const struct mem_type *type)
 {
-       pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
        /*
-        * Try a section mapping - end, addr and phys must all be aligned
-        * to a section boundary.  Note that PMDs refer to the individual
-        * L1 entries, whereas PGDs refer to a group of L1 entries making
-        * up one logical pointer to an L2 table.
+        * In classic MMU format, puds and pmds are folded in to
+        * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+        * group of L1 entries making up one logical pointer to
+        * an L2 table (2MB), whereas PMDs refer to the individual
+        * L1 entries (1MB). Hence increment to get the correct
+        * offset for odd 1MB sections.
+        * (See arch/arm/include/asm/pgtable-2level.h)
         */
-       if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-               pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-               if (addr & SECTION_SIZE)
-                       pmd++;
+       if (addr & SECTION_SIZE)
+               pmd++;
 #endif
+       do {
+               *pmd = __pmd(phys | type->prot_sect);
+               phys += SECTION_SIZE;
+       } while (pmd++, addr += SECTION_SIZE, addr != end);
 
-               do {
-                       *pmd = __pmd(phys | type->prot_sect);
-                       phys += SECTION_SIZE;
-               } while (pmd++, addr += SECTION_SIZE, addr != end);
+       flush_pmd_entry(pmd);
+}
 
-               flush_pmd_entry(p);
-       } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+                                     unsigned long end, phys_addr_t phys,
+                                     const struct mem_type *type)
+{
+       pmd_t *pmd = pmd_offset(pud, addr);
+       unsigned long next;
+
+       do {
                /*
-                * No need to loop; pte's aren't interested in the
-                * individual L1 entries.
+                * With LPAE, we must loop over to map
+                * all the pmds for the given range.
                 */
-               alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-       }
+               next = pmd_addr_end(addr, end);
+
+               /*
+                * Try a section mapping - addr, next and phys must all be
+                * aligned to a section boundary.
+                */
+               if (type->prot_sect &&
+                               ((addr | next | phys) & ~SECTION_MASK) == 0) {
+                       map_init_section(pmd, addr, next, phys, type);
+               } else {
+                       alloc_init_pte(pmd, addr, next,
+                                               __phys_to_pfn(phys), type);
+               }
+
+               phys += next - addr;
+
+       } while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_section(pud, addr, next, phys, type);
+               alloc_init_pmd(pud, addr, next, phys, type);
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
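
The mmu.c rewrite above walks the range with pmd_addr_end() so each iteration covers at most one pmd, and picks a section mapping only when addr, next and phys are all section-aligned. Below is a standalone C sketch of that clamp-and-classify loop over an address range; the sizes and helpers are simplified stand-ins, not the kernel's paging definitions.

#include <stdio.h>

#define SECTION_SIZE  0x100000UL             /* 1 MB, illustrative */
#define SECTION_MASK  (~(SECTION_SIZE - 1))
#define PMD_SIZE      0x200000UL             /* 2 MB, illustrative */
#define PMD_MASK      (~(PMD_SIZE - 1))

/* Clamp the end of this iteration to the next pmd boundary. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary < end ? boundary : end;
}

static void map_range(unsigned long addr, unsigned long end, unsigned long phys)
{
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			printf("section map %#lx-%#lx -> %#lx\n", addr, next, phys);
		else
			printf("pte map     %#lx-%#lx -> %#lx\n", addr, next, phys);

		phys += next - addr;
	} while (addr = next, addr != end);
}

int main(void)
{
	map_range(0xc0000000UL, 0xc0480000UL, 0x80000000UL);
	return 0;
}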
index 3a3c015..f584d3f 100644 (file)
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
 __v7_ca7mp_proc_info:
        .long   0x410fc070
        .long   0xff0ffff0
-       __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+       __v7_proc __v7_ca7mp_setup
        .size   __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
        /*
@@ -430,9 +430,24 @@ __v7_ca7mp_proc_info:
 __v7_ca15mp_proc_info:
        .long   0x410fc0f0
        .long   0xff0ffff0
-       __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+       __v7_proc __v7_ca15mp_setup
        .size   __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
+       /*
+        * Qualcomm Inc. Krait processors.
+        */
+       .type   __krait_proc_info, #object
+__krait_proc_info:
+       .long   0x510f0400              @ Required ID value
+       .long   0xff0ffc00              @ Mask for ID
+       /*
+        * Some Krait processors don't indicate support for SDIV and UDIV
+        * instructions in the ARM instruction set, even though they actually
+        * do support them.
+        */
+       __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+       .size   __krait_proc_info, . - __krait_proc_info
+
        /*
         * Match any ARMv7 processor core.
         */
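
The proc-v7.S addition above matches Krait cores by masking the main ID register against a required value and mask before the generic ARMv7 entry catches everything else. A standalone C sketch of that most-specific-first ID table lookup; the table contents beyond the two values taken from the hunk are illustrative.

#include <stdint.h>
#include <stdio.h>

struct proc_info {
	uint32_t id_val;
	uint32_t id_mask;
	const char *name;
};

/* Ordered most-specific first, ending with a catch-all ARMv7 entry,
 * the same way the proc_info list is scanned. */
static const struct proc_info procs[] = {
	{ 0x510f0400, 0xff0ffc00, "Qualcomm Krait" },
	{ 0x410fc0f0, 0xff0ffff0, "Cortex-A15 MP" },
	{ 0x000f0000, 0x000f0000, "generic ARMv7" },
};

static const char *lookup(uint32_t midr)
{
	for (unsigned i = 0; i < sizeof(procs) / sizeof(procs[0]); i++)
		if ((midr & procs[i].id_mask) == procs[i].id_val)
			return procs[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", lookup(0x511f04d0));   /* a Krait-style MIDR */
	printf("%s\n", lookup(0x410fc0f3));   /* a Cortex-A15 MIDR  */
	return 0;
}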
index cd2e21f..a6fdd16 100644 (file)
@@ -18,7 +18,7 @@ config MIPS
        select HAVE_KRETPROBES
        select HAVE_DEBUG_KMEMLEAK
        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
-       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
        select RTC_LIB if !MACH_LOONGSON
        select GENERIC_ATOMIC64 if !64BIT
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -1493,7 +1493,6 @@ config CPU_XLP
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
-       select CPU_HAS_LLSC
        select WEAK_ORDERING
        select WEAK_REORDERING_BEYOND_LLSC
        select CPU_HAS_PREFETCH
index ed1949c..9aa7d44 100644 (file)
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
                strcpy(cfe_version, "unknown");
        printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
 
-       if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
-               printk(KERN_ERR PFX "invalid nvram checksum\n");
-               return;
-       }
+       bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
 
        board_name = bcm63xx_nvram_get_name();
        /* find board by name */
index 6206116..a4b8864 100644 (file)
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
 static struct bcm963xx_nvram nvram;
 static int mac_addr_used;
 
-int __init bcm63xx_nvram_init(void *addr)
+void __init bcm63xx_nvram_init(void *addr)
 {
        unsigned int check_len;
        u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
        crc = crc32_le(~0, (u8 *)&nvram, check_len);
 
        if (crc != expected_crc)
-               return -EINVAL;
-
-       return 0;
+               pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+                       expected_crc, crc);
 }
 
 u8 *bcm63xx_nvram_get_name(void)
index 314231b..35e18e9 100644 (file)
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
        return board_register_devices();
 }
 
-device_initcall(bcm63xx_register_devices);
+arch_initcall(bcm63xx_register_devices);
index 62d6a3b..4e0b6bc 100644 (file)
@@ -9,10 +9,8 @@
  *
  * Initializes the local nvram copy from the target address and checks
  * its checksum.
- *
- * Returns 0 on success.
  */
-int __init bcm63xx_nvram_init(void *nvram);
+void bcm63xx_nvram_init(void *nvram);
 
 /**
  * bcm63xx_nvram_get_name() - returns the board name according to nvram
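
The bcm63xx hunks above turn a fatal nvram checksum mismatch into a warning so boards with known-bad checksums can still boot. Below is a standalone sketch of the same verify-then-warn pattern using a small bitwise, reflected CRC-32 (crc32_le-style); the nvram layout and board name are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise, reflected CRC-32 (same polynomial family as the kernel's crc32_le). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}

struct nvram {
	uint32_t checksum;
	char     name[16];
};

/* Copy the nvram blob and complain, but never fail, on a bad checksum. */
static void nvram_init(const struct nvram *src, struct nvram *dst)
{
	uint32_t crc, expected;

	memcpy(dst, src, sizeof(*dst));
	expected = dst->checksum;
	dst->checksum = 0;
	crc = crc32_le(~0u, (const uint8_t *)dst, sizeof(*dst));

	if (crc != expected)
		fprintf(stderr,
			"nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
			expected, crc);
}

int main(void)
{
	struct nvram flash = { .checksum = 0xdeadbeef, .name = "96348GW" };
	struct nvram copy;

	nvram_init(&flash, &copy);        /* warns, keeps going */
	printf("board: %s\n", copy.name);
	return 0;
}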
index d9c8284..193c091 100644 (file)
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
-#ifdef CONFIG_CPU_HAS_LLSC
 #define cpu_has_llsc           1
-#else
-#define cpu_has_llsc           0
-#endif
 /* #define cpu_has_vtag_icache ? */
 /* #define cpu_has_dc_aliases  ? */
 /* #define cpu_has_ic_fills_f_dc ? */
index 12b70c2..0da44d4 100644 (file)
@@ -1166,7 +1166,10 @@ do {                                                                     \
        unsigned int __dspctl;                                          \
                                                                        \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
        "       rddsp   %0, %x1                                 \n"     \
+       "       .set pop                                        \n"     \
        : "=r" (__dspctl)                                               \
        : "i" (mask));                                                  \
        __dspctl;                                                       \
@@ -1175,30 +1178,198 @@ do {                                                                   \
 #define wrdsp(val, mask)                                               \
 do {                                                                   \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
        "       wrdsp   %0, %x1                                 \n"     \
+       "       .set pop                                        \n"     \
        :                                                               \
        : "r" (val), "i" (mask));                                       \
 } while (0)
 
-#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
-#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
-#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
-#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
-
-#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
-#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
-#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
-#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
-
-#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
-#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
-#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
-#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
-
-#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
-#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
-#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
-#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
+#define mflo0()                                                                \
+({                                                                     \
+       long mflo0;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo0));                                                \
+       mflo0;                                                          \
+})
+
+#define mflo1()                                                                \
+({                                                                     \
+       long mflo1;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo1));                                                \
+       mflo1;                                                          \
+})
+
+#define mflo2()                                                                \
+({                                                                     \
+       long mflo2;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo2));                                                \
+       mflo2;                                                          \
+})
+
+#define mflo3()                                                                \
+({                                                                     \
+       long mflo3;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo3));                                                \
+       mflo3;                                                          \
+})
+
+#define mfhi0()                                                                \
+({                                                                     \
+       long mfhi0;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi0));                                                \
+       mfhi0;                                                          \
+})
+
+#define mfhi1()                                                                \
+({                                                                     \
+       long mfhi1;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi1));                                                \
+       mfhi1;                                                          \
+})
+
+#define mfhi2()                                                                \
+({                                                                     \
+       long mfhi2;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi2));                                                \
+       mfhi2;                                                          \
+})
+
+#define mfhi3()                                                                \
+({                                                                     \
+       long mfhi3;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi3));                                                \
+       mfhi3;                                                          \
+})
+
+
+#define mtlo0(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mtlo1(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mtlo2(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mtlo3(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi0(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi1(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi2(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi3(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
 
 #else
 
index 197f636..8efe5a9 100644 (file)
@@ -21,6 +21,6 @@
 #include <asm/sigcontext.h>
 #include <asm/siginfo.h>
 
-#define __ARCH_HAS_ODD_SIGACTION
+#define __ARCH_HAS_IRIX_SIGACTION
 
 #endif /* _ASM_SIGNAL_H */
index f81d98f..de75fb5 100644 (file)
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS)      += perf_event_mipsxx.o
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
 #
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
-# to enable DSP assembler support here even if the MIPS Release 2 CPU we
-# are targetting does not support DSP because all code-paths making use of
-# it properly check that the running CPU *actually does* support these
-# instructions.
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
+# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
+# here because the compiler may use DSP ASE instructions (such as lwx) in
+# code paths where we cannot check that the CPU we are running on supports it.
+# Proper abstraction using HAVE_AS_DSP and macros is done in
+# arch/mips/include/asm/mipsregs.h.
 #
 ifeq ($(CONFIG_CPU_MIPSR2), y)
 CFLAGS_DSP                     = -DHAVE_AS_DSP
 
-#
-# Check if assembler supports DSP ASE
-#
-ifeq ($(call cc-option-yn,-mdsp), y)
-CFLAGS_DSP                     += -mdsp
-endif
-
-#
-# Check if assembler supports DSP ASE Rev2
-#
-ifeq ($(call cc-option-yn,-mdspr2), y)
-CFLAGS_DSP                     += -mdspr2
-endif
-
 CFLAGS_signal.o                        = $(CFLAGS_DSP)
 CFLAGS_signal32.o              = $(CFLAGS_DSP)
 CFLAGS_process.o               = $(CFLAGS_DSP)
index 6bfccc2..d069a19 100644 (file)
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                c->tlbsize = 48;
                break;
        case PRID_IMP_VR41XX:
+               set_isa(c, MIPS_CPU_ISA_III);
+               c->options = R4K_OPTS;
+               c->tlbsize = 32;
                switch (c->processor_id & 0xf0) {
                case PRID_REV_VR4111:
                        c->cputype = CPU_VR4111;
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                                __cpu_name[cpu] = "NEC VR4131";
                        } else {
                                c->cputype = CPU_VR4133;
+                               c->options |= MIPS_CPU_LLSC;
                                __cpu_name[cpu] = "NEC VR4133";
                        }
                        break;
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        __cpu_name[cpu] = "NEC Vr41xx";
                        break;
                }
-               set_isa(c, MIPS_CPU_ISA_III);
-               c->options = R4K_OPTS;
-               c->tlbsize = 32;
                break;
        case PRID_IMP_R4300:
                c->cputype = CPU_R4300;
index 8eeee1c..db9655f 100644 (file)
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
                err = compat_sys_shmctl(first, second, compat_ptr(ptr));
                break;
        default:
-               err = -EINVAL;
+               err = -ENOSYS;
                break;
        }
 
index 135c4aa..7a54f74 100644 (file)
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_mips_r) {
                seq_printf(m, "isa\t\t\t:");
                if (cpu_has_mips_1)
-                       seq_printf(m, "%s", "mips1");
+                       seq_printf(m, "%s", " mips1");
                if (cpu_has_mips_2)
                        seq_printf(m, "%s", " mips2");
                if (cpu_has_mips_3)
index 81f1dcf..a64daee 100644 (file)
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a |= mask;
        raw_local_irq_restore(flags);
        return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a |= mask;
        raw_local_irq_restore(flags);
        return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a &= ~mask;
        raw_local_irq_restore(flags);
        return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a ^= mask;
        raw_local_irq_restore(flags);
        return res;
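
The bitops hunks above return (mask & *a) != 0 through an int instead of handing back the raw masked word, which an int return value truncates to zero for bit positions 32 and above on 64-bit. A standalone sketch demonstrating the truncation and the fix on an LP64 host; the function names are illustrative, not the MIPS helpers.

#include <stdio.h>

/* Buggy variant: the old bit is returned as the raw masked word,
 * which the int return value truncates for bits >= 32 on LP64. */
static int test_and_set_bit_buggy(unsigned long nr, unsigned long *word)
{
	unsigned long mask = 1UL << nr;
	unsigned long res = mask & *word;

	*word |= mask;
	return res;                 /* silently truncated to int */
}

/* Fixed variant, as in the hunk above: normalize to 0/1 first. */
static int test_and_set_bit_fixed(unsigned long nr, unsigned long *word)
{
	unsigned long mask = 1UL << nr;
	int res = (mask & *word) != 0;

	*word |= mask;
	return res;
}

int main(void)
{
	unsigned long w1 = 1UL << 40, w2 = 1UL << 40;

	/* On LP64 the buggy version claims bit 40 was clear even though it was set. */
	printf("buggy: %d, fixed: %d\n",
	       test_and_set_bit_buggy(40, &w1),
	       test_and_set_bit_fixed(40, &w2));
	return 0;
}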
index 507147a..a6adffb 100644 (file)
@@ -270,7 +270,7 @@ LEAF(csum_partial)
 #endif
 
        /* odd buffer alignment? */
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
        wsbh    v1, sum
        movn    sum, v1, t7
 #else
@@ -670,7 +670,7 @@ EXC(        sb      t0, NBYTES-2(dst), .Ls_exc)
        addu    sum, v1
 #endif
 
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
        wsbh    v1, sum
        movn    sum, v1, odd
 #else
index 4a29308..4a54431 100644 (file)
@@ -344,6 +344,7 @@ extern unsigned long MODULES_END;
 #define _REGION3_ENTRY_CO      0x100   /* change-recording override        */
 
 /* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address       */
 #define _SEGMENT_ENTRY_ORIGIN  ~0x7ffUL/* segment table origin             */
 #define _SEGMENT_ENTRY_RO      0x200   /* page protection bit              */
 #define _SEGMENT_ENTRY_INV     0x20    /* invalid segment table entry      */
@@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void);
 /*
  * No page table caches to initialise
  */
-#define pgtable_cache_init()   do { } while (0)
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
 
 #include <asm-generic/pgtable.h>
 
index dff631d..466fb33 100644 (file)
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
  * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
  * contains the (negative) exception code.
  */
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
-                                                 unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+static unsigned long follow_table(struct mm_struct *mm,
+                                 unsigned long address, int write)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep;
+       unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+       switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+       case _ASCE_TYPE_REGION1:
+               table = table + ((address >> 53) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x39UL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+       case _ASCE_TYPE_REGION2:
+               table = table + ((address >> 42) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x3aUL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+       case _ASCE_TYPE_REGION3:
+               table = table + ((address >> 31) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x3bUL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+       case _ASCE_TYPE_SEGMENT:
+               table = table + ((address >> 20) & 0x7ff);
+               if (unlikely(*table & _SEGMENT_ENTRY_INV))
+                       return -0x10UL;
+               if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+                       if (write && (*table & _SEGMENT_ENTRY_RO))
+                               return -0x04UL;
+                       return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+                               (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+               }
+               table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+       }
+       table = table + ((address >> 12) & 0xff);
+       if (unlikely(*table & _PAGE_INVALID))
+               return -0x11UL;
+       if (write && (*table & _PAGE_RO))
+               return -0x04UL;
+       return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
 
-       pgd = pgd_offset(mm, addr);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return -0x3aUL;
+#else /* CONFIG_64BIT */
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               return -0x3bUL;
+static unsigned long follow_table(struct mm_struct *mm,
+                                 unsigned long address, int write)
+{
+       unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       table = table + ((address >> 20) & 0x7ff);
+       if (unlikely(*table & _SEGMENT_ENTRY_INV))
                return -0x10UL;
-       if (pmd_large(*pmd)) {
-               if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
-                       return -0x04UL;
-               return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
-       }
-       if (unlikely(pmd_bad(*pmd)))
-               return -0x10UL;
-
-       ptep = pte_offset_map(pmd, addr);
-       if (!pte_present(*ptep))
+       table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+       table = table + ((address >> 12) & 0xff);
+       if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
-       if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+       if (write && (*table & _PAGE_RO))
                return -0x04UL;
-
-       return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+       return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
 
+#endif /* CONFIG_64BIT */
+
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
 {
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 
 static size_t clear_user_pt(size_t n, void __user *to)
 {
-       void *zpage = &empty_zero_page;
+       void *zpage = (void *) empty_zero_page;
        long done, size, ret;
 
        done = 0;
index d1e15f7..7a5aa1a 100644 (file)
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-/*
- * Note that the kernel can potentially support other compression
- * techniques than gz, though we don't do so by default.  If we ever
- * decide to do so we can either look for other filename extensions,
- * or just allow a file with this name to be compressed with an
- * arbitrary compressor (somewhat counterintuitively).
- */
 static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
+static char __initdata initramfs_file[128] = "initramfs";
 
 static int __init setup_initramfs_file(char *str)
 {
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an "initramfs.cpio.gz" file in the hvfs.
- * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs.
+ * We look for a file called "initramfs" in the hvfs.  If there is one, we
+ * allocate some memory for it and it will be unpacked to the initramfs.
+ * If it's compressed, the initrd code will uncompress it first.
  */
 static void __init load_hv_initrd(void)
 {
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
 
        fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
        if (fd == HV_ENOENT) {
-               if (set_initramfs_file)
+               if (set_initramfs_file) {
                        pr_warning("No such hvfs initramfs file '%s'\n",
                                   initramfs_file);
-               return;
+                       return;
+               } else {
+                       /* Try old backwards-compatible name. */
+                       fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
+                       if (fd == HV_ENOENT)
+                               return;
+               }
        }
        BUG_ON(fd < 0);
        stat = hv_fs_fstat(fd);
index e6732cf..79f4fca 100644 (file)
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                        base = 0;
 
                if (max < rbnode->base_reg + rbnode->blklen)
-                       end = rbnode->base_reg + rbnode->blklen - max;
+                       end = max - rbnode->base_reg + 1;
                else
                        end = rbnode->blklen;
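
The regcache fix above computes the last register to sync as an offset relative to the block base (max - base_reg + 1) rather than the reversed subtraction. A standalone sketch of clamping a per-block sync window to a caller-supplied [min, max] register range; the struct is a simplified stand-in for the rbtree node.

#include <stdio.h>

struct block {
	unsigned int base_reg;   /* first register held by this block */
	unsigned int blklen;     /* number of registers in the block  */
};

/* Return the [start, end) offsets inside the block that overlap [min, max]. */
static void sync_window(const struct block *b, unsigned int min,
			unsigned int max, unsigned int *start, unsigned int *end)
{
	if (min > b->base_reg)
		*start = min - b->base_reg;
	else
		*start = 0;

	if (max < b->base_reg + b->blklen)
		*end = max - b->base_reg + 1;     /* the corrected expression */
	else
		*end = b->blklen;
}

int main(void)
{
	struct block b = { .base_reg = 0x10, .blklen = 8 };   /* regs 0x10..0x17 */
	unsigned int s, e;

	sync_window(&b, 0x12, 0x15, &s, &e);
	printf("sync block offsets [%u, %u)\n", s, e);         /* prints [2, 6) */
	return 0;
}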
 
index 3d23675..d34adef 100644 (file)
@@ -710,12 +710,12 @@ skip_format_initialization:
                }
        }
 
+       regmap_debugfs_init(map, config->name);
+
        ret = regcache_init(map, config);
        if (ret != 0)
                goto err_range;
 
-       regmap_debugfs_init(map, config->name);
-
        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
        if (!m) {
@@ -943,8 +943,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                unsigned int ival;
                int val_bytes = map->format.val_bytes;
                for (i = 0; i < val_len / val_bytes; i++) {
-                       memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-                       ival = map->format.parse_val(map->work_buf);
+                       ival = map->format.parse_val(val + (i * val_bytes));
                        ret = regcache_write(map, reg + (i * map->reg_stride),
                                             ival);
                        if (ret) {
@@ -1036,6 +1035,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                        kfree(async->work_buf);
                        kfree(async);
                }
+
+               return ret;
        }
 
        trace_regmap_hw_write_start(map->dev, reg,
index 25ef5c0..92b6d7c 100644 (file)
@@ -51,8 +51,9 @@ new_skb(ulong len)
 {
        struct sk_buff *skb;
 
-       skb = alloc_skb(len, GFP_ATOMIC);
+       skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
        if (skb) {
+               skb_reserve(skb, MAX_HEADER);
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
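
The aoe fix above allocates MAX_HEADER extra bytes and reserves them up front so lower layers can prepend headers without reallocating. Here is a standalone sketch of the allocate-then-reserve-headroom pattern using a plain byte buffer; the struct and sizes are illustrative, not skb internals.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_HEADER 64            /* illustrative headroom budget */

struct buf {
	unsigned char *head;     /* start of the allocation */
	unsigned char *data;     /* start of the payload    */
	size_t         len;      /* bytes of payload        */
};

/* Allocate payload + headroom, then "reserve" by advancing data. */
static struct buf *new_buf(size_t len)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->head = malloc(len + MAX_HEADER);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + MAX_HEADER;   /* headroom available for later pushes */
	b->len = len;
	return b;
}

/* Prepend a header by moving data back into the reserved headroom. */
static unsigned char *buf_push(struct buf *b, size_t hlen)
{
	b->data -= hlen;
	b->len += hlen;
	return b->data;
}

int main(void)
{
	struct buf *b = new_buf(128);

	if (!b)
		return 1;
	memset(buf_push(b, 14), 0, 14);   /* room for an Ethernet-sized header */
	printf("headroom left: %zu\n", (size_t)(b->data - b->head));
	free(b->head);
	free(b);
	return 0;
}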
index fe5f640..2c127f9 100644 (file)
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
        if (lo->lo_flags & LO_FLAGS_PARTSCAN)
                ioctl_by_bdev(bdev, BLKRRPART, 0);
+
+       /* Grab the block_device to prevent its destruction after we
+        * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+        */
+       bdgrab(bdev);
        return 0;
 
 out_clr:
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo)
        memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
        memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
        memset(lo->lo_file_name, 0, LO_NAME_SIZE);
-       if (bdev)
+       if (bdev) {
+               bdput(bdev);
                invalidate_bdev(bdev);
+       }
        set_capacity(lo->lo_disk, 0);
        loop_sysfs_exit(lo);
        if (bdev) {
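
The loop hunks above take a reference on the block device when the backing file is attached (bdgrab) and drop it again in loop_clr_fd (bdput), so the device cannot be destroyed while it is still bound. A standalone sketch of that grab-on-bind / put-on-clear refcount pairing; the types and helpers are illustrative, not the block layer API.

#include <stdio.h>

struct device {
	const char *name;
	int refcount;
};

static void dev_grab(struct device *d) { d->refcount++; }

static void dev_put(struct device *d)
{
	if (--d->refcount == 0)
		printf("%s: last reference dropped, safe to destroy\n", d->name);
}

struct loopdev {
	struct device *bound;
};

/* Bind: take a reference so the device outlives our use of it. */
static void loop_set(struct loopdev *lo, struct device *d)
{
	dev_grab(d);
	lo->bound = d;
}

/* Clear: drop the reference taken at bind time. */
static void loop_clr(struct loopdev *lo)
{
	if (lo->bound) {
		dev_put(lo->bound);
		lo->bound = NULL;
	}
}

int main(void)
{
	struct device bdev = { .name = "loop0", .refcount = 1 };
	struct loopdev lo = { 0 };

	loop_set(&lo, &bdev);
	dev_put(&bdev);          /* the opener goes away; still pinned by the bind */
	loop_clr(&lo);           /* now the last reference drops */
	return 0;
}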
index 69ae597..a0f7724 100644 (file)
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng)
 }
 EXPORT_SYMBOL_GPL(hwrng_unregister);
 
+static void __exit hwrng_exit(void)
+{
+       mutex_lock(&rng_mutex);
+       BUG_ON(current_rng);
+       kfree(rng_buffer);
+       mutex_unlock(&rng_mutex);
+}
+
+module_exit(hwrng_exit);
 
 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
 MODULE_LICENSE("GPL");
index e905d5f..ce5f3fc 100644 (file)
@@ -149,7 +149,8 @@ struct ports_device {
        spinlock_t ports_lock;
 
        /* To protect the vq operations for the control channel */
-       spinlock_t cvq_lock;
+       spinlock_t c_ivq_lock;
+       spinlock_t c_ovq_lock;
 
        /* The current config space is stored here */
        struct virtio_console_config config;
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
        vq = portdev->c_ovq;
 
        sg_init_one(sg, &cpkt, sizeof(cpkt));
+
+       spin_lock(&portdev->c_ovq_lock);
        if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
                virtqueue_kick(vq);
                while (!virtqueue_get_buf(vq, &len))
                        cpu_relax();
        }
+       spin_unlock(&portdev->c_ovq_lock);
        return 0;
 }
 
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id)
                 * rproc_serial does not want the console port, only
                 * the generic port implementation.
                 */
-               port->host_connected = port->guest_connected = true;
+               port->host_connected = true;
        else if (!use_multiport(port->portdev)) {
                /*
                 * If we're not using multiport support,
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work)
        portdev = container_of(work, struct ports_device, control_work);
        vq = portdev->c_ivq;
 
-       spin_lock(&portdev->cvq_lock);
+       spin_lock(&portdev->c_ivq_lock);
        while ((buf = virtqueue_get_buf(vq, &len))) {
-               spin_unlock(&portdev->cvq_lock);
+               spin_unlock(&portdev->c_ivq_lock);
 
                buf->len = len;
                buf->offset = 0;
 
                handle_control_message(portdev, buf);
 
-               spin_lock(&portdev->cvq_lock);
+               spin_lock(&portdev->c_ivq_lock);
                if (add_inbuf(portdev->c_ivq, buf) < 0) {
                        dev_warn(&portdev->vdev->dev,
                                 "Error adding buffer to queue\n");
                        free_buf(buf, false);
                }
        }
-       spin_unlock(&portdev->cvq_lock);
+       spin_unlock(&portdev->c_ivq_lock);
 }
 
 static void out_intr(struct virtqueue *vq)
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq)
        port->inbuf = get_inbuf(port);
 
        /*
-        * Don't queue up data when port is closed.  This condition
+        * Normally the port should not accept data when it is
+        * closed. For generic serial ports, the host won't (shouldn't)
+        * send data till the guest is connected. But this condition
         * can be reached when a console port is not yet connected (no
-        * tty is spawned) and the host sends out data to console
-        * ports.  For generic serial ports, the host won't
-        * (shouldn't) send data till the guest is connected.
+        * tty is spawned) and the other side sends out data over the
+        * vring, or when a remote device starts sending data before
+        * the ports are opened.
+        *
+        * A generic serial port will discard data if not connected,
+        * while console ports and rproc-serial ports accept data at
+        * any time. rproc-serial is initialized with guest_connected set
+        * to false because port_fops_open expects this. Console ports are
+        * hooked up with an HVC console and are initialized with
+        * guest_connected set to true.
         */
-       if (!port->guest_connected)
+
+       if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
                discard_port_data(port);
 
        spin_unlock_irqrestore(&port->inbuf_lock, flags);
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev)
        if (multiport) {
                unsigned int nr_added_bufs;
 
-               spin_lock_init(&portdev->cvq_lock);
+               spin_lock_init(&portdev->c_ivq_lock);
+               spin_lock_init(&portdev->c_ovq_lock);
                INIT_WORK(&portdev->control_work, &control_work_handler);
 
-               nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+               nr_added_bufs = fill_queue(portdev->c_ivq,
+                                          &portdev->c_ivq_lock);
                if (!nr_added_bufs) {
                        dev_err(&vdev->dev,
                                "Error allocating buffers for control queue\n");
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev)
                return ret;
 
        if (use_multiport(portdev))
-               fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+               fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
 
        list_for_each_entry(port, &portdev->ports, list) {
                port->in_vq = portdev->in_vqs[port->id];
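
Splitting the single cvq_lock into c_ivq_lock and c_ovq_lock means a sender on the control-out queue no longer serializes against the work handler draining the control-in queue; each virtqueue is only ever touched under its own lock. A small pthread sketch of the same two-lock idea, with made-up queue depths standing in for the virtqueues (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ivq_lock = PTHREAD_MUTEX_INITIALIZER; /* inbound control queue */
static pthread_mutex_t ovq_lock = PTHREAD_MUTEX_INITIALIZER; /* outbound control queue */
static int ivq_depth, ovq_depth;

static void *drain_inbound(void *arg)       /* like control_work_handler() */
{
	(void)arg;
	pthread_mutex_lock(&ivq_lock);
	while (ivq_depth > 0)
		ivq_depth--;
	pthread_mutex_unlock(&ivq_lock);
	return NULL;
}

static void send_control(void)              /* like __send_control_msg() */
{
	pthread_mutex_lock(&ovq_lock);
	ovq_depth++;                        /* never contends with the drain thread */
	pthread_mutex_unlock(&ovq_lock);
}

int main(void)
{
	pthread_t t;

	ivq_depth = 8;
	pthread_create(&t, NULL, drain_inbound, NULL);
	send_control();
	pthread_join(t, NULL);
	printf("ivq=%d ovq=%d\n", ivq_depth, ovq_depth);
	return 0;
}
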
index 1e2de73..f873dce 100644 (file)
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void)
        clks[pll_a_out0] = clk;
 
        /* PLLE */
-       clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
+       clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
                             0, 100000000, &pll_e_params,
                             0, pll_e_freq_table, NULL);
        clk_register_clkdev(clk, "pll_e", NULL);
index 80b6997..aeaea32 100644 (file)
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA
 
 config DW_DMAC
        tristate "Synopsys DesignWare AHB DMA support"
+       depends on GENERIC_HARDIRQS
        select DMA_ENGINE
        default y if CPU_AT32AP7000
        help
index f9dbd50..de3c317 100644 (file)
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
         * If it can't be trusted, assume that the pin can be used as a GPIO.
         */
        if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
-               return 1;
+               return 0;
 
        return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
 }
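
The callers of this hook expect 0 for "the pin may be used as a GPIO" and a negative errno for failure; a positive return is not a success code, which is what the change above corrects. A minimal sketch of a request-style check following that 0/-errno convention (the mask here is a made-up stand-in for the USE_SEL test):

#include <errno.h>
#include <stdio.h>

/* Hypothetical per-bank mask: a set bit means the pin is usable as a GPIO. */
static unsigned long usable_mask = 0x0000ffff;

static int toy_gpio_request(unsigned int nr)
{
	if (usable_mask & (1UL << (nr & 0x1f)))
		return 0;       /* success: pin available as GPIO */
	return -ENODEV;         /* failure: pin routed to another function */
}

int main(void)
{
	printf("pin 3:  %d\n", toy_gpio_request(3));   /* 0 */
	printf("pin 20: %d\n", toy_gpio_request(20));  /* -ENODEV */
	return 0;
}
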
index 770476a..3ce5bc3 100644 (file)
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
        .xlate = irq_domain_xlate_twocell,
 };
 
-static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
+               struct device_node *np)
 {
-       int base = stmpe_gpio->irq_base;
+       int base = 0;
 
-       stmpe_gpio->domain = irq_domain_add_simple(NULL,
+       if (!np)
+               base = stmpe_gpio->irq_base;
+
+       stmpe_gpio->domain = irq_domain_add_simple(np,
                                stmpe_gpio->chip.ngpio, base,
                                &stmpe_gpio_irq_simple_ops, stmpe_gpio);
        if (!stmpe_gpio->domain) {
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
        stmpe_gpio->chip = template_chip;
        stmpe_gpio->chip.ngpio = stmpe->num_gpios;
        stmpe_gpio->chip.dev = &pdev->dev;
+#ifdef CONFIG_OF
+       stmpe_gpio->chip.of_node = np;
+#endif
        stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
 
        if (pdata)
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
                goto out_free;
 
        if (irq >= 0) {
-               ret = stmpe_gpio_irq_init(stmpe_gpio);
+               ret = stmpe_gpio_irq_init(stmpe_gpio, np);
                if (ret)
                        goto out_disable;
 
index 30879df..d8a22c2 100644 (file)
@@ -1 +1,2 @@
 obj-y                  += drm/ vga/
+obj-$(CONFIG_TEGRA_HOST1X)     += host1x/
index 1e82882..b16c50e 100644 (file)
@@ -215,8 +215,8 @@ source "drivers/gpu/drm/cirrus/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
 
-source "drivers/gpu/drm/tegra/Kconfig"
-
 source "drivers/gpu/drm/omapdrm/Kconfig"
 
 source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
index 0d59b24..1c9f243 100644 (file)
@@ -49,7 +49,7 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
-obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC)       += tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
 obj-y                  += i2c/
index 5284292..02e52d5 100644 (file)
@@ -241,6 +241,8 @@ struct ast_fbdev {
        void *sysram;
        int size;
        struct ttm_bo_kmap_obj mapping;
+       int x1, y1, x2, y2; /* dirty rect */
+       spinlock_t dirty_lock;
 };
 
 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
index 34931fe..fbc0823 100644 (file)
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
        int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
        int ret;
        bool unmap = false;
+       bool store_for_later = false;
+       int x2, y2;
+       unsigned long flags;
 
        obj = afbdev->afb.obj;
        bo = gem_to_ast_bo(obj);
 
+       /*
+        * Try to reserve the BO; if that fails with -EBUSY,
+        * the BO is being moved, so store up the damage
+        * until later.
+        */
        ret = ast_bo_reserve(bo, true);
        if (ret) {
-               DRM_ERROR("failed to reserve fb bo\n");
+               if (ret != -EBUSY)
+                       return;
+
+               store_for_later = true;
+       }
+
+       x2 = x + width - 1;
+       y2 = y + height - 1;
+       spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+       if (afbdev->y1 < y)
+               y = afbdev->y1;
+       if (afbdev->y2 > y2)
+               y2 = afbdev->y2;
+       if (afbdev->x1 < x)
+               x = afbdev->x1;
+       if (afbdev->x2 > x2)
+               x2 = afbdev->x2;
+
+       if (store_for_later) {
+               afbdev->x1 = x;
+               afbdev->x2 = x2;
+               afbdev->y1 = y;
+               afbdev->y2 = y2;
+               spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
                return;
        }
 
+       afbdev->x1 = afbdev->y1 = INT_MAX;
+       afbdev->x2 = afbdev->y2 = 0;
+       spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
        if (!bo->kmap.virtual) {
                ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
                if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
                }
                unmap = true;
        }
-       for (i = y; i < y + height; i++) {
+       for (i = y; i <= y2; i++) {
                /* assume equal stride for now */
                src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
-               memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+               memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
        }
        if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
 
        ast->fbdev = afbdev;
        afbdev->helper.funcs = &ast_fb_helper_funcs;
+       spin_lock_init(&afbdev->dirty_lock);
        ret = drm_fb_helper_init(dev, &afbdev->helper,
                                 1, 1);
        if (ret) {
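
The fbdev path above unions the incoming damage with any rectangle saved from an earlier, deferred update, then either stashes the union (when the BO reserve fails with -EBUSY because the BO is being moved) or resets the stored rectangle to empty (x1 = y1 = INT_MAX, x2 = y2 = 0) and copies out just the combined region. A minimal sketch of that accumulate-or-flush logic, with a plain struct and no locking:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static struct rect pending = { INT_MAX, INT_MAX, 0, 0 };    /* empty */

/* Union new damage with whatever is pending; flush only when allowed. */
static void dirty_update(int x, int y, int w, int h, bool can_flush)
{
	int x2 = x + w - 1, y2 = y + h - 1;

	if (pending.x1 < x)
		x = pending.x1;
	if (pending.y1 < y)
		y = pending.y1;
	if (pending.x2 > x2)
		x2 = pending.x2;
	if (pending.y2 > y2)
		y2 = pending.y2;

	if (!can_flush) {
		pending = (struct rect){ x, y, x2, y2 };    /* store for later */
		return;
	}

	pending = (struct rect){ INT_MAX, INT_MAX, 0, 0 };  /* reset, then copy out */
	printf("flush %dx%d at (%d,%d)\n", x2 - x + 1, y2 - y + 1, x, y);
}

int main(void)
{
	dirty_update(10, 10, 20, 20, false);  /* BO busy: damage is only stored */
	dirty_update(40, 5, 8, 8, true);      /* flushes the union of both rects */
	return 0;
}
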
index 3602731..09da339 100644 (file)
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
 
        ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
        if (ret) {
-               if (ret != -ERESTARTSYS)
+               if (ret != -ERESTARTSYS && ret != -EBUSY)
                        DRM_ERROR("reserve failed %p\n", bo);
                return ret;
        }
index 6e0cc72..7ca0595 100644 (file)
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
        struct list_head fbdev_list;
        void *sysram;
        int size;
+       int x1, y1, x2, y2; /* dirty rect */
+       spinlock_t dirty_lock;
 };
 
 struct cirrus_bo {
index e25afcc..3541b56 100644 (file)
@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
        int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
        int ret;
        bool unmap = false;
+       bool store_for_later = false;
+       int x2, y2;
+       unsigned long flags;
 
        obj = afbdev->gfb.obj;
        bo = gem_to_cirrus_bo(obj);
 
+       /*
+        * Try to reserve the BO; if that fails with -EBUSY,
+        * the BO is being moved, so store up the damage
+        * until later.
+        */
        ret = cirrus_bo_reserve(bo, true);
        if (ret) {
-               DRM_ERROR("failed to reserve fb bo\n");
+               if (ret != -EBUSY)
+                       return;
+               store_for_later = true;
+       }
+
+       x2 = x + width - 1;
+       y2 = y + height - 1;
+       spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+       if (afbdev->y1 < y)
+               y = afbdev->y1;
+       if (afbdev->y2 > y2)
+               y2 = afbdev->y2;
+       if (afbdev->x1 < x)
+               x = afbdev->x1;
+       if (afbdev->x2 > x2)
+               x2 = afbdev->x2;
+
+       if (store_for_later) {
+               afbdev->x1 = x;
+               afbdev->x2 = x2;
+               afbdev->y1 = y;
+               afbdev->y2 = y2;
+               spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
                return;
        }
 
+       afbdev->x1 = afbdev->y1 = INT_MAX;
+       afbdev->x2 = afbdev->y2 = 0;
+       spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
        if (!bo->kmap.virtual) {
                ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
                if (ret) {
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 
        cdev->mode_info.gfbdev = gfbdev;
        gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+       spin_lock_init(&gfbdev->dirty_lock);
 
        ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
                                 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
index 1413a26..2ed8cfc 100644 (file)
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
 
        ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
        if (ret) {
-               if (ret != -ERESTARTSYS)
+               if (ret != -ERESTARTSYS && ret != -EBUSY)
                        DRM_ERROR("reserve failed %p\n", bo);
                return ret;
        }
index 3be0802..d7c449f 100644 (file)
@@ -182,9 +182,6 @@ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
        { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
 };
 
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
-                drm_dirty_info_enum_list)
-
 struct drm_conn_prop_enum_list {
        int type;
        char *name;
@@ -416,7 +413,7 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
        mutex_lock(&dev->mode_config.fb_lock);
        fb = __drm_framebuffer_lookup(dev, id);
        if (fb)
-               kref_get(&fb->refcount);
+               drm_framebuffer_reference(fb);
        mutex_unlock(&dev->mode_config.fb_lock);
 
        return fb;
@@ -710,7 +707,6 @@ int drm_connector_init(struct drm_device *dev,
        connector->connector_type = connector_type;
        connector->connector_type_id =
                ++drm_connector_enum_list[connector_type].count; /* TODO */
-       INIT_LIST_HEAD(&connector->user_modes);
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
        connector->edid_blob_ptr = NULL;
@@ -751,9 +747,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
        list_for_each_entry_safe(mode, t, &connector->modes, head)
                drm_mode_remove(connector, mode);
 
-       list_for_each_entry_safe(mode, t, &connector->user_modes, head)
-               drm_mode_remove(connector, mode);
-
        drm_mode_object_put(dev, &connector->base);
        list_del(&connector->head);
        dev->mode_config.num_connector--;
@@ -1124,45 +1117,7 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
-       mutex_init(&dev->mode_config.mutex);
-       mutex_init(&dev->mode_config.idr_mutex);
-       mutex_init(&dev->mode_config.fb_lock);
-       INIT_LIST_HEAD(&dev->mode_config.fb_list);
-       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
-       INIT_LIST_HEAD(&dev->mode_config.connector_list);
-       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
-       INIT_LIST_HEAD(&dev->mode_config.property_list);
-       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
-       INIT_LIST_HEAD(&dev->mode_config.plane_list);
-       idr_init(&dev->mode_config.crtc_idr);
-
-       drm_modeset_lock_all(dev);
-       drm_mode_create_standard_connector_properties(dev);
-       drm_modeset_unlock_all(dev);
-
-       /* Just to be sure */
-       dev->mode_config.num_fb = 0;
-       dev->mode_config.num_connector = 0;
-       dev->mode_config.num_crtc = 0;
-       dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
 {
        uint32_t total_objects = 0;
 
@@ -1206,69 +1161,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
-       struct drm_connector *connector, *ot;
-       struct drm_crtc *crtc, *ct;
-       struct drm_encoder *encoder, *enct;
-       struct drm_framebuffer *fb, *fbt;
-       struct drm_property *property, *pt;
-       struct drm_plane *plane, *plt;
-
-       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
-                                head) {
-               encoder->funcs->destroy(encoder);
-       }
-
-       list_for_each_entry_safe(connector, ot,
-                                &dev->mode_config.connector_list, head) {
-               connector->funcs->destroy(connector);
-       }
-
-       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
-                                head) {
-               drm_property_destroy(dev, property);
-       }
-
-       /*
-        * Single-threaded teardown context, so it's not required to grab the
-        * fb_lock to protect against concurrent fb_list access. Contrary, it
-        * would actually deadlock with the drm_framebuffer_cleanup function.
-        *
-        * Also, if there are any framebuffers left, that's a driver leak now,
-        * so politely WARN about this.
-        */
-       WARN_ON(!list_empty(&dev->mode_config.fb_list));
-       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-               drm_framebuffer_remove(fb);
-       }
-
-       list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
-                                head) {
-               plane->funcs->destroy(plane);
-       }
-
-       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-               crtc->funcs->destroy(crtc);
-       }
-
-       idr_destroy(&dev->mode_config.crtc_idr);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
 /**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
  * @out: drm_mode_modeinfo struct to return to the user
@@ -2330,7 +2222,6 @@ int drm_mode_addfb(struct drm_device *dev,
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("could not create framebuffer\n");
-               drm_modeset_unlock_all(dev);
                return PTR_ERR(fb);
        }
 
@@ -2510,7 +2401,6 @@ int drm_mode_addfb2(struct drm_device *dev,
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("could not create framebuffer\n");
-               drm_modeset_unlock_all(dev);
                return PTR_ERR(fb);
        }
 
@@ -2723,192 +2613,6 @@ void drm_fb_release(struct drm_file *priv)
        mutex_unlock(&priv->fbs_lock);
 }
 
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static void drm_mode_attachmode(struct drm_device *dev,
-                               struct drm_connector *connector,
-                               struct drm_display_mode *mode)
-{
-       list_add_tail(&mode->head, &connector->user_modes);
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
-                            const struct drm_display_mode *mode)
-{
-       struct drm_connector *connector;
-       int ret = 0;
-       struct drm_display_mode *dup_mode, *next;
-       LIST_HEAD(list);
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (!connector->encoder)
-                       continue;
-               if (connector->encoder->crtc == crtc) {
-                       dup_mode = drm_mode_duplicate(dev, mode);
-                       if (!dup_mode) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       list_add_tail(&dup_mode->head, &list);
-               }
-       }
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (!connector->encoder)
-                       continue;
-               if (connector->encoder->crtc == crtc)
-                       list_move_tail(list.next, &connector->user_modes);
-       }
-
-       WARN_ON(!list_empty(&list));
-
- out:
-       list_for_each_entry_safe(dup_mode, next, &list, head)
-               drm_mode_destroy(dev, dup_mode);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
-                              struct drm_connector *connector,
-                              struct drm_display_mode *mode)
-{
-       int found = 0;
-       int ret = 0;
-       struct drm_display_mode *match_mode, *t;
-
-       list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
-               if (drm_mode_equal(match_mode, mode)) {
-                       list_del(&match_mode->head);
-                       drm_mode_destroy(dev, match_mode);
-                       found = 1;
-                       break;
-               }
-       }
-
-       if (!found)
-               ret = -EINVAL;
-
-       return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
-       struct drm_connector *connector;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               drm_mode_detachmode(dev, connector, mode);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_fb_attachmode - Attach a user mode to an connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * This attaches a user specified mode to an connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
-                             void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_mode_cmd *mode_cmd = data;
-       struct drm_connector *connector;
-       struct drm_display_mode *mode;
-       struct drm_mode_object *obj;
-       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-       int ret;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       drm_modeset_lock_all(dev);
-
-       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
-       if (!obj) {
-               ret = -EINVAL;
-               goto out;
-       }
-       connector = obj_to_connector(obj);
-
-       mode = drm_mode_create(dev);
-       if (!mode) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       ret = drm_crtc_convert_umode(mode, umode);
-       if (ret) {
-               DRM_DEBUG_KMS("Invalid mode\n");
-               drm_mode_destroy(dev, mode);
-               goto out;
-       }
-
-       drm_mode_attachmode(dev, connector, mode);
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
-
-
-/**
- * drm_fb_detachmode - Detach a user specified mode from an connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
-                             void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_object *obj;
-       struct drm_mode_mode_cmd *mode_cmd = data;
-       struct drm_connector *connector;
-       struct drm_display_mode mode;
-       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-       int ret;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       drm_modeset_lock_all(dev);
-
-       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
-       if (!obj) {
-               ret = -EINVAL;
-               goto out;
-       }
-       connector = obj_to_connector(obj);
-
-       ret = drm_crtc_convert_umode(&mode, umode);
-       if (ret) {
-               DRM_DEBUG_KMS("Invalid mode\n");
-               goto out;
-       }
-
-       ret = drm_mode_detachmode(dev, connector, &mode);
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
-
 struct drm_property *drm_property_create(struct drm_device *dev, int flags,
                                         const char *name, int num_values)
 {
@@ -3745,6 +3449,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
                goto out;
        }
 
+       if (crtc->fb->pixel_format != fb->pixel_format) {
+               DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
                ret = -ENOMEM;
                spin_lock_irqsave(&dev->event_lock, flags);
@@ -4070,3 +3780,110 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
        }
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible. Which is no
+ * problem, since this should happen single threaded at init time. It is the
+ * driver's problem to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       mutex_init(&dev->mode_config.idr_mutex);
+       mutex_init(&dev->mode_config.fb_lock);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.connector_list);
+       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       INIT_LIST_HEAD(&dev->mode_config.plane_list);
+       idr_init(&dev->mode_config.crtc_idr);
+
+       drm_modeset_lock_all(dev);
+       drm_mode_create_standard_connector_properties(dev);
+       drm_modeset_unlock_all(dev);
+
+       /* Just to be sure */
+       dev->mode_config.num_fb = 0;
+       dev->mode_config.num_connector = 0;
+       dev->mode_config.num_crtc = 0;
+       dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_connector *connector, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_encoder *encoder, *enct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_property *property, *pt;
+       struct drm_property_blob *blob, *bt;
+       struct drm_plane *plane, *plt;
+
+       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+                                head) {
+               encoder->funcs->destroy(encoder);
+       }
+
+       list_for_each_entry_safe(connector, ot,
+                                &dev->mode_config.connector_list, head) {
+               connector->funcs->destroy(connector);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+                                head) {
+               drm_property_destroy(dev, property);
+       }
+
+       list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+                                head) {
+               drm_property_destroy_blob(dev, blob);
+       }
+
+       /*
+        * Single-threaded teardown context, so it's not required to grab the
+        * fb_lock to protect against concurrent fb_list access. Contrary, it
+        * would actually deadlock with the drm_framebuffer_cleanup function.
+        *
+        * Also, if there are any framebuffers left, that's a driver leak now,
+        * so politely WARN about this.
+        */
+       WARN_ON(!list_empty(&dev->mode_config.fb_list));
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               drm_framebuffer_remove(fb);
+       }
+
+       list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+                                head) {
+               plane->funcs->destroy(plane);
+       }
+
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               crtc->funcs->destroy(crtc);
+       }
+
+       idr_destroy(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
index 7b2d378..e974f93 100644 (file)
@@ -648,6 +648,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                } else if (set->fb->bits_per_pixel !=
                           set->crtc->fb->bits_per_pixel) {
                        mode_changed = true;
+               } else if (set->fb->pixel_format !=
+                          set->crtc->fb->pixel_format) {
+                       mode_changed = true;
                } else
                        fb_changed = true;
        }
index 25f91cd..8d4f290 100644 (file)
@@ -60,7 +60,7 @@ static int drm_version(struct drm_device *dev, void *data,
        [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
 
 /** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
+static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
@@ -150,8 +150,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
-       struct drm_ioctl_desc *ioctl;
+       const struct drm_ioctl_desc *ioctl;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;
@@ -408,6 +408,7 @@ long drm_ioctl(struct file *filp,
                usize = asize = _IOC_SIZE(cmd);
                if (drv_size > asize)
                        asize = drv_size;
+               cmd = ioctl->cmd_drv;
        }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
                ioctl = &drm_ioctls[nr];
index e2acfdb..9e62bbe 100644 (file)
@@ -587,284 +587,348 @@ static const struct drm_display_mode edid_cea_modes[] = {
        /* 1 - 640x480@60Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 2 - 720x480@60Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 3 - 720x480@60Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 4 - 1280x720@60Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 60, },
        /* 5 - 1920x1080i@60Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 60, },
        /* 6 - 1440x480i@60Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 60, },
        /* 7 - 1440x480i@60Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 60, },
        /* 8 - 1440x240@60Hz */
        { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 60, },
        /* 9 - 1440x240@60Hz */
        { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 60, },
        /* 10 - 2880x480i@60Hz */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 60, },
        /* 11 - 2880x480i@60Hz */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 60, },
        /* 12 - 2880x240@60Hz */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 13 - 2880x240@60Hz */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 14 - 1440x480@60Hz */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 15 - 1440x480@60Hz */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 16 - 1920x1080@60Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 60, },
        /* 17 - 720x576@50Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 18 - 720x576@50Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 19 - 1280x720@50Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 50, },
        /* 20 - 1920x1080i@50Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 50, },
        /* 21 - 1440x576i@50Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 50, },
        /* 22 - 1440x576i@50Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 50, },
        /* 23 - 1440x288@50Hz */
        { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 50, },
        /* 24 - 1440x288@50Hz */
        { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 50, },
        /* 25 - 2880x576i@50Hz */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 50, },
        /* 26 - 2880x576i@50Hz */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 50, },
        /* 27 - 2880x288@50Hz */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 28 - 2880x288@50Hz */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 29 - 1440x576@50Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 30 - 1440x576@50Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 31 - 1920x1080@50Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 50, },
        /* 32 - 1920x1080@24Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
                   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 24, },
        /* 33 - 1920x1080@25Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 25, },
        /* 34 - 1920x1080@30Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 30, },
        /* 35 - 2880x480@60Hz */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 36 - 2880x480@60Hz */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 60, },
        /* 37 - 2880x576@50Hz */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 38 - 2880x576@50Hz */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 50, },
        /* 39 - 1920x1080i@50Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
                   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 50, },
        /* 40 - 1920x1080i@100Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 100, },
        /* 41 - 1280x720@100Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 100, },
        /* 42 - 720x576@100Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 100, },
        /* 43 - 720x576@100Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 100, },
        /* 44 - 1440x576i@100Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 100, },
        /* 45 - 1440x576i@100Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 100, },
        /* 46 - 1920x1080i@120Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE) },
+                       DRM_MODE_FLAG_INTERLACE),
+         .vrefresh = 120, },
        /* 47 - 1280x720@120Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 120, },
        /* 48 - 720x480@120Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 120, },
        /* 49 - 720x480@120Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 120, },
        /* 50 - 1440x480i@120Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 120, },
        /* 51 - 1440x480i@120Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 120, },
        /* 52 - 720x576@200Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 200, },
        /* 53 - 720x576@200Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 200, },
        /* 54 - 1440x576i@200Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 200, },
        /* 55 - 1440x576i@200Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 200, },
        /* 56 - 720x480@240Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 240, },
        /* 57 - 720x480@240Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
-                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+         .vrefresh = 240, },
        /* 58 - 1440x480i@240 */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 240, },
        /* 59 - 1440x480i@240 */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+         .vrefresh = 240, },
        /* 60 - 1280x720@24Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 24, },
        /* 61 - 1280x720@25Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
                   3740, 3960, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 25, },
        /* 62 - 1280x720@30Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+         .vrefresh = 30, },
        /* 63 - 1920x1080@120Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+        .vrefresh = 120, },
        /* 64 - 1920x1080@100Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
-                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+        .vrefresh = 100, },
 };
 
 /*** DDC fetch and block validation ***/
@@ -2266,13 +2330,34 @@ EXPORT_SYMBOL(drm_find_cea_extension);
  */
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 {
-       struct drm_display_mode *cea_mode;
        u8 mode;
 
+       if (!to_match->clock)
+               return 0;
+
        for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
-               cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+               const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+               unsigned int clock1, clock2;
+
+               clock1 = clock2 = cea_mode->clock;
 
-               if (drm_mode_equal(to_match, cea_mode))
+               /* Check both 60Hz and 59.94Hz */
+               if (cea_mode->vrefresh % 6 == 0) {
+                       /*
+                        * edid_cea_modes contains the 59.94Hz
+                        * variant for 240 and 480 line modes,
+                        * and the 60Hz variant otherwise.
+                        */
+                       if (cea_mode->vdisplay == 240 ||
+                           cea_mode->vdisplay == 480)
+                               clock1 = clock1 * 1001 / 1000;
+                       else
+                               clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
+               }
+
+               if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+                    KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+                   drm_mode_equal_no_clocks(to_match, cea_mode))
                        return mode + 1;
        }
        return 0;
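
Aside, for illustration (not part of the patch): the clock check added above exists because CEA-861 pairs each 60Hz-family mode with a 59.94Hz-family variant whose pixel clock differs by exactly 1000/1001 while all other timings are identical. A small standalone sketch of the same arithmetic, following the convention stated in the comment above (240/480-line table entries store the 59.94Hz clock, the rest store the 60Hz clock):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* 720x480: table stores the 59.94Hz clock, derive the 60Hz variant */
	unsigned long c480 = 27000;				/* kHz */
	/* 1280x720@60: table stores the 60Hz clock, derive the 59.94Hz variant */
	unsigned long c720 = 74250;				/* kHz */

	printf("720x480 : 59.94Hz %lu kHz, 60Hz %lu kHz\n",
	       c480, c480 * 1001 / 1000);			/* 27000 / 27027 */
	printf("1280x720: 60Hz %lu kHz, 59.94Hz %lu kHz\n",
	       c720, DIV_ROUND_UP(c720 * 1000, 1001));		/* 74250 / 74176 */
	return 0;
}

drm_match_cea_mode() then accepts a probed mode whose clock matches either variant, comparing in picoseconds via KHZ2PICOS() so that small kHz rounding differences do not break the match.
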
@@ -2294,6 +2379,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
                        newmode = drm_mode_duplicate(dev,
                                                     &edid_cea_modes[cea_mode]);
                        if (newmode) {
+                               newmode->vrefresh = 0;
                                drm_mode_probed_add(connector, newmode);
                                modes++;
                        }
@@ -2510,6 +2596,65 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 }
 EXPORT_SYMBOL(drm_edid_to_eld);
 
+/**
+ * drm_edid_to_sad - extracts SADs from EDID
+ * @edid: EDID to parse
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
+ * Note: returned pointer needs to be kfreed
+ *
+ * Return number of found SADs or negative number on error.
+ */
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+{
+       int count = 0;
+       int i, start, end, dbl;
+       u8 *cea;
+
+       cea = drm_find_cea_extension(edid);
+       if (!cea) {
+               DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+               return -ENOENT;
+       }
+
+       if (cea_revision(cea) < 3) {
+               DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+               return -ENOTSUPP;
+       }
+
+       if (cea_db_offsets(cea, &start, &end)) {
+               DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+               return -EPROTO;
+       }
+
+       for_each_cea_db(cea, i, start, end) {
+               u8 *db = &cea[i];
+
+               if (cea_db_tag(db) == AUDIO_BLOCK) {
+                       int j;
+                       dbl = cea_db_payload_len(db);
+
+                       count = dbl / 3; /* SAD is 3B */
+                       *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+                       if (!*sads)
+                               return -ENOMEM;
+                       for (j = 0; j < count; j++) {
+                               u8 *sad = &db[1 + j * 3];
+
+                               (*sads)[j].format = (sad[0] & 0x78) >> 3;
+                               (*sads)[j].channels = sad[0] & 0x7;
+                               (*sads)[j].freq = sad[1] & 0x7F;
+                               (*sads)[j].byte2 = sad[2];
+                       }
+                       break;
+               }
+       }
+
+       return count;
+}
+EXPORT_SYMBOL(drm_edid_to_sad);
+
 /**
  * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
  * @connector: connector associated with the HDMI/DP sink
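
Aside (not part of the patch): a minimal sketch of a caller of the new drm_edid_to_sad() helper. Only the kerneldoc contract above is relied on (a positive return is the SAD count, *sads is kcalloc'd and owned by the caller); the function name and the header assumed to declare struct cea_sad are illustrative.

#include <linux/printk.h>
#include <linux/slab.h>
#include <drm/drm_edid.h>

static void example_dump_sads(struct edid *edid)
{
	struct cea_sad *sads;
	int i, count;

	count = drm_edid_to_sad(edid, &sads);
	if (count <= 0)			/* 0: no audio block, <0: -ENOENT etc. */
		return;

	for (i = 0; i < count; i++)
		pr_info("SAD %d: format %d, channels %d, freq mask 0x%02x\n",
			i, sads[i].format, sads[i].channels, sads[i].freq);

	kfree(sads);			/* caller must free, per the kerneldoc */
}
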
index 38d3943..fa445dd 100644 (file)
@@ -31,10 +31,11 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
 MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
        "from built-in data or /lib/firmware instead. ");
 
-#define GENERIC_EDIDS 4
+#define GENERIC_EDIDS 5
 static char *generic_edid_name[GENERIC_EDIDS] = {
        "edid/1024x768.bin",
        "edid/1280x1024.bin",
+       "edid/1600x1200.bin",
        "edid/1680x1050.bin",
        "edid/1920x1080.bin",
 };
@@ -79,6 +80,24 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
        {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
+       0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+       0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
+       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
+       0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
+       0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
+       0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+       0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+       0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+       0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
+       0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+       0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
+       0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
+       },
+       {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+       0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
        0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
        0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
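
Aside (not from the patch): the new built-in 1600x1200 blob is selected the same way as the existing ones, through the edid_firmware parameter declared above, for example on the kernel command line (module prefix assumed to be drm_kms_helper, where this file is built):

	drm_kms_helper.edid_firmware=edid/1600x1200.bin
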
index 13fdcd1..429e07d 100644 (file)
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp)
        int retcode = 0;
        int need_setup = 0;
        struct address_space *old_mapping;
+       struct address_space *old_imapping;
 
        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp)
        if (!dev->open_count++)
                need_setup = 1;
        mutex_lock(&dev->struct_mutex);
+       old_imapping = inode->i_mapping;
        old_mapping = dev->dev_mapping;
        if (old_mapping == NULL)
                dev->dev_mapping = &inode->i_data;
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp)
 
 err_undo:
        mutex_lock(&dev->struct_mutex);
-       filp->f_mapping = old_mapping;
-       inode->i_mapping = old_mapping;
+       filp->f_mapping = old_imapping;
+       inode->i_mapping = old_imapping;
        iput(container_of(dev->dev_mapping, struct inode, i_data));
        dev->dev_mapping = old_mapping;
        mutex_unlock(&dev->struct_mutex);
index af779ae..cf919e3 100644 (file)
@@ -205,11 +205,11 @@ static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
        if (obj->import_attach) {
-               drm_prime_remove_imported_buf_handle(&filp->prime,
+               drm_prime_remove_buf_handle(&filp->prime,
                                obj->import_attach->dmabuf);
        }
        if (obj->export_dma_buf) {
-               drm_prime_remove_imported_buf_handle(&filp->prime,
+               drm_prime_remove_buf_handle(&filp->prime,
                                obj->export_dma_buf);
        }
 }
index 04fa6f1..f264d08 100644 (file)
@@ -848,6 +848,26 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
        } else if (mode1->clock != mode2->clock)
                return false;
 
+       return drm_mode_equal_no_clocks(mode1, mode2);
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_equal_no_clocks - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
        if (mode1->hdisplay == mode2->hdisplay &&
            mode1->hsync_start == mode2->hsync_start &&
            mode1->hsync_end == mode2->hsync_end &&
@@ -863,7 +883,7 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
 
        return false;
 }
-EXPORT_SYMBOL(drm_mode_equal);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks);
 
 /**
  * drm_mode_validate_size - make sure modes adhere to size constraints
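
Aside (not part of the patch): the split lets callers compare timings while applying their own clock tolerance, as drm_match_cea_mode() now does. A hypothetical helper, only to show how the two exported checks compose:

#include <drm/drm_crtc.h>

/* hypothetical: true if the two modes differ only in pixel clock */
static bool modes_differ_only_in_clock(const struct drm_display_mode *a,
				       const struct drm_display_mode *b)
{
	return drm_mode_equal_no_clocks(a, b) && !drm_mode_equal(a, b);
}
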
index bd719e9..14194b6 100644 (file)
@@ -152,7 +152,7 @@ static const char *drm_pci_get_name(struct drm_device *dev)
        return pdriver->name;
 }
 
-int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
 {
        int len, ret;
        struct pci_driver *pdriver = dev->driver->kdriver.pci;
@@ -194,9 +194,9 @@ err:
        return ret;
 }
 
-int drm_pci_set_unique(struct drm_device *dev,
-                      struct drm_master *master,
-                      struct drm_unique *u)
+static int drm_pci_set_unique(struct drm_device *dev,
+                             struct drm_master *master,
+                             struct drm_unique *u)
 {
        int domain, bus, slot, func, ret;
        const char *bus_name;
@@ -266,7 +266,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
        return 0;
 }
 
-int drm_pci_agp_init(struct drm_device *dev)
+static int drm_pci_agp_init(struct drm_device *dev)
 {
        if (drm_core_has_AGP(dev)) {
                if (drm_pci_device_is_agp(dev))
index 25d0218..dcde352 100644 (file)
@@ -62,6 +62,7 @@ struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;
 };
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
 
 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                enum dma_data_direction dir)
@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 {
        struct drm_gem_object *obj;
        void *buf;
-       int ret;
+       int ret = 0;
+       struct dma_buf *dmabuf;
 
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
-               get_dma_buf(obj->import_attach->dmabuf);
-               *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
-               drm_gem_object_unreference_unlocked(obj);
-               mutex_unlock(&file_priv->prime.lock);
-               return 0;
+               dmabuf = obj->import_attach->dmabuf;
+               goto out_have_obj;
        }
 
        if (obj->export_dma_buf) {
-               get_dma_buf(obj->export_dma_buf);
-               *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
-               drm_gem_object_unreference_unlocked(obj);
-       } else {
-               buf = dev->driver->gem_prime_export(dev, obj, flags);
-               if (IS_ERR(buf)) {
-                       /* normally the created dma-buf takes ownership of the ref,
-                        * but if that fails then drop the ref
-                        */
-                       drm_gem_object_unreference_unlocked(obj);
-                       mutex_unlock(&file_priv->prime.lock);
-                       return PTR_ERR(buf);
-               }
-               obj->export_dma_buf = buf;
-               *prime_fd = dma_buf_fd(buf, flags);
+               dmabuf = obj->export_dma_buf;
+               goto out_have_obj;
+       }
+
+       buf = dev->driver->gem_prime_export(dev, obj, flags);
+       if (IS_ERR(buf)) {
+               /* normally the created dma-buf takes ownership of the ref,
+                * but if that fails then drop the ref
+                */
+               ret = PTR_ERR(buf);
+               goto out;
        }
+       obj->export_dma_buf = buf;
+
        /* if we've exported this buffer the cheat and add it to the import list
         * so we get the correct handle back
         */
-       ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
-                       obj->export_dma_buf, handle);
-       if (ret) {
-               drm_gem_object_unreference_unlocked(obj);
-               mutex_unlock(&file_priv->prime.lock);
-               return ret;
-       }
+       ret = drm_prime_add_buf_handle(&file_priv->prime,
+                                      obj->export_dma_buf, handle);
+       if (ret)
+               goto out;
 
+       *prime_fd = dma_buf_fd(buf, flags);
        mutex_unlock(&file_priv->prime.lock);
        return 0;
+
+out_have_obj:
+       get_dma_buf(dmabuf);
+       *prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+       drm_gem_object_unreference_unlocked(obj);
+       mutex_unlock(&file_priv->prime.lock);
+       return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 
@@ -268,7 +271,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
                        drm_gem_object_reference(obj);
-                       dma_buf_put(dma_buf);
                        return obj;
                }
        }
@@ -277,6 +279,8 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
        if (IS_ERR(attach))
                return ERR_PTR(PTR_ERR(attach));
 
+       get_dma_buf(dma_buf);
+
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                ret = PTR_ERR(sgt);
@@ -297,6 +301,8 @@ fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 fail_detach:
        dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(drm_gem_prime_import);
@@ -314,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
        mutex_lock(&file_priv->prime.lock);
 
-       ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+       ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (!ret) {
                ret = 0;
@@ -333,12 +339,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
        if (ret)
                goto out_put;
 
-       ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+       ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        if (ret)
                goto fail;
 
        mutex_unlock(&file_priv->prime.lock);
+
+       dma_buf_put(dma_buf);
+
        return 0;
 
 fail:
@@ -479,15 +488,12 @@ EXPORT_SYMBOL(drm_prime_init_file_private);
 
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-       struct drm_prime_member *member, *safe;
-       list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
-               list_del(&member->entry);
-               kfree(member);
-       }
+       /* by now drm_gem_release should've made sure the list is empty */
+       WARN_ON(!list_empty(&prime_fpriv->head));
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);
 
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
 {
        struct drm_prime_member *member;
 
@@ -495,14 +501,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
        if (!member)
                return -ENOMEM;
 
+       get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
 }
-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
 
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
 {
        struct drm_prime_member *member;
 
@@ -514,19 +520,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
        }
        return -ENOENT;
 }
-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
 
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
 {
        struct drm_prime_member *member, *safe;
 
        mutex_lock(&prime_fpriv->lock);
        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
+                       dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
        mutex_unlock(&prime_fpriv->lock);
 }
-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
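
Summary of the new ownership rules, derived only from the hunks above (added here as a note, not part of the patch):

/*
 * dma_buf reference balance after this change:
 *
 *   drm_prime_add_buf_handle()     takes get_dma_buf(): the per-file list
 *                                  now owns one reference per entry.
 *   drm_prime_remove_buf_handle()  drops that reference with dma_buf_put().
 *   drm_gem_prime_fd_to_handle()   puts its own fd-lookup reference once the
 *                                  handle has been recorded.
 *   drm_gem_prime_import()         takes a reference on success and puts it
 *                                  on the failure path; the early return for
 *                                  re-imported objects no longer drops one.
 *
 * Hence drm_prime_destroy_file_private() can simply WARN_ON() a non-empty
 * list: drm_gem_release() is expected to have removed every entry already.
 */
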
index db7bd29..1d4f7c9 100644 (file)
@@ -422,6 +422,7 @@ void drm_vm_open_locked(struct drm_device *dev,
                list_add(&vma_entry->head, &dev->vmalist);
        }
 }
+EXPORT_SYMBOL_GPL(drm_vm_open_locked);
 
 static void drm_vm_open(struct vm_area_struct *vma)
 {
index 046bcda..772c62a 100644 (file)
@@ -24,7 +24,9 @@ config DRM_EXYNOS_DMABUF
 
 config DRM_EXYNOS_FIMD
        bool "Exynos DRM FIMD"
-       depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+       depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+       select FB_MODE_HELPERS
+       select VIDEOMODE_HELPERS
        help
          Choose this option if you want to use Exynos FIMD for DRM.
 
@@ -54,7 +56,7 @@ config DRM_EXYNOS_IPP
 
 config DRM_EXYNOS_FIMC
        bool "Exynos DRM FIMC"
-       depends on DRM_EXYNOS_IPP
+       depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
        help
          Choose this option if you want to use Exynos FIMC for DRM.
 
index 4c5b685..8bcc13a 100644 (file)
@@ -124,7 +124,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                }
 
                count = drm_add_edid_modes(connector, edid);
-               if (count < 0) {
+               if (!count) {
                        DRM_ERROR("Add edid modes failed %d\n", count);
                        goto out;
                }
index ba0a3aa..ff7f2a8 100644 (file)
@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
                        drm_gem_object_reference(obj);
-                       dma_buf_put(dma_buf);
                        return obj;
                }
        }
@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        if (IS_ERR(attach))
                return ERR_PTR(-EINVAL);
 
+       get_dma_buf(dma_buf);
 
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
@@ -298,6 +298,8 @@ err_unmap_attach:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 err_buf_detach:
        dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+
        return ERR_PTR(ret);
 }
 
index 3da5c2d..ba6d995 100644 (file)
@@ -380,6 +380,10 @@ static int __init exynos_drm_init(void)
        ret = platform_driver_register(&ipp_driver);
        if (ret < 0)
                goto out_ipp;
+
+       ret = exynos_platform_device_ipp_register();
+       if (ret < 0)
+               goto out_ipp_dev;
 #endif
 
        ret = platform_driver_register(&exynos_drm_platform_driver);
@@ -388,7 +392,7 @@ static int __init exynos_drm_init(void)
 
        exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
                                NULL, 0);
-       if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+       if (IS_ERR(exynos_drm_pdev)) {
                ret = PTR_ERR(exynos_drm_pdev);
                goto out;
        }
@@ -400,6 +404,8 @@ out:
 
 out_drm:
 #ifdef CONFIG_DRM_EXYNOS_IPP
+       exynos_platform_device_ipp_unregister();
+out_ipp_dev:
        platform_driver_unregister(&ipp_driver);
 out_ipp:
 #endif
@@ -456,6 +462,7 @@ static void __exit exynos_drm_exit(void)
        platform_driver_unregister(&exynos_drm_platform_driver);
 
 #ifdef CONFIG_DRM_EXYNOS_IPP
+       exynos_platform_device_ipp_unregister();
        platform_driver_unregister(&ipp_driver);
 #endif
 
index 4606fac..680a7c1 100644 (file)
@@ -322,13 +322,23 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
  * this function registers exynos drm hdmi platform device. It ensures only one
  * instance of the device is created.
  */
-extern int exynos_platform_device_hdmi_register(void);
+int exynos_platform_device_hdmi_register(void);
 
 /*
  * this function unregisters exynos drm hdmi platform device if it exists.
  */
 void exynos_platform_device_hdmi_unregister(void);
 
+/*
+ * this function registers exynos drm ipp platform device.
+ */
+int exynos_platform_device_ipp_register(void);
+
+/*
+ * this function unregisters exynos drm ipp platform device if it exists.
+ */
+void exynos_platform_device_ipp_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
index 411f69b..773f583 100644 (file)
  *
  */
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
-#include <plat/map-base.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
@@ -76,6 +77,27 @@ enum fimc_wb {
        FIMC_WB_B,
 };
 
+enum {
+       FIMC_CLK_LCLK,
+       FIMC_CLK_GATE,
+       FIMC_CLK_WB_A,
+       FIMC_CLK_WB_B,
+       FIMC_CLK_MUX,
+       FIMC_CLK_PARENT,
+       FIMC_CLKS_MAX
+};
+
+static const char * const fimc_clock_names[] = {
+       [FIMC_CLK_LCLK]   = "sclk_fimc",
+       [FIMC_CLK_GATE]   = "fimc",
+       [FIMC_CLK_WB_A]   = "pxl_async0",
+       [FIMC_CLK_WB_B]   = "pxl_async1",
+       [FIMC_CLK_MUX]    = "mux",
+       [FIMC_CLK_PARENT] = "parent",
+};
+
+#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
+
 /*
  * A structure of scaler.
  *
@@ -118,15 +140,6 @@ struct fimc_capability {
        u32     rl_h_rot;
 };
 
-/*
- * A structure of fimc driver data.
- *
- * @parent_clk: name of parent clock.
- */
-struct fimc_driverdata {
-       char    *parent_clk;
-};
-
 /*
  * A structure of fimc context.
  *
@@ -134,13 +147,10 @@ struct fimc_driverdata {
  * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @lock: locking of operations.
- * @sclk_fimc_clk: fimc source clock.
- * @fimc_clk: fimc clock.
- * @wb_clk: writeback a clock.
- * @wb_b_clk: writeback b clock.
+ * @clocks: fimc clocks.
+ * @clk_frequency: LCLK clock frequency.
+ * @sysreg: handle to SYSREG block regmap.
  * @sc: scaler infomations.
- * @odr: ordering of YUV.
- * @ver: fimc version.
  * @pol: porarity of writeback.
  * @id: fimc id.
  * @irq: irq number.
@@ -151,12 +161,10 @@ struct fimc_context {
        struct resource *regs_res;
        void __iomem    *regs;
        struct mutex    lock;
-       struct clk      *sclk_fimc_clk;
-       struct clk      *fimc_clk;
-       struct clk      *wb_clk;
-       struct clk      *wb_b_clk;
+       struct clk      *clocks[FIMC_CLKS_MAX];
+       u32             clk_frequency;
+       struct regmap   *sysreg;
        struct fimc_scaler      sc;
-       struct fimc_driverdata  *ddata;
        struct exynos_drm_ipp_pol       pol;
        int     id;
        int     irq;
@@ -200,17 +208,13 @@ static void fimc_sw_reset(struct fimc_context *ctx)
        fimc_write(0x0, EXYNOS_CIFCNTSEQ);
 }
 
-static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
 {
-       u32 camblk_cfg;
-
        DRM_DEBUG_KMS("%s\n", __func__);
 
-       camblk_cfg = readl(SYSREG_CAMERA_BLK);
-       camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
-       camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
-
-       writel(camblk_cfg, SYSREG_CAMERA_BLK);
+       return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
+                                 SYSREG_FIMD0WB_DEST_MASK,
+                                 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
 }
 
 static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
@@ -1301,14 +1305,12 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
        DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
 
        if (enable) {
-               clk_enable(ctx->sclk_fimc_clk);
-               clk_enable(ctx->fimc_clk);
-               clk_enable(ctx->wb_clk);
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
                ctx->suspended = false;
        } else {
-               clk_disable(ctx->sclk_fimc_clk);
-               clk_disable(ctx->fimc_clk);
-               clk_disable(ctx->wb_clk);
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
                ctx->suspended = true;
        }
 
@@ -1613,7 +1615,11 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
                fimc_handle_lastend(ctx, true);
 
                /* setup FIMD */
-               fimc_set_camblk_fimd0_wb(ctx);
+               ret = fimc_set_camblk_fimd0_wb(ctx);
+               if (ret < 0) {
+                       dev_err(dev, "camblk setup failed.\n");
+                       return ret;
+               }
 
                set_wb.enable = 1;
                set_wb.refresh = property->refresh_rate;
@@ -1713,76 +1719,118 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
        fimc_write(cfg, EXYNOS_CIGCTRL);
 }
 
+static void fimc_put_clocks(struct fimc_context *ctx)
+{
+       int i;
+
+       for (i = 0; i < FIMC_CLKS_MAX; i++) {
+               if (IS_ERR(ctx->clocks[i]))
+                       continue;
+               clk_put(ctx->clocks[i]);
+               ctx->clocks[i] = ERR_PTR(-EINVAL);
+       }
+}
+
+static int fimc_setup_clocks(struct fimc_context *ctx)
+{
+       struct device *fimc_dev = ctx->ippdrv.dev;
+       struct device *dev;
+       int ret, i;
+
+       for (i = 0; i < FIMC_CLKS_MAX; i++)
+               ctx->clocks[i] = ERR_PTR(-EINVAL);
+
+       for (i = 0; i < FIMC_CLKS_MAX; i++) {
+               if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
+                       dev = fimc_dev->parent;
+               else
+                       dev = fimc_dev;
+
+               ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
+               if (IS_ERR(ctx->clocks[i])) {
+                       if (i >= FIMC_CLK_MUX)
+                               break;
+                       ret = PTR_ERR(ctx->clocks[i]);
+                       dev_err(fimc_dev, "failed to get clock: %s\n",
+                                               fimc_clock_names[i]);
+                       goto e_clk_free;
+               }
+       }
+
+       /* Optional FIMC LCLK parent clock setting */
+       if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
+               ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
+                                    ctx->clocks[FIMC_CLK_PARENT]);
+               if (ret < 0) {
+                       dev_err(fimc_dev, "failed to set parent.\n");
+                       goto e_clk_free;
+               }
+       }
+
+       ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
+       if (ret < 0)
+               goto e_clk_free;
+
+       ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
+       if (!ret)
+               return ret;
+e_clk_free:
+       fimc_put_clocks(ctx);
+       return ret;
+}
+
+static int fimc_parse_dt(struct fimc_context *ctx)
+{
+       struct device_node *node = ctx->ippdrv.dev->of_node;
+
+       /* Handle only devices that support the LCD Writeback data path */
+       if (!of_property_read_bool(node, "samsung,lcd-wb"))
+               return -ENODEV;
+
+       if (of_property_read_u32(node, "clock-frequency",
+                                       &ctx->clk_frequency))
+               ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
+
+       ctx->id = of_alias_get_id(node, "fimc");
+
+       if (ctx->id < 0) {
+               dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int fimc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct fimc_context *ctx;
-       struct clk      *parent_clk;
        struct resource *res;
        struct exynos_drm_ippdrv *ippdrv;
-       struct exynos_drm_fimc_pdata *pdata;
-       struct fimc_driverdata *ddata;
        int ret;
 
-       pdata = pdev->dev.platform_data;
-       if (!pdata) {
-               dev_err(dev, "no platform data specified.\n");
-               return -EINVAL;
+       if (!dev->of_node) {
+               dev_err(dev, "device tree node not found.\n");
+               return -ENODEV;
        }
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
-       ddata = (struct fimc_driverdata *)
-               platform_get_device_id(pdev)->driver_data;
-
-       /* clock control */
-       ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
-       if (IS_ERR(ctx->sclk_fimc_clk)) {
-               dev_err(dev, "failed to get src fimc clock.\n");
-               return PTR_ERR(ctx->sclk_fimc_clk);
-       }
-       clk_enable(ctx->sclk_fimc_clk);
-
-       ctx->fimc_clk = devm_clk_get(dev, "fimc");
-       if (IS_ERR(ctx->fimc_clk)) {
-               dev_err(dev, "failed to get fimc clock.\n");
-               clk_disable(ctx->sclk_fimc_clk);
-               return PTR_ERR(ctx->fimc_clk);
-       }
-
-       ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
-       if (IS_ERR(ctx->wb_clk)) {
-               dev_err(dev, "failed to get writeback a clock.\n");
-               clk_disable(ctx->sclk_fimc_clk);
-               return PTR_ERR(ctx->wb_clk);
-       }
-
-       ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
-       if (IS_ERR(ctx->wb_b_clk)) {
-               dev_err(dev, "failed to get writeback b clock.\n");
-               clk_disable(ctx->sclk_fimc_clk);
-               return PTR_ERR(ctx->wb_b_clk);
-       }
+       ctx->ippdrv.dev = dev;
 
-       parent_clk = devm_clk_get(dev, ddata->parent_clk);
-
-       if (IS_ERR(parent_clk)) {
-               dev_err(dev, "failed to get parent clock.\n");
-               clk_disable(ctx->sclk_fimc_clk);
-               return PTR_ERR(parent_clk);
-       }
+       ret = fimc_parse_dt(ctx);
+       if (ret < 0)
+               return ret;
 
-       if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
-               dev_err(dev, "failed to set parent.\n");
-               clk_disable(ctx->sclk_fimc_clk);
-               return -EINVAL;
+       ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                               "samsung,sysreg");
+       if (IS_ERR(ctx->sysreg)) {
+               dev_err(dev, "syscon regmap lookup failed.\n");
+               return PTR_ERR(ctx->sysreg);
        }
 
-       devm_clk_put(dev, parent_clk);
-       clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
-
        /* resource memory */
        ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
@@ -1804,13 +1852,11 @@ static int fimc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* context initailization */
-       ctx->id = pdev->id;
-       ctx->pol = pdata->pol;
-       ctx->ddata = ddata;
+       ret = fimc_setup_clocks(ctx);
+       if (ret < 0)
+               goto err_free_irq;
 
        ippdrv = &ctx->ippdrv;
-       ippdrv->dev = dev;
        ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
        ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
        ippdrv->check_property = fimc_ippdrv_check_property;
@@ -1820,7 +1866,7 @@ static int fimc_probe(struct platform_device *pdev)
        ret = fimc_init_prop_list(ippdrv);
        if (ret < 0) {
                dev_err(dev, "failed to init property list.\n");
-               goto err_get_irq;
+               goto err_put_clk;
        }
 
        DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
@@ -1835,17 +1881,18 @@ static int fimc_probe(struct platform_device *pdev)
        ret = exynos_drm_ippdrv_register(ippdrv);
        if (ret < 0) {
                dev_err(dev, "failed to register drm fimc device.\n");
-               goto err_ippdrv_register;
+               goto err_pm_dis;
        }
 
        dev_info(&pdev->dev, "drm fimc registered successfully.\n");
 
        return 0;
 
-err_ippdrv_register:
-       devm_kfree(dev, ippdrv->prop_list);
+err_pm_dis:
        pm_runtime_disable(dev);
-err_get_irq:
+err_put_clk:
+       fimc_put_clocks(ctx);
+err_free_irq:
        free_irq(ctx->irq, ctx);
 
        return ret;
@@ -1857,10 +1904,10 @@ static int fimc_remove(struct platform_device *pdev)
        struct fimc_context *ctx = get_fimc_context(dev);
        struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 
-       devm_kfree(dev, ippdrv->prop_list);
        exynos_drm_ippdrv_unregister(ippdrv);
        mutex_destroy(&ctx->lock);
 
+       fimc_put_clocks(ctx);
        pm_runtime_set_suspended(dev);
        pm_runtime_disable(dev);
 
@@ -1915,36 +1962,22 @@ static int fimc_runtime_resume(struct device *dev)
 }
 #endif
 
-static struct fimc_driverdata exynos4210_fimc_data = {
-       .parent_clk = "mout_mpll",
-};
-
-static struct fimc_driverdata exynos4410_fimc_data = {
-       .parent_clk = "mout_mpll_user",
-};
-
-static struct platform_device_id fimc_driver_ids[] = {
-       {
-               .name           = "exynos4210-fimc",
-               .driver_data    = (unsigned long)&exynos4210_fimc_data,
-       }, {
-               .name           = "exynos4412-fimc",
-               .driver_data    = (unsigned long)&exynos4410_fimc_data,
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
-
 static const struct dev_pm_ops fimc_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
        SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
 };
 
+static const struct of_device_id fimc_of_match[] = {
+       { .compatible = "samsung,exynos4210-fimc" },
+       { .compatible = "samsung,exynos4212-fimc" },
+       { },
+};
+
 struct platform_driver fimc_driver = {
        .probe          = fimc_probe,
        .remove         = fimc_remove,
-       .id_table       = fimc_driver_ids,
        .driver         = {
+               .of_match_table = fimc_of_match,
                .name   = "exynos-drm-fimc",
                .owner  = THIS_MODULE,
                .pm     = &fimc_pm_ops,
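
Aside (not from the patch): the clock handling above replaces four individually named struct clk pointers with an index-named array plus a single put helper. A stripped-down skeleton of that pattern with hypothetical names, assuming only the standard clk_get()/clk_put() API:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

enum { EX_CLK_GATE, EX_CLK_BUS, EX_CLKS_MAX };

static const char * const ex_clk_names[EX_CLKS_MAX] = {
	[EX_CLK_GATE] = "gate",
	[EX_CLK_BUS]  = "bus",
};

static void ex_put_clocks(struct clk *clocks[EX_CLKS_MAX])
{
	int i;

	for (i = 0; i < EX_CLKS_MAX; i++) {
		if (IS_ERR(clocks[i]))
			continue;
		clk_put(clocks[i]);
		clocks[i] = ERR_PTR(-EINVAL);	/* mark the slot unused again */
	}
}

static int ex_get_clocks(struct device *dev, struct clk *clocks[EX_CLKS_MAX])
{
	int i, ret;

	for (i = 0; i < EX_CLKS_MAX; i++)
		clocks[i] = ERR_PTR(-EINVAL);

	for (i = 0; i < EX_CLKS_MAX; i++) {
		clocks[i] = clk_get(dev, ex_clk_names[i]);
		if (IS_ERR(clocks[i])) {
			ret = PTR_ERR(clocks[i]);
			ex_put_clocks(clocks);
			return ret;
		}
	}
	return 0;
}

fimc_setup_clocks() additionally treats the "mux"/"parent" entries as optional and sets the LCLK parent and rate, which this sketch leaves out.
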
index 98cc147..746b282 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
+#include <video/of_display_timing.h>
 #include <video/samsung_fimd.h>
 #include <drm/exynos_drm.h>
 
@@ -800,18 +801,18 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
        if (enable) {
                int ret;
 
-               ret = clk_enable(ctx->bus_clk);
+               ret = clk_prepare_enable(ctx->bus_clk);
                if (ret < 0)
                        return ret;
 
-               ret = clk_enable(ctx->lcd_clk);
+               ret = clk_prepare_enable(ctx->lcd_clk);
                if  (ret < 0) {
-                       clk_disable(ctx->bus_clk);
+                       clk_disable_unprepare(ctx->bus_clk);
                        return ret;
                }
        } else {
-               clk_disable(ctx->lcd_clk);
-               clk_disable(ctx->bus_clk);
+               clk_disable_unprepare(ctx->lcd_clk);
+               clk_disable_unprepare(ctx->bus_clk);
        }
 
        return 0;
@@ -884,10 +885,25 @@ static int fimd_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       pdata = pdev->dev.platform_data;
-       if (!pdata) {
-               dev_err(dev, "no platform data specified\n");
-               return -EINVAL;
+       if (pdev->dev.of_node) {
+               pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+               if (!pdata) {
+                       DRM_ERROR("memory allocation for pdata failed\n");
+                       return -ENOMEM;
+               }
+
+               ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
+                                       OF_USE_NATIVE_MODE);
+               if (ret) {
+                       DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
+                       return ret;
+               }
+       } else {
+               pdata = pdev->dev.platform_data;
+               if (!pdata) {
+                       DRM_ERROR("no platform data specified\n");
+                       return -EINVAL;
+               }
        }
 
        panel = &pdata->panel;
@@ -918,7 +934,7 @@ static int fimd_probe(struct platform_device *pdev)
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
        if (!res) {
                dev_err(dev, "irq request failed.\n");
                return -ENXIO;
@@ -980,9 +996,6 @@ static int fimd_remove(struct platform_device *pdev)
        if (ctx->suspended)
                goto out;
 
-       clk_disable(ctx->lcd_clk);
-       clk_disable(ctx->bus_clk);
-
        pm_runtime_set_suspended(dev);
        pm_runtime_put_sync(dev);
 
index 0e6fe00..cf4543f 100644 (file)
@@ -682,7 +682,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
 
-       exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
+       exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
+                                               EXYNOS_BO_WC, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
index 7c27df0..ba2f0f1 100644 (file)
@@ -51,21 +51,27 @@ struct drm_hdmi_context {
 
 int exynos_platform_device_hdmi_register(void)
 {
+       struct platform_device *pdev;
+
        if (exynos_drm_hdmi_pdev)
                return -EEXIST;
 
-       exynos_drm_hdmi_pdev = platform_device_register_simple(
+       pdev = platform_device_register_simple(
                        "exynos-drm-hdmi", -1, NULL, 0);
-       if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
-               return PTR_ERR(exynos_drm_hdmi_pdev);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       exynos_drm_hdmi_pdev = pdev;
 
        return 0;
 }
 
 void exynos_platform_device_hdmi_unregister(void)
 {
-       if (exynos_drm_hdmi_pdev)
+       if (exynos_drm_hdmi_pdev) {
                platform_device_unregister(exynos_drm_hdmi_pdev);
+               exynos_drm_hdmi_pdev = NULL;
+       }
 }
 
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
@@ -205,13 +211,45 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
                                const struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode)
 {
-       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+       struct drm_display_mode *m;
+       int mode_ok;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (hdmi_ops && hdmi_ops->mode_fixup)
-               hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
-                                    adjusted_mode);
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+       mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode);
+
+       /* just return if user desired mode exists. */
+       if (mode_ok == 0)
+               return;
+
+       /*
+        * otherwise, find the most suitable mode among modes and change it
+        * to adjusted_mode.
+        */
+       list_for_each_entry(m, &connector->modes, head) {
+               mode_ok = drm_hdmi_check_timing(subdrv_dev, m);
+
+               if (mode_ok == 0) {
+                       struct drm_mode_object base;
+                       struct list_head head;
+
+                       DRM_INFO("desired mode doesn't exist so\n");
+                       DRM_INFO("use the most suitable mode among modes.\n");
+
+                       DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+                               m->hdisplay, m->vdisplay, m->vrefresh);
+
+                       /* preserve display mode header while copying. */
+                       head = adjusted_mode->head;
+                       base = adjusted_mode->base;
+                       memcpy(adjusted_mode, m, sizeof(*m));
+                       adjusted_mode->head = head;
+                       adjusted_mode->base = base;
+                       break;
+               }
+       }
 }
 
 static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
index b7faa36..6b70944 100644 (file)
@@ -36,9 +36,6 @@ struct exynos_hdmi_ops {
        int (*power_on)(void *ctx, int mode);
 
        /* manager */
-       void (*mode_fixup)(void *ctx, struct drm_connector *connector,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode);
        void (*mode_set)(void *ctx, void *mode);
        void (*get_max_resol)(void *ctx, unsigned int *width,
                                unsigned int *height);
index 1adce07..29d2ad3 100644 (file)
@@ -47,6 +47,9 @@
 #define get_ipp_context(dev)   platform_get_drvdata(to_platform_device(dev))
 #define ipp_is_m2m_cmd(c)      (c == IPP_CMD_M2M)
 
+/* platform device pointer for ipp device. */
+static struct platform_device *exynos_drm_ipp_pdev;
+
 /*
  * A structure of event.
  *
@@ -102,6 +105,30 @@ static LIST_HEAD(exynos_drm_ippdrv_list);
 static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
 
+int exynos_platform_device_ipp_register(void)
+{
+       struct platform_device *pdev;
+
+       if (exynos_drm_ipp_pdev)
+               return -EEXIST;
+
+       pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       exynos_drm_ipp_pdev = pdev;
+
+       return 0;
+}
+
+void exynos_platform_device_ipp_unregister(void)
+{
+       if (exynos_drm_ipp_pdev) {
+               platform_device_unregister(exynos_drm_ipp_pdev);
+               exynos_drm_ipp_pdev = NULL;
+       }
+}
+
 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
 {
        DRM_DEBUG_KMS("%s\n", __func__);
index a40b9fb..947f09f 100644 (file)
@@ -674,7 +674,7 @@ static int rotator_probe(struct platform_device *pdev)
        }
 
        rot->clock = devm_clk_get(dev, "rotator");
-       if (IS_ERR_OR_NULL(rot->clock)) {
+       if (IS_ERR(rot->clock)) {
                dev_err(dev, "failed to get clock\n");
                ret = PTR_ERR(rot->clock);
                goto err_clk_get;
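
Aside (general clk API behaviour, not from the patch): clk_get() and devm_clk_get() report failure as an ERR_PTR()-encoded pointer, so IS_ERR() is the right test and PTR_ERR() the errno, which is what the hunk above switches to. A minimal pattern, with the clock name taken from the hunk:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_rotator_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "rotator");

	if (IS_ERR(clk))		/* errors are ERR_PTR()-encoded, not NULL */
		return PTR_ERR(clk);

	*out = clk;
	return 0;
}
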
index 2c5f266..bbfc384 100644 (file)
@@ -108,7 +108,20 @@ struct hdmi_tg_regs {
        u8 tg_3d[1];
 };
 
-struct hdmi_core_regs {
+struct hdmi_v13_core_regs {
+       u8 h_blank[2];
+       u8 v_blank[3];
+       u8 h_v_line[3];
+       u8 vsync_pol[1];
+       u8 int_pro_mode[1];
+       u8 v_blank_f[3];
+       u8 h_sync_gen[3];
+       u8 v_sync_gen1[3];
+       u8 v_sync_gen2[3];
+       u8 v_sync_gen3[3];
+};
+
+struct hdmi_v14_core_regs {
        u8 h_blank[2];
        u8 v2_blank[2];
        u8 v1_blank[2];
@@ -147,11 +160,23 @@ struct hdmi_core_regs {
        u8 vact_space_6[2];
 };
 
+struct hdmi_v13_conf {
+       struct hdmi_v13_core_regs core;
+       struct hdmi_tg_regs tg;
+};
+
 struct hdmi_v14_conf {
-       int pixel_clock;
-       struct hdmi_core_regs core;
+       struct hdmi_v14_core_regs core;
        struct hdmi_tg_regs tg;
+};
+
+struct hdmi_conf_regs {
+       int pixel_clock;
        int cea_video_id;
+       union {
+               struct hdmi_v13_conf v13_conf;
+               struct hdmi_v14_conf v14_conf;
+       } conf;
 };
 
 struct hdmi_context {
@@ -169,9 +194,8 @@ struct hdmi_context {
        struct i2c_client               *ddc_port;
        struct i2c_client               *hdmiphy_port;
 
-       /* current hdmiphy conf index */
-       int cur_conf;
-       struct hdmi_v14_conf            mode_conf;
+       /* current hdmiphy conf regs */
+       struct hdmi_conf_regs           mode_conf;
 
        struct hdmi_resources           res;
 
@@ -180,292 +204,60 @@ struct hdmi_context {
        enum hdmi_type                  type;
 };
 
-/* HDMI Version 1.3 */
-static const u8 hdmiphy_v13_conf27[32] = {
-       0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
-       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
-       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf27_027[32] = {
-       0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
-       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
-       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf74_175[32] = {
-       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
-       0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
-       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-       0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf74_25[32] = {
-       0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
-       0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
-       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
-       0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
-};
-
-static const u8 hdmiphy_v13_conf148_5[32] = {
-       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
-       0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
-       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
-       0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
-};
-
-struct hdmi_v13_tg_regs {
-       u8 cmd;
-       u8 h_fsz_l;
-       u8 h_fsz_h;
-       u8 hact_st_l;
-       u8 hact_st_h;
-       u8 hact_sz_l;
-       u8 hact_sz_h;
-       u8 v_fsz_l;
-       u8 v_fsz_h;
-       u8 vsync_l;
-       u8 vsync_h;
-       u8 vsync2_l;
-       u8 vsync2_h;
-       u8 vact_st_l;
-       u8 vact_st_h;
-       u8 vact_sz_l;
-       u8 vact_sz_h;
-       u8 field_chg_l;
-       u8 field_chg_h;
-       u8 vact_st2_l;
-       u8 vact_st2_h;
-       u8 vsync_top_hdmi_l;
-       u8 vsync_top_hdmi_h;
-       u8 vsync_bot_hdmi_l;
-       u8 vsync_bot_hdmi_h;
-       u8 field_top_hdmi_l;
-       u8 field_top_hdmi_h;
-       u8 field_bot_hdmi_l;
-       u8 field_bot_hdmi_h;
-};
-
-struct hdmi_v13_core_regs {
-       u8 h_blank[2];
-       u8 v_blank[3];
-       u8 h_v_line[3];
-       u8 vsync_pol[1];
-       u8 int_pro_mode[1];
-       u8 v_blank_f[3];
-       u8 h_sync_gen[3];
-       u8 v_sync_gen1[3];
-       u8 v_sync_gen2[3];
-       u8 v_sync_gen3[3];
-};
-
-struct hdmi_v13_preset_conf {
-       struct hdmi_v13_core_regs core;
-       struct hdmi_v13_tg_regs tg;
-};
-
-struct hdmi_v13_conf {
-       int width;
-       int height;
-       int vrefresh;
-       bool interlace;
-       int cea_video_id;
-       const u8 *hdmiphy_data;
-       const struct hdmi_v13_preset_conf *conf;
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = {
-       .core = {
-               .h_blank = {0x8a, 0x00},
-               .v_blank = {0x0d, 0x6a, 0x01},
-               .h_v_line = {0x0d, 0xa2, 0x35},
-               .vsync_pol = {0x01},
-               .int_pro_mode = {0x00},
-               .v_blank_f = {0x00, 0x00, 0x00},
-               .h_sync_gen = {0x0e, 0x30, 0x11},
-               .v_sync_gen1 = {0x0f, 0x90, 0x00},
-               /* other don't care */
-       },
-       .tg = {
-               0x00, /* cmd */
-               0x5a, 0x03, /* h_fsz */
-               0x8a, 0x00, 0xd0, 0x02, /* hact */
-               0x0d, 0x02, /* v_fsz */
-               0x01, 0x00, 0x33, 0x02, /* vsync */
-               0x2d, 0x00, 0xe0, 0x01, /* vact */
-               0x33, 0x02, /* field_chg */
-               0x49, 0x02, /* vact_st2 */
-               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
-               0x01, 0x00, 0x33, 0x02, /* field top/bot */
-       },
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = {
-       .core = {
-               .h_blank = {0x72, 0x01},
-               .v_blank = {0xee, 0xf2, 0x00},
-               .h_v_line = {0xee, 0x22, 0x67},
-               .vsync_pol = {0x00},
-               .int_pro_mode = {0x00},
-               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
-               .h_sync_gen = {0x6c, 0x50, 0x02},
-               .v_sync_gen1 = {0x0a, 0x50, 0x00},
-               .v_sync_gen2 = {0x01, 0x10, 0x00},
-               .v_sync_gen3 = {0x01, 0x10, 0x00},
-               /* other don't care */
-       },
-       .tg = {
-               0x00, /* cmd */
-               0x72, 0x06, /* h_fsz */
-               0x71, 0x01, 0x01, 0x05, /* hact */
-               0xee, 0x02, /* v_fsz */
-               0x01, 0x00, 0x33, 0x02, /* vsync */
-               0x1e, 0x00, 0xd0, 0x02, /* vact */
-               0x33, 0x02, /* field_chg */
-               0x49, 0x02, /* vact_st2 */
-               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
-               0x01, 0x00, 0x33, 0x02, /* field top/bot */
-       },
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = {
-       .core = {
-               .h_blank = {0xd0, 0x02},
-               .v_blank = {0x32, 0xB2, 0x00},
-               .h_v_line = {0x65, 0x04, 0xa5},
-               .vsync_pol = {0x00},
-               .int_pro_mode = {0x01},
-               .v_blank_f = {0x49, 0x2A, 0x23},
-               .h_sync_gen = {0x0E, 0xEA, 0x08},
-               .v_sync_gen1 = {0x07, 0x20, 0x00},
-               .v_sync_gen2 = {0x39, 0x42, 0x23},
-               .v_sync_gen3 = {0x38, 0x87, 0x73},
-               /* other don't care */
-       },
-       .tg = {
-               0x00, /* cmd */
-               0x50, 0x0A, /* h_fsz */
-               0xCF, 0x02, 0x81, 0x07, /* hact */
-               0x65, 0x04, /* v_fsz */
-               0x01, 0x00, 0x33, 0x02, /* vsync */
-               0x16, 0x00, 0x1c, 0x02, /* vact */
-               0x33, 0x02, /* field_chg */
-               0x49, 0x02, /* vact_st2 */
-               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
-               0x01, 0x00, 0x33, 0x02, /* field top/bot */
-       },
+struct hdmiphy_config {
+       int pixel_clock;
+       u8 conf[32];
 };
 
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = {
-       .core = {
-               .h_blank = {0xd0, 0x02},
-               .v_blank = {0x65, 0x6c, 0x01},
-               .h_v_line = {0x65, 0x04, 0xa5},
-               .vsync_pol = {0x00},
-               .int_pro_mode = {0x00},
-               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
-               .h_sync_gen = {0x0e, 0xea, 0x08},
-               .v_sync_gen1 = {0x09, 0x40, 0x00},
-               .v_sync_gen2 = {0x01, 0x10, 0x00},
-               .v_sync_gen3 = {0x01, 0x10, 0x00},
-               /* other don't care */
-       },
-       .tg = {
-               0x00, /* cmd */
-               0x50, 0x0A, /* h_fsz */
-               0xCF, 0x02, 0x81, 0x07, /* hact */
-               0x65, 0x04, /* v_fsz */
-               0x01, 0x00, 0x33, 0x02, /* vsync */
-               0x2d, 0x00, 0x38, 0x04, /* vact */
-               0x33, 0x02, /* field_chg */
-               0x48, 0x02, /* vact_st2 */
-               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
-               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+/* list of phy config settings */
+static const struct hdmiphy_config hdmiphy_v13_configs[] = {
+       {
+               .pixel_clock = 27000000,
+               .conf = {
+                       0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+                       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+                       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+                       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+               },
        },
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = {
-       .core = {
-               .h_blank = {0x18, 0x01},
-               .v_blank = {0x32, 0xB2, 0x00},
-               .h_v_line = {0x65, 0x84, 0x89},
-               .vsync_pol = {0x00},
-               .int_pro_mode = {0x01},
-               .v_blank_f = {0x49, 0x2A, 0x23},
-               .h_sync_gen = {0x56, 0x08, 0x02},
-               .v_sync_gen1 = {0x07, 0x20, 0x00},
-               .v_sync_gen2 = {0x39, 0x42, 0x23},
-               .v_sync_gen3 = {0xa4, 0x44, 0x4a},
-               /* other don't care */
+       {
+               .pixel_clock = 27027000,
+               .conf = {
+                       0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+                       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+                       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+                       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+               },
        },
-       .tg = {
-               0x00, /* cmd */
-               0x98, 0x08, /* h_fsz */
-               0x17, 0x01, 0x81, 0x07, /* hact */
-               0x65, 0x04, /* v_fsz */
-               0x01, 0x00, 0x33, 0x02, /* vsync */
-               0x16, 0x00, 0x1c, 0x02, /* vact */
-               0x33, 0x02, /* field_chg */
-               0x49, 0x02, /* vact_st2 */
-               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
-               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       {
+               .pixel_clock = 74176000,
+               .conf = {
+                       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+                       0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+                       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+                       0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+               },
        },
-};
-
-static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
-       .core = {
-               .h_blank = {0x18, 0x01},
-               .v_blank = {0x65, 0x6c, 0x01},
-               .h_v_line = {0x65, 0x84, 0x89},
-               .vsync_pol = {0x00},
-               .int_pro_mode = {0x00},
-               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
-               .h_sync_gen = {0x56, 0x08, 0x02},
-               .v_sync_gen1 = {0x09, 0x40, 0x00},
-               .v_sync_gen2 = {0x01, 0x10, 0x00},
-               .v_sync_gen3 = {0x01, 0x10, 0x00},
-               /* other don't care */
+       {
+               .pixel_clock = 74250000,
+               .conf = {
+                       0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+                       0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+                       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+                       0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+               },
        },
-       .tg = {
-               0x00, /* cmd */
-               0x98, 0x08, /* h_fsz */
-               0x17, 0x01, 0x81, 0x07, /* hact */
-               0x65, 0x04, /* v_fsz */
-               0x01, 0x00, 0x33, 0x02, /* vsync */
-               0x2d, 0x00, 0x38, 0x04, /* vact */
-               0x33, 0x02, /* field_chg */
-               0x48, 0x02, /* vact_st2 */
-               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
-               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       {
+               .pixel_clock = 148500000,
+               .conf = {
+                       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+                       0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+                       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+                       0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+               },
        },
 };
 
-static const struct hdmi_v13_conf hdmi_v13_confs[] = {
-       { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
-                       &hdmi_v13_conf_720p60 },
-       { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
-                       &hdmi_v13_conf_720p60 },
-       { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
-                       &hdmi_v13_conf_480p },
-       { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
-                       &hdmi_v13_conf_1080i50 },
-       { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
-                       &hdmi_v13_conf_1080p50 },
-       { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
-                       &hdmi_v13_conf_1080i60 },
-       { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
-                       &hdmi_v13_conf_1080p60 },
-};
-
-/* HDMI Version 1.4 */
-struct hdmiphy_config {
-       int pixel_clock;
-       u8 conf[32];
-};
-
-/* list of all required phy config settings */
 static const struct hdmiphy_config hdmiphy_v14_configs[] = {
        {
                .pixel_clock = 25200000,
@@ -873,22 +665,6 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
                hdmi_v14_regs_dump(hdata, prefix);
 }
 
-static int hdmi_v13_conf_index(struct drm_display_mode *mode)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
-               if (hdmi_v13_confs[i].width == mode->hdisplay &&
-                               hdmi_v13_confs[i].height == mode->vdisplay &&
-                               hdmi_v13_confs[i].vrefresh == mode->vrefresh &&
-                               hdmi_v13_confs[i].interlace ==
-                               ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
-                                true : false))
-                       return i;
-
-       return -EINVAL;
-}
-
 static u8 hdmi_chksum(struct hdmi_context *hdata,
                        u32 start, u8 len, u32 hdr_sum)
 {
@@ -943,11 +719,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
                hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
                                AVI_SAME_AS_PIC_ASPECT_RATIO);
 
-               if (hdata->type == HDMI_TYPE13)
-                       vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
-               else
-                       vic = hdata->mode_conf.cea_video_id;
-
+               vic = hdata->mode_conf.cea_video_id;
                hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
 
                chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
@@ -1000,63 +772,34 @@ static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
        return raw_edid;
 }
 
-static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
+static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
 {
-       int i;
+       const struct hdmiphy_config *confs;
+       int count, i;
 
-       DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
-                       check_timing->xres, check_timing->yres,
-                       check_timing->refresh, (check_timing->vmode &
-                       FB_VMODE_INTERLACED) ? true : false);
-
-       for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
-               if (hdmi_v13_confs[i].width == check_timing->xres &&
-                       hdmi_v13_confs[i].height == check_timing->yres &&
-                       hdmi_v13_confs[i].vrefresh == check_timing->refresh &&
-                       hdmi_v13_confs[i].interlace ==
-                       ((check_timing->vmode & FB_VMODE_INTERLACED) ?
-                        true : false))
-                               return 0;
-
-       /* TODO */
-
-       return -EINVAL;
-}
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-static int hdmi_v14_find_phy_conf(int pixel_clock)
-{
-       int i;
+       if (hdata->type == HDMI_TYPE13) {
+               confs = hdmiphy_v13_configs;
+               count = ARRAY_SIZE(hdmiphy_v13_configs);
+       } else if (hdata->type == HDMI_TYPE14) {
+               confs = hdmiphy_v14_configs;
+               count = ARRAY_SIZE(hdmiphy_v14_configs);
+       } else
+               return -EINVAL;
 
-       for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) {
-               if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock)
+       for (i = 0; i < count; i++)
+               if (confs[i].pixel_clock == pixel_clock)
                        return i;
-       }
 
        DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
        return -EINVAL;
 }
 
-static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
-{
-       int i;
-
-       DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
-                       check_timing->xres, check_timing->yres,
-                       check_timing->refresh, check_timing->pixclock,
-                       (check_timing->vmode & FB_VMODE_INTERLACED) ?
-                       true : false);
-
-       for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
-               if (hdmiphy_v14_configs[i].pixel_clock ==
-                       check_timing->pixclock)
-                       return 0;
-
-       return -EINVAL;
-}
-
 static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
 {
        struct hdmi_context *hdata = ctx;
+       int ret;
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1064,10 +807,10 @@ static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
                        timing->yres, timing->refresh,
                        timing->vmode);
 
-       if (hdata->type == HDMI_TYPE13)
-               return hdmi_v13_check_timing(timing);
-       else
-               return hdmi_v14_check_timing(timing);
+       ret = hdmi_find_phy_conf(hdata, timing->pixclock);
+       if (ret < 0)
+               return ret;
+       return 0;
 }
 
 static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1301,10 +1044,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
 
 static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
 {
-       const struct hdmi_v13_preset_conf *conf =
-               hdmi_v13_confs[hdata->cur_conf].conf;
-       const struct hdmi_v13_core_regs *core = &conf->core;
-       const struct hdmi_v13_tg_regs *tg = &conf->tg;
+       const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+       const struct hdmi_v13_core_regs *core =
+               &hdata->mode_conf.conf.v13_conf.core;
        int tries;
 
        /* setting core registers */
@@ -1334,34 +1076,34 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
        hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
        hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
        /* Timing generator registers */
-       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
-       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
-       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
 
        /* waiting for HDMIPHY's PLL to get to steady state */
        for (tries = 100; tries; --tries) {
@@ -1391,8 +1133,9 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
 
 static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
 {
-       struct hdmi_core_regs *core = &hdata->mode_conf.core;
-       struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
+       const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+       const struct hdmi_v14_core_regs *core =
+               &hdata->mode_conf.conf.v14_conf.core;
        int tries;
 
        /* setting core registers */
@@ -1624,17 +1367,16 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
        }
 
        /* pixel clock */
-       if (hdata->type == HDMI_TYPE13) {
-               hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
-       } else {
-               i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock);
-               if (i < 0) {
-                       DRM_ERROR("failed to find hdmiphy conf\n");
-                       return;
-               }
+       i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+       if (i < 0) {
+               DRM_ERROR("failed to find hdmiphy conf\n");
+               return;
+       }
 
+       if (hdata->type == HDMI_TYPE13)
+               hdmiphy_data = hdmiphy_v13_configs[i].conf;
+       else
                hdmiphy_data = hdmiphy_v14_configs[i].conf;
-       }
 
        memcpy(buffer, hdmiphy_data, 32);
        ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -1687,75 +1429,121 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
        hdmi_regs_dump(hdata, "start");
 }
 
-static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
 {
-       struct drm_display_mode *m;
-       struct hdmi_context *hdata = ctx;
-       int index;
+       int i;
+       BUG_ON(num_bytes > 4);
+       for (i = 0; i < num_bytes; i++)
+               reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
 
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+static void hdmi_v13_mode_set(struct hdmi_context *hdata,
+                       struct drm_display_mode *m)
+{
+       struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
+       struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+       unsigned int val;
 
-       drm_mode_set_crtcinfo(adjusted_mode, 0);
+       hdata->mode_conf.cea_video_id =
+               drm_match_cea_mode((struct drm_display_mode *)m);
+       hdata->mode_conf.pixel_clock = m->clock * 1000;
 
-       if (hdata->type == HDMI_TYPE13)
-               index = hdmi_v13_conf_index(adjusted_mode);
-       else
-               index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
+       hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+       hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
 
-       /* just return if user desired mode exists. */
-       if (index >= 0)
-               return;
+       val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+       hdmi_set_reg(core->vsync_pol, 1, val);
+
+       val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+       hdmi_set_reg(core->int_pro_mode, 1, val);
+
+       val = (m->hsync_start - m->hdisplay - 2);
+       val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+       val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0) << 20;
+       hdmi_set_reg(core->h_sync_gen, 3, val);
 
        /*
-        * otherwise, find the most suitable mode among modes and change it
-        * to adjusted_mode.
+        * Quirk of the exynos HDMI IP: hsync_start and hsync_end must be
+        * programmed 2 pixels less than the calculated values.
         */
-       list_for_each_entry(m, &connector->modes, head) {
-               if (hdata->type == HDMI_TYPE13)
-                       index = hdmi_v13_conf_index(m);
-               else
-                       index = hdmi_v14_find_phy_conf(m->clock * 1000);
-
-               if (index >= 0) {
-                       struct drm_mode_object base;
-                       struct list_head head;
-
-                       DRM_INFO("desired mode doesn't exist so\n");
-                       DRM_INFO("use the most suitable mode among modes.\n");
-
-                       DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
-                               m->hdisplay, m->vdisplay, m->vrefresh);
-
-                       /* preserve display mode header while copying. */
-                       head = adjusted_mode->head;
-                       base = adjusted_mode->base;
-                       memcpy(adjusted_mode, m, sizeof(*m));
-                       adjusted_mode->head = head;
-                       adjusted_mode->base = base;
-                       break;
-               }
+
+       /* The values below differ for interlaced and progressive modes */
+       if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+               /* Interlaced Mode */
+               val = ((m->vsync_end - m->vdisplay) / 2);
+               val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+               hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+               val = m->vtotal / 2;
+               val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+               hdmi_set_reg(core->v_blank, 3, val);
+
+               val = (m->vtotal +
+                       ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+               val |= m->vtotal << 11;
+               hdmi_set_reg(core->v_blank_f, 3, val);
+
+               val = ((m->vtotal / 2) + 7);
+               val |= ((m->vtotal / 2) + 2) << 12;
+               hdmi_set_reg(core->v_sync_gen2, 3, val);
+
+               val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+               val |= ((m->htotal / 2) +
+                       (m->hsync_start - m->hdisplay)) << 12;
+               hdmi_set_reg(core->v_sync_gen3, 3, val);
+
+               hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+               hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+
+               hdmi_set_reg(tg->vact_st2, 2, 0x249); /* Reset value + 1 */
+       } else {
+               /* Progressive Mode */
+
+               val = m->vtotal;
+               val |= (m->vtotal - m->vdisplay) << 11;
+               hdmi_set_reg(core->v_blank, 3, val);
+
+               hdmi_set_reg(core->v_blank_f, 3, 0);
+
+               val = (m->vsync_end - m->vdisplay);
+               val |= ((m->vsync_start - m->vdisplay) << 12);
+               hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+               hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value  */
+               hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value  */
+               hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+               hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+               hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
        }
-}
 
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
-{
-       int i;
-       BUG_ON(num_bytes > 4);
-       for (i = 0; i < num_bytes; i++)
-               reg_pair[i] = (value >> (8 * i)) & 0xff;
+       /* Timing generator registers */
+       hdmi_set_reg(tg->cmd, 1, 0x0);
+       hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+       hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+       hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+       hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+       hdmi_set_reg(tg->vsync, 2, 0x1);
+       hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+       hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+       hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+       hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+       hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+       hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+       hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
 }
 
 static void hdmi_v14_mode_set(struct hdmi_context *hdata,
                        struct drm_display_mode *m)
 {
-       struct hdmi_core_regs *core = &hdata->mode_conf.core;
-       struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
-
-       hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
+       struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+       struct hdmi_v14_core_regs *core =
+               &hdata->mode_conf.conf.v14_conf.core;
 
+       hdata->mode_conf.cea_video_id =
+               drm_match_cea_mode((struct drm_display_mode *)m);
        hdata->mode_conf.pixel_clock = m->clock * 1000;
+
        hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
        hdmi_set_reg(core->v_line, 2, m->vtotal);
        hdmi_set_reg(core->h_line, 2, m->htotal);
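The hunk above derives every timing register from the DRM mode instead of the old per-mode tables, with hdmi_set_reg() packing each computed value into consecutive little-endian register bytes. A minimal userspace sketch (illustrative only, mirroring the helper rather than reusing driver code) shows that packing and reproduces the h_v_line entry {0x65, 0x84, 0x89} from the removed v1.3 1080p table, using the standard 1080p totals htotal = 2200 and vtotal = 1125:

/*
 * Illustrative sketch, not driver code: set_reg() mirrors the semantics of
 * hdmi_set_reg() above, splitting a packed timing word into little-endian
 * register bytes. The 1080p totals below are chosen so the output matches
 * the removed static table entry {0x65, 0x84, 0x89}.
 */
#include <stdio.h>
#include <stdint.h>

static void set_reg(uint8_t *reg_pair, int num_bytes, uint32_t value)
{
	int i;

	for (i = 0; i < num_bytes; i++)
		reg_pair[i] = (value >> (8 * i)) & 0xff;
}

int main(void)
{
	uint8_t h_v_line[3];

	/* (htotal << 12) | vtotal, as in hdmi_v13_mode_set() above */
	set_reg(h_v_line, 3, (2200u << 12) | 1125u);
	printf("h_v_line = %02x %02x %02x\n",
	       h_v_line[0], h_v_line[1], h_v_line[2]);
	return 0;
}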
@@ -1852,25 +1640,22 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
        hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
        hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
        hdmi_set_reg(tg->tg_3d, 1, 0x0);
-
 }
 
 static void hdmi_mode_set(void *ctx, void *mode)
 {
        struct hdmi_context *hdata = ctx;
-       int conf_idx;
+       struct drm_display_mode *m = mode;
 
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n",
+               __func__, m->hdisplay, m->vdisplay,
+               m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
+               "INTERLACED" : "PROGRESSIVE");
 
-       if (hdata->type == HDMI_TYPE13) {
-               conf_idx = hdmi_v13_conf_index(mode);
-               if (conf_idx >= 0)
-                       hdata->cur_conf = conf_idx;
-               else
-                       DRM_DEBUG_KMS("not supported mode\n");
-       } else {
+       if (hdata->type == HDMI_TYPE13)
+               hdmi_v13_mode_set(hdata, mode);
+       else
                hdmi_v14_mode_set(hdata, mode);
-       }
 }
 
 static void hdmi_get_max_resol(void *ctx, unsigned int *width,
@@ -1983,7 +1768,6 @@ static struct exynos_hdmi_ops hdmi_ops = {
        .check_timing   = hdmi_check_timing,
 
        /* manager */
-       .mode_fixup     = hdmi_mode_fixup,
        .mode_set       = hdmi_mode_set,
        .get_max_resol  = hdmi_get_max_resol,
        .commit         = hdmi_commit,
@@ -2023,27 +1807,27 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
 
        /* get clocks, power */
        res->hdmi = devm_clk_get(dev, "hdmi");
-       if (IS_ERR_OR_NULL(res->hdmi)) {
+       if (IS_ERR(res->hdmi)) {
                DRM_ERROR("failed to get clock 'hdmi'\n");
                goto fail;
        }
        res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
-       if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+       if (IS_ERR(res->sclk_hdmi)) {
                DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
                goto fail;
        }
        res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
-       if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+       if (IS_ERR(res->sclk_pixel)) {
                DRM_ERROR("failed to get clock 'sclk_pixel'\n");
                goto fail;
        }
        res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
-       if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+       if (IS_ERR(res->sclk_hdmiphy)) {
                DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
                goto fail;
        }
        res->hdmiphy = devm_clk_get(dev, "hdmiphy");
-       if (IS_ERR_OR_NULL(res->hdmiphy)) {
+       if (IS_ERR(res->hdmiphy)) {
                DRM_ERROR("failed to get clock 'hdmiphy'\n");
                goto fail;
        }
index 2f4f72f..ec3e376 100644 (file)
@@ -643,12 +643,14 @@ static void mixer_win_reset(struct mixer_context *ctx)
        /* setting graphical layers */
        val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
        val |= MXR_GRP_CFG_WIN_BLEND_EN;
-       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
-       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
        val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
 
-       /* the same configuration for both layers */
+       /* Don't blend layer 0 onto the mixer background */
        mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+       /* Blend layer 1 into layer 0 */
+       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
        mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
 
        /* setting video layers */
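The mixer change above stops blending graphics layer 0 onto the mixer background and instead blends layer 1 into layer 0 with pre-multiplied pixel alpha. For reference, a short sketch of the conventional pre-multiplied "over" operation that the MXR_GRP_CFG_BLEND_PRE_MUL/PIXEL_BLEND_EN bits presumably select in fixed-function hardware; the formula is the standard definition, not taken from the driver:

/*
 * Reference sketch of pre-multiplied "over" compositing, assuming the
 * conventional definition; the mixer is expected to apply an equivalent
 * fixed-function blend for layer 1 over layer 0.
 */
#include <stdio.h>

struct px { float r, g, b, a; };	/* r, g, b already multiplied by a */

static struct px blend_premul_over(struct px src, struct px dst)
{
	struct px out;

	out.r = src.r + (1.0f - src.a) * dst.r;
	out.g = src.g + (1.0f - src.a) * dst.g;
	out.b = src.b + (1.0f - src.a) * dst.b;
	out.a = src.a + (1.0f - src.a) * dst.a;
	return out;
}

int main(void)
{
	struct px grp1 = { 0.5f, 0.0f, 0.0f, 0.5f };	/* layer 1, pre-multiplied */
	struct px grp0 = { 0.0f, 0.0f, 1.0f, 1.0f };	/* layer 0 */
	struct px out = blend_premul_over(grp1, grp0);

	printf("out = %.2f %.2f %.2f %.2f\n", out.r, out.g, out.b, out.a);
	return 0;
}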
@@ -820,7 +822,6 @@ static void mixer_win_disable(void *ctx, int win)
 
 static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
 {
-       struct mixer_context *mixer_ctx = ctx;
        u32 w, h;
 
        w = timing->xres;
@@ -831,9 +832,6 @@ static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
                timing->refresh, (timing->vmode &
                FB_VMODE_INTERLACED) ? true : false);
 
-       if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
-               return 0;
-
        if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
                (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
                (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
@@ -1047,13 +1045,13 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
        spin_lock_init(&mixer_res->reg_slock);
 
        mixer_res->mixer = devm_clk_get(dev, "mixer");
-       if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+       if (IS_ERR(mixer_res->mixer)) {
                dev_err(dev, "failed to get clock 'mixer'\n");
                return -ENODEV;
        }
 
        mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
-       if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+       if (IS_ERR(mixer_res->sclk_hdmi)) {
                dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
                return -ENODEV;
        }
@@ -1096,17 +1094,17 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
        struct resource *res;
 
        mixer_res->vp = devm_clk_get(dev, "vp");
-       if (IS_ERR_OR_NULL(mixer_res->vp)) {
+       if (IS_ERR(mixer_res->vp)) {
                dev_err(dev, "failed to get clock 'vp'\n");
                return -ENODEV;
        }
        mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
-       if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+       if (IS_ERR(mixer_res->sclk_mixer)) {
                dev_err(dev, "failed to get clock 'sclk_mixer'\n");
                return -ENODEV;
        }
        mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
-       if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+       if (IS_ERR(mixer_res->sclk_dac)) {
                dev_err(dev, "failed to get clock 'sclk_dac'\n");
                return -ENODEV;
        }
index b4f9ca1..3049613 100644 (file)
 #define EXYNOS_CLKSRC_SCLK                             (1 << 1)
 
 /* SYSREG for FIMC writeback */
-#define SYSREG_CAMERA_BLK                      (S3C_VA_SYS + 0x0218)
-#define SYSREG_ISP_BLK                         (S3C_VA_SYS + 0x020c)
-#define SYSREG_FIMD0WB_DEST_MASK       (0x3 << 23)
-#define SYSREG_FIMD0WB_DEST_SHIFT      23
+#define SYSREG_CAMERA_BLK                      (0x0218)
+#define SYSREG_FIMD0WB_DEST_MASK               (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT              23
 
 #endif /* EXYNOS_REGS_FIMC_H */
index 1188f0f..1f6e2df 100644 (file)
@@ -2,10 +2,15 @@ config DRM_GMA500
        tristate "Intel GMA5/600 KMS Framebuffer"
        depends on DRM && PCI && X86
        select FB_CFB_COPYAREA
-        select FB_CFB_FILLRECT
-        select FB_CFB_IMAGEBLIT
-        select DRM_KMS_HELPER
-        select DRM_TTM
+       select FB_CFB_FILLRECT
+       select FB_CFB_IMAGEBLIT
+       select DRM_KMS_HELPER
+       select DRM_TTM
+       # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
+       select ACPI_VIDEO if ACPI
+       select BACKLIGHT_CLASS_DEVICE if ACPI
+       select VIDEO_OUTPUT_CONTROL if ACPI
+       select INPUT if ACPI
        help
          Say yes for an experimental 2D KMS framebuffer driver for the
          Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
index 8c17534..7b8386f 100644 (file)
@@ -276,6 +276,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
                goto failed_connector;
 
        connector = &psb_intel_connector->base;
+       connector->polled = DRM_CONNECTOR_POLL_HPD;
        drm_connector_init(dev, connector,
                &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
index e223b50..464153d 100644 (file)
@@ -319,6 +319,7 @@ void cdv_hdmi_init(struct drm_device *dev,
                goto err_priv;
 
        connector = &psb_intel_connector->base;
+       connector->polled = DRM_CONNECTOR_POLL_HPD;
        encoder = &psb_intel_encoder->base;
        drm_connector_init(dev, connector,
                           &cdv_hdmi_connector_funcs,
index 2590cac..1534e22 100644 (file)
@@ -431,7 +431,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        fbdev->psb_fb_helper.fbdev = info;
 
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
-       strcpy(info->fix.id, "psbfb");
+       strcpy(info->fix.id, "psbdrmfb");
 
        info->flags = FBINFO_DEFAULT;
        if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
@@ -772,8 +772,8 @@ void psb_modeset_init(struct drm_device *dev)
        for (i = 0; i < dev_priv->num_pipe; i++)
                psb_intel_crtc_init(dev, i, mode_dev);
 
-       dev->mode_config.max_width = 2048;
-       dev->mode_config.max_height = 2048;
+       dev->mode_config.max_width = 4096;
+       dev->mode_config.max_height = 4096;
 
        psb_setup_outputs(dev);
 
index 054e26e..1f82183 100644 (file)
@@ -80,7 +80,8 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
  *     the GTT. This is protected via the gtt mutex which the caller
  *     must hold.
  */
-static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
+                         int resume)
 {
        u32 __iomem *gtt_slot;
        u32 pte;
@@ -97,8 +98,10 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
        gtt_slot = psb_gtt_entry(dev, r);
        pages = r->pages;
 
-       /* Make sure changes are visible to the GPU */
-       set_pages_array_wc(pages, r->npage);
+       if (!resume) {
+               /* Make sure changes are visible to the GPU */
+               set_pages_array_wc(pages, r->npage);
+       }
 
        /* Write our page entries into the GTT itself */
        for (i = r->roll; i < r->npage; i++) {
@@ -269,7 +272,7 @@ int psb_gtt_pin(struct gtt_range *gt)
                ret = psb_gtt_attach_pages(gt);
                if (ret < 0)
                        goto out;
-               ret = psb_gtt_insert(dev, gt);
+               ret = psb_gtt_insert(dev, gt, 0);
                if (ret < 0) {
                        psb_gtt_detach_pages(gt);
                        goto out;
@@ -421,9 +424,11 @@ int psb_gtt_init(struct drm_device *dev, int resume)
        int ret = 0;
        uint32_t pte;
 
-       mutex_init(&dev_priv->gtt_mutex);
+       if (!resume) {
+               mutex_init(&dev_priv->gtt_mutex);
+               psb_gtt_alloc(dev);
+       }
 
-       psb_gtt_alloc(dev);
        pg = &dev_priv->gtt;
 
        /* Enable the GTT */
@@ -505,7 +510,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
        /*
         *      Map the GTT and the stolen memory area
         */
-       dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+       if (!resume)
+               dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
                                                gtt_pages << PAGE_SHIFT);
        if (!dev_priv->gtt_map) {
                dev_err(dev->dev, "Failure to map gtt.\n");
@@ -513,7 +519,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
                goto out_err;
        }
 
-       dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+       if (!resume)
+               dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
+                                                stolen_size);
        if (!dev_priv->vram_addr) {
                dev_err(dev->dev, "Failure to map stolen base.\n");
                ret = -ENOMEM;
@@ -549,3 +557,31 @@ out_err:
        psb_gtt_takedown(dev);
        return ret;
 }
+
+int psb_gtt_restore(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct resource *r = dev_priv->gtt_mem->child;
+       struct gtt_range *range;
+       unsigned int restored = 0, total = 0, size = 0;
+
+       /* On resume, the gtt_mutex is already initialized */
+       mutex_lock(&dev_priv->gtt_mutex);
+       psb_gtt_init(dev, 1);
+
+       while (r != NULL) {
+               range = container_of(r, struct gtt_range, resource);
+               if (range->pages) {
+                       psb_gtt_insert(dev, range, 1);
+                       size += range->resource.end - range->resource.start;
+                       restored++;
+               }
+               r = r->sibling;
+               total++;
+       }
+       mutex_unlock(&dev_priv->gtt_mutex);
+       DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
+                        total, (size / 1024));
+
+       return 0;
+}
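psb_gtt_restore() walks the children of the GTT/stolen resource through their sibling links and re-inserts every range that still has backing pages, so the GTT mappings are rebuilt after hibernation. Below is a minimal sketch of that child/sibling traversal over a generic tree; the node type and field names are hypothetical stand-ins, not the kernel's struct resource:

/*
 * Illustrative walk over a resource-style child/sibling list; the names are
 * hypothetical and stand in for the parent resource and its allocated ranges.
 */
#include <stddef.h>

struct node {
	struct node *child;
	struct node *sibling;
	int mapped;			/* range still has backing pages */
};

int restore_children(struct node *parent)
{
	struct node *n;
	int restored = 0;

	for (n = parent->child; n != NULL; n = n->sibling) {
		if (n->mapped)
			restored++;	/* re-insert this range into the GTT */
	}
	return restored;
}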
index aa17423..6191d10 100644 (file)
@@ -60,5 +60,5 @@ extern int psb_gtt_pin(struct gtt_range *gt);
 extern void psb_gtt_unpin(struct gtt_range *gt);
 extern void psb_gtt_roll(struct drm_device *dev,
                                        struct gtt_range *gt, int roll);
-
+extern int psb_gtt_restore(struct drm_device *dev);
 #endif
index 403fffb..d349734 100644 (file)
@@ -218,12 +218,11 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
        bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
        vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
 
-       lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+       lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
        if (!lvds_bl) {
                dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
                return;
        }
-       memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
        dev_priv->lvds_bl = lvds_bl;
 }
 
index c6267c9..978ae4b 100644 (file)
@@ -19,8 +19,8 @@
  *
  */
 
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
 
 #include <drm/drmP.h>
 #include <drm/drm_dp_helper.h>
@@ -618,4 +618,4 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
 #define                PORT_IDPC       8
 #define                PORT_IDPD       9
 
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
index 2d4ab48..3abf831 100644 (file)
@@ -92,8 +92,8 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
 {
        struct mdfld_dsi_pkg_sender *sender =
                                mdfld_dsi_get_pkg_sender(dsi_config);
-       struct drm_device *dev = sender->dev;
-       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_device *dev;
+       struct drm_psb_private *dev_priv;
        u32 gen_ctrl_val;
 
        if (!sender) {
@@ -101,6 +101,9 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
                return;
        }
 
+       dev = sender->dev;
+       dev_priv = dev->dev_private;
+
        /* Set default display backlight value to 85% (0xd8)*/
        mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
                                true);
index 889b854..b6b135f 100644 (file)
@@ -110,6 +110,8 @@ static void gma_resume_display(struct pci_dev *pdev)
        PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
        pci_write_config_word(pdev, PSB_GMCH_CTRL,
                        dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+       psb_gtt_restore(dev); /* Rebuild our GTT mappings */
        dev_priv->ops->restore_regs(dev);
 }
 
@@ -313,3 +315,18 @@ int psb_runtime_idle(struct device *dev)
        else
                return 1;
 }
+
+int gma_power_thaw(struct device *_dev)
+{
+       return gma_power_resume(_dev);
+}
+
+int gma_power_freeze(struct device *_dev)
+{
+       return gma_power_suspend(_dev);
+}
+
+int gma_power_restore(struct device *_dev)
+{
+       return gma_power_resume(_dev);
+}
index 1969d2e..56d8708 100644 (file)
@@ -41,6 +41,9 @@ void gma_power_uninit(struct drm_device *dev);
  */
 int gma_power_suspend(struct device *dev);
 int gma_power_resume(struct device *dev);
+int gma_power_thaw(struct device *dev);
+int gma_power_freeze(struct device *dev);
+int gma_power_restore(struct device *_dev);
 
 /*
  * These are the functions the driver should use to wrap all hw access
index 111e3df..bddea58 100644 (file)
@@ -601,6 +601,9 @@ static void psb_remove(struct pci_dev *pdev)
 static const struct dev_pm_ops psb_pm_ops = {
        .resume = gma_power_resume,
        .suspend = gma_power_suspend,
+       .thaw = gma_power_thaw,
+       .freeze = gma_power_freeze,
+       .restore = gma_power_restore,
        .runtime_suspend = psb_runtime_suspend,
        .runtime_resume = psb_runtime_resume,
        .runtime_idle = psb_runtime_idle,
index a7fd6c4..6053b8a 100644 (file)
@@ -876,7 +876,6 @@ extern const struct psb_ops cdv_chip_ops;
 #define PSB_D_MSVDX   (1 << 9)
 #define PSB_D_TOPAZ   (1 << 10)
 
-extern int drm_psb_no_fb;
 extern int drm_idle_check_interval;
 
 /*
index 9edb190..6e8f42b 100644 (file)
@@ -50,119 +50,41 @@ struct psb_intel_p2_t {
        int p2_slow, p2_fast;
 };
 
-#define INTEL_P2_NUM                 2
-
 struct psb_intel_limit_t {
        struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
        struct psb_intel_p2_t p2;
 };
 
-#define I8XX_DOT_MIN             25000
-#define I8XX_DOT_MAX            350000
-#define I8XX_VCO_MIN            930000
-#define I8XX_VCO_MAX           1400000
-#define I8XX_N_MIN                   3
-#define I8XX_N_MAX                  16
-#define I8XX_M_MIN                  96
-#define I8XX_M_MAX                 140
-#define I8XX_M1_MIN                 18
-#define I8XX_M1_MAX                 26
-#define I8XX_M2_MIN                  6
-#define I8XX_M2_MAX                 16
-#define I8XX_P_MIN                   4
-#define I8XX_P_MAX                 128
-#define I8XX_P1_MIN                  2
-#define I8XX_P1_MAX                 33
-#define I8XX_P1_LVDS_MIN             1
-#define I8XX_P1_LVDS_MAX             6
-#define I8XX_P2_SLOW                 4
-#define I8XX_P2_FAST                 2
-#define I8XX_P2_LVDS_SLOW            14
-#define I8XX_P2_LVDS_FAST            14        /* No fast option */
-#define I8XX_P2_SLOW_LIMIT      165000
-
-#define I9XX_DOT_MIN             20000
-#define I9XX_DOT_MAX            400000
-#define I9XX_VCO_MIN           1400000
-#define I9XX_VCO_MAX           2800000
-#define I9XX_N_MIN                   1
-#define I9XX_N_MAX                   6
-#define I9XX_M_MIN                  70
-#define I9XX_M_MAX                 120
-#define I9XX_M1_MIN                  8
-#define I9XX_M1_MAX                 18
-#define I9XX_M2_MIN                  3
-#define I9XX_M2_MAX                  7
-#define I9XX_P_SDVO_DAC_MIN          5
-#define I9XX_P_SDVO_DAC_MAX         80
-#define I9XX_P_LVDS_MIN                      7
-#define I9XX_P_LVDS_MAX                     98
-#define I9XX_P1_MIN                  1
-#define I9XX_P1_MAX                  8
-#define I9XX_P2_SDVO_DAC_SLOW               10
-#define I9XX_P2_SDVO_DAC_FAST                5
-#define I9XX_P2_SDVO_DAC_SLOW_LIMIT     200000
-#define I9XX_P2_LVDS_SLOW                   14
-#define I9XX_P2_LVDS_FAST                    7
-#define I9XX_P2_LVDS_SLOW_LIMIT                 112000
-
-#define INTEL_LIMIT_I8XX_DVO_DAC    0
-#define INTEL_LIMIT_I8XX_LVDS      1
-#define INTEL_LIMIT_I9XX_SDVO_DAC   2
-#define INTEL_LIMIT_I9XX_LVDS      3
+#define INTEL_LIMIT_I9XX_SDVO_DAC   0
+#define INTEL_LIMIT_I9XX_LVDS      1
 
 static const struct psb_intel_limit_t psb_intel_limits[] = {
-       {                       /* INTEL_LIMIT_I8XX_DVO_DAC */
-        .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
-        .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
-        .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
-        .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
-        .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
-        .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
-        .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
-        .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
-        .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
-               .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
-        },
-       {                       /* INTEL_LIMIT_I8XX_LVDS */
-        .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
-        .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
-        .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
-        .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
-        .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
-        .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
-        .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
-        .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
-        .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
-               .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
-        },
        {                       /* INTEL_LIMIT_I9XX_SDVO_DAC */
-        .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
-        .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
-        .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
-        .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
-        .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
-        .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
-        .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
-        .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
-        .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
-               .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
-               I9XX_P2_SDVO_DAC_FAST},
+        .dot = {.min = 20000, .max = 400000},
+        .vco = {.min = 1400000, .max = 2800000},
+        .n = {.min = 1, .max = 6},
+        .m = {.min = 70, .max = 120},
+        .m1 = {.min = 8, .max = 18},
+        .m2 = {.min = 3, .max = 7},
+        .p = {.min = 5, .max = 80},
+        .p1 = {.min = 1, .max = 8},
+        .p2 = {.dot_limit = 200000,
+               .p2_slow = 10, .p2_fast = 5},
         },
        {                       /* INTEL_LIMIT_I9XX_LVDS */
-        .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
-        .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
-        .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
-        .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
-        .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
-        .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
-        .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
-        .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+        .dot = {.min = 20000, .max = 400000},
+        .vco = {.min = 1400000, .max = 2800000},
+        .n = {.min = 1, .max = 6},
+        .m = {.min = 70, .max = 120},
+        .m1 = {.min = 8, .max = 18},
+        .m2 = {.min = 3, .max = 7},
+        .p = {.min = 7, .max = 98},
+        .p1 = {.min = 1, .max = 8},
         /* The single-channel range is 25-112Mhz, and dual-channel
          * is 80-224Mhz.  Prefer single channel as much as possible.
          */
-        .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
-               .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
+        .p2 = {.dot_limit = 112000,
+               .p2_slow = 14, .p2_fast = 7},
         },
 };
 
@@ -177,9 +99,7 @@ static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
        return limit;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
 {
        clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
        clock->p = clock->p1 * clock->p2;
@@ -187,22 +107,6 @@ static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
        clock->dot = clock->vco / clock->p;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
-{
-       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-       clock->p = clock->p1 * clock->p2;
-       clock->vco = refclk * clock->m / (clock->n + 2);
-       clock->dot = clock->vco / clock->p;
-}
-
-static void psb_intel_clock(struct drm_device *dev, int refclk,
-                       struct psb_intel_clock_t *clock)
-{
-       return i9xx_clock(refclk, clock);
-}
-
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
@@ -308,7 +212,7 @@ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
                                     clock.p1++) {
                                        int this_err;
 
-                                       psb_intel_clock(dev, refclk, &clock);
+                                       psb_intel_clock(refclk, &clock);
 
                                        if (!psb_intel_PLL_is_valid
                                            (crtc, &clock))
@@ -1068,7 +972,7 @@ static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        return 0;
 }
 
-void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
                         u16 *green, u16 *blue, uint32_t type, uint32_t size)
 {
        struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
@@ -1149,9 +1053,9 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
                if ((dpll & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        /* XXX: might not be 66MHz */
-                       i8xx_clock(66000, &clock);
+                       psb_intel_clock(66000, &clock);
                } else
-                       i8xx_clock(48000, &clock);
+                       psb_intel_clock(48000, &clock);
        } else {
                if (dpll & PLL_P1_DIVIDE_BY_TWO)
                        clock.p1 = 2;
@@ -1166,7 +1070,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
                else
                        clock.p2 = 2;
 
-               i8xx_clock(48000, &clock);
+               psb_intel_clock(48000, &clock);
        }
 
        /* XXX: It would be nice to validate the clocks, but we can't reuse
@@ -1225,7 +1129,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
        struct gtt_range *gt;
index 535b49a..3724b97 100644 (file)
@@ -21,8 +21,5 @@
 #define _INTEL_DISPLAY_H_
 
 bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
-void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
-                        u16 *green, u16 *blue, uint32_t type, uint32_t size);
-void psb_intel_crtc_destroy(struct drm_crtc *crtc);
 
 #endif
index 90f2d11..4dcae42 100644 (file)
@@ -32,9 +32,6 @@
 /* maximum connectors per crtcs in the mode set */
 #define INTELFB_CONN_LIMIT 4
 
-#define INTEL_I2C_BUS_DVO 1
-#define INTEL_I2C_BUS_SDVO 2
-
 /* Intel Pipe Clone Bit */
 #define INTEL_HDMIB_CLONE_BIT 1
 #define INTEL_HDMIC_CLONE_BIT 2
 #define INTEL_OUTPUT_DISPLAYPORT 9
 #define INTEL_OUTPUT_EDP 10
 
-#define INTEL_DVO_CHIP_NONE 0
-#define INTEL_DVO_CHIP_LVDS 1
-#define INTEL_DVO_CHIP_TMDS 2
-#define INTEL_DVO_CHIP_TVOUT 4
-
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
 
index d914719..0be30e4 100644 (file)
 #define PIPEACONF_DISABLE              0
 #define PIPEACONF_DOUBLE_WIDE          (1 << 30)
 #define PIPECONF_ACTIVE                        (1 << 30)
-#define I965_PIPECONF_ACTIVE           (1 << 30)
 #define PIPECONF_DSIPLL_LOCK           (1 << 29)
 #define PIPEACONF_SINGLE_WIDE          0
 #define PIPEACONF_PIPE_UNLOCKED                0
index a4cc777..19e3660 100644 (file)
@@ -134,6 +134,9 @@ struct psb_intel_sdvo {
 
        /* Input timings for adjusted_mode */
        struct psb_intel_sdvo_dtd input_dtd;
+
+       /* Saved SDVO output states */
+       uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
 };
 
 struct psb_intel_sdvo_connector {
@@ -1830,6 +1833,34 @@ done:
 #undef CHECK_PROPERTY
 }
 
+static void psb_intel_sdvo_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_sdvo *sdvo =
+                               to_psb_intel_sdvo(&psb_intel_encoder->base);
+
+       sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
+}
+
+static void psb_intel_sdvo_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_encoder *encoder =
+                               &psb_intel_attached_encoder(connector)->base;
+       struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
+       struct drm_crtc *crtc = encoder->crtc;
+
+       REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
+
+       /* Force a full mode set on the crtc. We're supposed to have the
+          mode_config lock already. */
+       if (connector->status == connector_status_connected)
+               drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+                                        NULL);
+}
+
 static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
        .dpms = psb_intel_sdvo_dpms,
        .mode_fixup = psb_intel_sdvo_mode_fixup,
@@ -1840,6 +1871,8 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
 
 static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
+       .save = psb_intel_sdvo_save,
+       .restore = psb_intel_sdvo_restore,
        .detect = psb_intel_sdvo_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = psb_intel_sdvo_set_property,
index 8652cdf..029eccf 100644 (file)
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
 
        vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
 
-       if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+       if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
                dsp_int = 1;
 
        /* FIXME: Handle Medfield
index 603045b..debb7f1 100644 (file)
@@ -21,8 +21,8 @@
  *
  **************************************************************************/
 
-#ifndef _SYSIRQ_H_
-#define _SYSIRQ_H_
+#ifndef _PSB_IRQ_H_
+#define _PSB_IRQ_H_
 
 #include <drm/drmP.h>
 
@@ -44,4 +44,4 @@ u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
 
 int mdfld_enable_te(struct drm_device *dev, int pipe);
 void mdfld_disable_te(struct drm_device *dev, int pipe);
-#endif /* _SYSIRQ_H_ */
+#endif /* _PSB_IRQ_H_ */
index c6dfc14..dc53a52 100644 (file)
@@ -129,7 +129,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
                goto error;
 
        i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0);
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
                pages[i++] = sg_page_iter_page(&sg_iter);
 
        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
@@ -272,7 +272,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
                        drm_gem_object_reference(&obj->base);
-                       dma_buf_put(dma_buf);
                        return &obj->base;
                }
        }
@@ -282,6 +281,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
        if (IS_ERR(attach))
                return ERR_CAST(attach);
 
+       get_dma_buf(dma_buf);
+
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL) {
                ret = -ENOMEM;
@@ -301,5 +302,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
 fail_detach:
        dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+
        return ERR_PTR(ret);
 }
index a96b6a3..117ce38 100644 (file)
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
        if (eb == NULL) {
                int size = args->buffer_count;
                int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
-               BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
+               BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
index c063b9f..58b4a53 100644 (file)
@@ -45,6 +45,9 @@
 
 struct intel_crt {
        struct intel_encoder base;
+       /* DPMS state is stored in the connector, which we need in the
+        * encoder's enable/disable callbacks */
+       struct intel_connector *connector;
        bool force_hotplug_required;
        u32 adpa_reg;
 };
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       u32 temp;
-
-       temp = I915_READ(crt->adpa_reg);
-       temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
-       temp &= ~ADPA_DAC_ENABLE;
-       I915_WRITE(crt->adpa_reg, temp);
-}
-
-static void intel_enable_crt(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       u32 temp;
-
-       temp = I915_READ(crt->adpa_reg);
-       temp |= ADPA_DAC_ENABLE;
-       I915_WRITE(crt->adpa_reg, temp);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
        I915_WRITE(crt->adpa_reg, temp);
 }
 
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+       intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+       struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+       intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+
 static void intel_crt_dpms(struct drm_connector *connector, int mode)
 {
        struct drm_device *dev = connector->dev;
@@ -749,6 +742,7 @@ void intel_crt_init(struct drm_device *dev)
        }
 
        connector = &intel_connector->base;
+       crt->connector = intel_connector;
        drm_connector_init(dev, &intel_connector->base,
                           &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
index 93694da..b14bec9 100644 (file)
@@ -2519,12 +2519,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
        i2c_del_adapter(&intel_dp->adapter);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+               mutex_lock(&dev->mode_config.mutex);
                ironlake_panel_vdd_off_sync(intel_dp);
+               mutex_unlock(&dev->mode_config.mutex);
        }
        kfree(intel_dig_port);
 }
index 4d932c4..bf29b2f 100644 (file)
@@ -115,6 +115,8 @@ struct mga_fbdev {
        void *sysram;
        int size;
        struct ttm_bo_kmap_obj mapping;
+       int x1, y1, x2, y2; /* dirty rect */
+       spinlock_t dirty_lock;
 };
 
 struct mga_crtc {
@@ -215,7 +217,7 @@ mgag200_bo(struct ttm_buffer_object *bo)
 {
        return container_of(bo, struct mgag200_bo, bo);
 }
-                               /* mga_crtc.c */
+                               /* mgag200_crtc.c */
 void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                             u16 blue, int regno);
 void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -225,7 +227,7 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 int mgag200_modeset_init(struct mga_device *mdev);
 void mgag200_modeset_fini(struct mga_device *mdev);
 
-                               /* mga_fbdev.c */
+                               /* mgag200_fb.c */
 int mgag200_fbdev_init(struct mga_device *mdev);
 void mgag200_fbdev_fini(struct mga_device *mdev);
 
@@ -254,7 +256,7 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle,
                         uint64_t *offset);
-                               /* mga_i2c.c */
+                               /* mgag200_i2c.c */
 struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
 void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
 
index d2253f6..5da824c 100644
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
        int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
        int ret;
        bool unmap = false;
+       bool store_for_later = false;
+       int x2, y2;
+       unsigned long flags;
 
        obj = mfbdev->mfb.obj;
        bo = gem_to_mga_bo(obj);
 
+       /*
+        * Try to reserve the BO; if that fails with -EBUSY,
+        * the BO is being moved and we should store up
+        * the damage until later.
+        */
        ret = mgag200_bo_reserve(bo, true);
        if (ret) {
-               DRM_ERROR("failed to reserve fb bo\n");
+               if (ret != -EBUSY)
+                       return;
+
+               store_for_later = true;
+       }
+
+       x2 = x + width - 1;
+       y2 = y + height - 1;
+       spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+       if (mfbdev->y1 < y)
+               y = mfbdev->y1;
+       if (mfbdev->y2 > y2)
+               y2 = mfbdev->y2;
+       if (mfbdev->x1 < x)
+               x = mfbdev->x1;
+       if (mfbdev->x2 > x2)
+               x2 = mfbdev->x2;
+
+       if (store_for_later) {
+               mfbdev->x1 = x;
+               mfbdev->x2 = x2;
+               mfbdev->y1 = y;
+               mfbdev->y2 = y2;
+               spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
                return;
        }
 
+       mfbdev->x1 = mfbdev->y1 = INT_MAX;
+       mfbdev->x2 = mfbdev->y2 = 0;
+       spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
        if (!bo->kmap.virtual) {
                ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
                if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
                }
                unmap = true;
        }
-       for (i = y; i < y + height; i++) {
+       for (i = y; i <= y2; i++) {
                /* assume equal stride for now */
                src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
-               memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+               memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
        }
        if (unmap)
@@ -105,12 +141,9 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
                                   struct drm_gem_object **gobj_p)
 {
        struct drm_device *dev = afbdev->helper.dev;
-       u32 bpp, depth;
        u32 size;
        struct drm_gem_object *gobj;
-
        int ret = 0;
-       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
        size = mode_cmd->pitches[0] * mode_cmd->height;
        ret = mgag200_gem_create(dev, size, true, &gobj);
@@ -249,19 +282,19 @@ int mgag200_fbdev_init(struct mga_device *mdev)
        struct mga_fbdev *mfbdev;
        int ret;
 
-       mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+       mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
        if (!mfbdev)
                return -ENOMEM;
 
        mdev->mfbdev = mfbdev;
        mfbdev->helper.funcs = &mga_fb_helper_funcs;
+       spin_lock_init(&mfbdev->dirty_lock);
 
        ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
                                 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
-       if (ret) {
-               kfree(mfbdev);
+       if (ret)
                return ret;
-       }
+
        drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
 
        /* disable all the possible outputs/crtcs before entering KMS mode */
@@ -278,6 +311,4 @@ void mgag200_fbdev_fini(struct mga_device *mdev)
                return;
 
        mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
-       kfree(mdev->mfbdev);
-       mdev->mfbdev = NULL;
 }
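
The mga_dirty_update() changes above turn a failed BO reserve into deferred damage: when mgag200_bo_reserve() returns -EBUSY the rectangle is merged into the x1/y1/x2/y2 bounding box under dirty_lock, and the next successful update flushes the merged region and resets the box to "empty" (INT_MAX/0). Below is a self-contained sketch of just that bounding-box merge; the names are hypothetical and the locking is left out for brevity.

#include <limits.h>
#include <stdio.h>

/* Hypothetical accumulator mirroring the x1/y1/x2/y2 fields added to
 * struct mga_fbdev. */
struct dirty_rect {
        int x1, y1, x2, y2;
};

static void dirty_reset(struct dirty_rect *d)
{
        d->x1 = d->y1 = INT_MAX;        /* "empty" sentinel, as in the patch */
        d->x2 = d->y2 = 0;
}

/* Merge a new damage rectangle (x, y, w, h) into the stored one. */
static void dirty_merge(struct dirty_rect *d, int x, int y, int w, int h)
{
        int x2 = x + w - 1, y2 = y + h - 1;

        if (d->x1 < x)
                x = d->x1;
        if (d->y1 < y)
                y = d->y1;
        if (d->x2 > x2)
                x2 = d->x2;
        if (d->y2 > y2)
                y2 = d->y2;

        d->x1 = x;
        d->y1 = y;
        d->x2 = x2;
        d->y2 = y2;
}

int main(void)
{
        struct dirty_rect d;

        dirty_reset(&d);
        dirty_merge(&d, 10, 10, 4, 4);  /* BO busy: store for later */
        dirty_merge(&d, 0, 20, 8, 2);   /* still busy: grow the box */
        printf("flush (%d,%d)-(%d,%d)\n", d.x1, d.y1, d.x2, d.y2);
        dirty_reset(&d);                /* flushed: start empty again */
        return 0;
}

Encoding the empty box as x1 = y1 = INT_MAX, x2 = y2 = 0 means the first merge always replaces it, which is why the patch resets the fields that way after a flush.
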
index 64297c7..9905923 100644
@@ -76,15 +76,6 @@ static const struct drm_mode_config_funcs mga_mode_funcs = {
        .fb_create = mgag200_user_framebuffer_create,
 };
 
-/* Unmap the framebuffer from the core and release the memory */
-static void mga_vram_fini(struct mga_device *mdev)
-{
-       pci_iounmap(mdev->dev->pdev, mdev->rmmio);
-       mdev->rmmio = NULL;
-       if (mdev->mc.vram_base)
-               release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
-}
-
 static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 {
        int offset;
@@ -140,7 +131,7 @@ static int mga_vram_init(struct mga_device *mdev)
        remove_conflicting_framebuffers(aper, "mgafb", true);
        kfree(aper);
 
-       if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+       if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
                                "mgadrmfb_vram")) {
                DRM_ERROR("can't reserve VRAM\n");
                return -ENXIO;
@@ -173,13 +164,13 @@ static int mgag200_device_init(struct drm_device *dev,
        mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
        mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
 
-       if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+       if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
                                "mgadrmfb_mmio")) {
                DRM_ERROR("can't reserve mmio registers\n");
                return -ENOMEM;
        }
 
-       mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+       mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
        if (mdev->rmmio == NULL)
                return -ENOMEM;
 
@@ -188,10 +179,8 @@ static int mgag200_device_init(struct drm_device *dev,
                mdev->reg_1e24 = RREG32(0x1e24);
 
        ret = mga_vram_init(mdev);
-       if (ret) {
-               release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+       if (ret)
                return ret;
-       }
 
        mdev->bpp_shifts[0] = 0;
        mdev->bpp_shifts[1] = 1;
@@ -200,12 +189,6 @@ static int mgag200_device_init(struct drm_device *dev,
        return 0;
 }
 
-void mgag200_device_fini(struct mga_device *mdev)
-{
-       release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
-       mga_vram_fini(mdev);
-}
-
 /*
  * Functions here will be called by the core once it's bound the driver to
  * a PCI device
@@ -217,7 +200,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
        struct mga_device *mdev;
        int r;
 
-       mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+       mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
        if (mdev == NULL)
                return -ENOMEM;
        dev->dev_private = (void *)mdev;
@@ -234,8 +217,6 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
        dev->mode_config.funcs = (void *)&mga_mode_funcs;
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;
 
@@ -258,8 +239,6 @@ int mgag200_driver_unload(struct drm_device *dev)
        mgag200_fbdev_fini(mdev);
        drm_mode_config_cleanup(dev);
        mgag200_mm_fini(mdev);
-       mgag200_device_fini(mdev);
-       kfree(mdev);
        dev->dev_private = NULL;
        return 0;
 }
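
The mgag200_main.c hunks above move the driver onto managed resources: devm_kzalloc(), devm_request_mem_region() and pcim_iomap() tie the allocation, the memory regions and the BAR mapping to the PCI device's lifetime, which is why mga_vram_fini(), mgag200_device_fini() and the explicit kfree()/release_mem_region() error paths can simply be deleted. The toy userspace analogue below illustrates the idea with a hypothetical per-device resource list; it is not the real devres API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical miniature of devres: every managed allocation is recorded
 * on a per-device list and released in one place when the device goes
 * away, so individual error paths no longer need matching free() calls. */
struct managed {
        void *ptr;
        struct managed *next;
};

struct device {
        struct managed *res;
};

static void *dev_kzalloc(struct device *dev, size_t size)
{
        struct managed *m = malloc(sizeof(*m));

        if (!m)
                return NULL;
        m->ptr = calloc(1, size);
        if (!m->ptr) {
                free(m);
                return NULL;
        }
        m->next = dev->res;
        dev->res = m;                   /* remembered for automatic release */
        return m->ptr;
}

static void dev_release_all(struct device *dev)
{
        while (dev->res) {
                struct managed *m = dev->res;

                dev->res = m->next;
                free(m->ptr);
                free(m);
        }
}

int main(void)
{
        struct device dev = { NULL };
        char *state = dev_kzalloc(&dev, 128);   /* no explicit free below */

        if (!state)
                return 1;
        printf("driver state at %p\n", (void *)state);
        dev_release_all(&dev);                  /* single teardown point */
        return 0;
}

The driver-visible win is the same as in the patch: error paths just return, and teardown collapses into one place.
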
index fe22bb7..7337013 100644
@@ -1261,9 +1261,8 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
 };
 
 /* CRTC setup */
-static void mga_crtc_init(struct drm_device *dev)
+static void mga_crtc_init(struct mga_device *mdev)
 {
-       struct mga_device *mdev = dev->dev_private;
        struct mga_crtc *mga_crtc;
        int i;
 
@@ -1274,7 +1273,7 @@ static void mga_crtc_init(struct drm_device *dev)
        if (mga_crtc == NULL)
                return;
 
-       drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+       drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
        mdev->mode_info.crtc = mga_crtc;
@@ -1529,7 +1528,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
 
        mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
 
-       mga_crtc_init(mdev->dev);
+       mga_crtc_init(mdev);
 
        encoder = mga_encoder_init(mdev->dev);
        if (!encoder) {
index 8fc9d92..401c989 100644
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
 
        ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
        if (ret) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("reserve failed %p\n", bo);
+               if (ret != -ERESTARTSYS && ret != -EBUSY)
+                       DRM_ERROR("reserve failed %p %d\n", bo, ret);
                return ret;
        }
        return 0;
index 90f9140..998e8b4 100644
@@ -53,15 +53,6 @@ nouveau-y += core/subdev/clock/nva3.o
 nouveau-y += core/subdev/clock/nvc0.o
 nouveau-y += core/subdev/clock/pllnv04.o
 nouveau-y += core/subdev/clock/pllnva3.o
-nouveau-y += core/subdev/device/base.o
-nouveau-y += core/subdev/device/nv04.o
-nouveau-y += core/subdev/device/nv10.o
-nouveau-y += core/subdev/device/nv20.o
-nouveau-y += core/subdev/device/nv30.o
-nouveau-y += core/subdev/device/nv40.o
-nouveau-y += core/subdev/device/nv50.o
-nouveau-y += core/subdev/device/nvc0.o
-nouveau-y += core/subdev/device/nve0.o
 nouveau-y += core/subdev/devinit/base.o
 nouveau-y += core/subdev/devinit/nv04.o
 nouveau-y += core/subdev/devinit/nv05.o
@@ -126,6 +117,7 @@ nouveau-y += core/subdev/therm/ic.o
 nouveau-y += core/subdev/therm/temp.o
 nouveau-y += core/subdev/therm/nv40.o
 nouveau-y += core/subdev/therm/nv50.o
+nouveau-y += core/subdev/therm/nv84.o
 nouveau-y += core/subdev/therm/nva3.o
 nouveau-y += core/subdev/therm/nvd0.o
 nouveau-y += core/subdev/timer/base.o
@@ -150,6 +142,15 @@ nouveau-y += core/engine/copy/nvc0.o
 nouveau-y += core/engine/copy/nve0.o
 nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/nv04.o
+nouveau-y += core/engine/device/nv10.o
+nouveau-y += core/engine/device/nv20.o
+nouveau-y += core/engine/device/nv30.o
+nouveau-y += core/engine/device/nv40.o
+nouveau-y += core/engine/device/nv50.o
+nouveau-y += core/engine/device/nvc0.o
+nouveau-y += core/engine/device/nve0.o
 nouveau-y += core/engine/disp/base.o
 nouveau-y += core/engine/disp/nv04.o
 nouveau-y += core/engine/disp/nv50.o
@@ -159,6 +160,7 @@ nouveau-y += core/engine/disp/nva0.o
 nouveau-y += core/engine/disp/nva3.o
 nouveau-y += core/engine/disp/nvd0.o
 nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/nvf0.o
 nouveau-y += core/engine/disp/dacnv50.o
 nouveau-y += core/engine/disp/dport.o
 nouveau-y += core/engine/disp/hdanva3.o
@@ -212,7 +214,7 @@ nouveau-y += core/engine/vp/nve0.o
 
 # drm/core
 nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
-nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
+nouveau-y += nouveau_vga.o nouveau_agp.o
 nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
 nouveau-y += nouveau_prime.o nouveau_abi16.o
 nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
@@ -224,9 +226,7 @@ nouveau-y += nouveau_connector.o nouveau_dp.o
 nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
 
 # drm/kms/nv04:nv50
-nouveau-y += nouveau_hw.o nouveau_calc.o
-nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
-nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
+include $(src)/dispnv04/Makefile
 
 # drm/kms/nv50-
 nouveau-y += nv50_display.o
index 295c221..9079c0a 100644
@@ -27,7 +27,7 @@
 #include <core/handle.h>
 #include <core/option.h>
 
-#include <subdev/device.h>
+#include <engine/device.h>
 
 static void
 nouveau_client_dtor(struct nouveau_object *object)
@@ -58,8 +58,9 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
                return -ENODEV;
 
        ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
-                                    NV_CLIENT_CLASS, nouveau_device_sclass,
-                                    0, length, pobject);
+                                    NV_CLIENT_CLASS, NULL,
+                                    (1ULL << NVDEV_ENGINE_DEVICE),
+                                    length, pobject);
        client = *pobject;
        if (ret)
                return ret;
index 09b3bd5..c8bed4a 100644
@@ -33,7 +33,6 @@ nouveau_engine_create_(struct nouveau_object *parent,
                       const char *iname, const char *fname,
                       int length, void **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nouveau_engine *engine;
        int ret;
 
@@ -43,7 +42,8 @@ nouveau_engine_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+       if (parent &&
+           !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
                if (!enable)
                        nv_warn(engine, "disabled, %s=1 to enable\n", iname);
                return -ENODEV;
index 6d01e0f..7eb81c1 100644
@@ -27,8 +27,10 @@ static void
 nouveau_event_put_locked(struct nouveau_event *event, int index,
                         struct nouveau_eventh *handler)
 {
-       if (!--event->index[index].refs)
-               event->disable(event, index);
+       if (!--event->index[index].refs) {
+               if (event->disable)
+                       event->disable(event, index);
+       }
        list_del(&handler->head);
 }
 
@@ -53,8 +55,10 @@ nouveau_event_get(struct nouveau_event *event, int index,
        spin_lock_irqsave(&event->lock, flags);
        if (index < event->index_nr) {
                list_add(&handler->head, &event->index[index].list);
-               if (!event->index[index].refs++)
-                       event->enable(event, index);
+               if (!event->index[index].refs++) {
+                       if (event->enable)
+                               event->enable(event, index);
+               }
        }
        spin_unlock_irqrestore(&event->lock, flags);
 }
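
The event.c hunk above makes the enable/disable hooks optional while keeping the refcounted transitions: the hardware hook fires only on the 0->1 and 1->0 reference transitions, and only if the backend supplied one. A compact sketch of that pattern follows (plain C, hypothetical names, locking omitted).

#include <stdio.h>

/* Hypothetical event source: 'refs' counts attached handlers; both hooks
 * may be NULL, as in the patched nouveau_event_get()/put(). */
struct event_index {
        int refs;
        void (*enable)(int index);      /* may be NULL */
        void (*disable)(int index);     /* may be NULL */
};

static void event_get(struct event_index *ev, int index)
{
        if (!ev->refs++ && ev->enable)  /* 0 -> 1: turn the source on */
                ev->enable(index);
}

static void event_put(struct event_index *ev, int index)
{
        if (!--ev->refs && ev->disable) /* 1 -> 0: turn the source off */
                ev->disable(index);
}

static void hw_enable(int index)  { printf("enable irq %d\n", index); }
static void hw_disable(int index) { printf("disable irq %d\n", index); }

int main(void)
{
        struct event_index vblank = { 0, hw_enable, hw_disable };
        struct event_index silent = { 0, NULL, NULL };  /* no hooks: still safe */

        event_get(&vblank, 0);  /* enable irq 0 */
        event_get(&vblank, 0);  /* refcount only */
        event_put(&vblank, 0);
        event_put(&vblank, 0);  /* disable irq 0 */

        event_get(&silent, 1);  /* would call a NULL hook without the check */
        event_put(&silent, 1);
        return 0;
}
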
index 3b2e7b6..7f48e28 100644
@@ -136,26 +136,30 @@ nouveau_object_ctor(struct nouveau_object *parent,
                    struct nouveau_object **pobject)
 {
        struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
+       struct nouveau_object *object = NULL;
        int ret;
 
-       *pobject = NULL;
-
-       ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
+       ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
+       *pobject = object;
        if (ret < 0) {
                if (ret != -ENODEV) {
                        nv_error(parent, "failed to create 0x%08x, %d\n",
                                 oclass->handle, ret);
                }
 
-               if (*pobject) {
-                       ofuncs->dtor(*pobject);
+               if (object) {
+                       ofuncs->dtor(object);
                        *pobject = NULL;
                }
 
                return ret;
        }
 
-       nv_debug(*pobject, "created\n");
+       if (ret == 0) {
+               nv_debug(object, "created\n");
+               atomic_set(&object->refcount, 1);
+       }
+
        return 0;
 }
 
@@ -327,6 +331,7 @@ nouveau_object_inc(struct nouveau_object *object)
        }
 
        ret = nv_ofuncs(object)->init(object);
+       atomic_set(&object->usecount, 1);
        if (ret) {
                nv_error(object, "init failed, %d\n", ret);
                goto fail_self;
@@ -357,6 +362,7 @@ nouveau_object_decf(struct nouveau_object *object)
        nv_trace(object, "stopping...\n");
 
        ret = nv_ofuncs(object)->fini(object, false);
+       atomic_set(&object->usecount, 0);
        if (ret)
                nv_warn(object, "failed fini, %d\n", ret);
 
@@ -381,6 +387,7 @@ nouveau_object_decs(struct nouveau_object *object)
        nv_trace(object, "suspending...\n");
 
        ret = nv_ofuncs(object)->fini(object, true);
+       atomic_set(&object->usecount, 0);
        if (ret) {
                nv_error(object, "failed suspend, %d\n", ret);
                return ret;
index db7c549..313380c 100644
@@ -24,6 +24,7 @@
 
 #include <core/object.h>
 #include <core/parent.h>
+#include <core/client.h>
 
 int
 nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
@@ -50,7 +51,12 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
        while (mask) {
                int i = ffsll(mask) - 1;
 
-               if ((engine = nouveau_engine(parent, i))) {
+               if (nv_iclass(parent, NV_CLIENT_CLASS))
+                       engine = nv_engine(nv_client(parent)->device);
+               else
+                       engine = nouveau_engine(parent, i);
+
+               if (engine) {
                        oclass = engine->sclass;
                        while (oclass->ofuncs) {
                                if ((oclass->handle & 0xffff) == handle) {
@@ -29,7 +29,7 @@
 
 #include <core/class.h>
 
-#include <subdev/device.h>
+#include <engine/device.h>
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
@@ -55,7 +55,6 @@ nouveau_device_find(u64 name)
 struct nouveau_devobj {
        struct nouveau_parent base;
        struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
-       bool created;
 };
 
 static const u64 disable_map[] = {
@@ -173,7 +172,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
                        case 0xa0: device->card_type = NV_50; break;
                        case 0xc0: device->card_type = NV_C0; break;
                        case 0xd0: device->card_type = NV_D0; break;
-                       case 0xe0: device->card_type = NV_E0; break;
+                       case 0xe0:
+                       case 0xf0: device->card_type = NV_E0; break;
                        default:
                                break;
                        }
@@ -238,26 +238,24 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
        }
 
        /* ensure requested subsystems are available for use */
-       for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
+       for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
                if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
                        continue;
 
-               if (!device->subdev[i]) {
-                       ret = nouveau_object_ctor(nv_object(device), NULL,
-                                                 oclass, NULL, i,
-                                                 &devobj->subdev[i]);
-                       if (ret == -ENODEV)
-                               continue;
-                       if (ret)
-                               return ret;
-
-                       if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
-                               nouveau_subdev_reset(devobj->subdev[i]);
-               } else {
+               if (device->subdev[i]) {
                        nouveau_object_ref(device->subdev[i],
                                          &devobj->subdev[i]);
+                       continue;
                }
 
+               ret = nouveau_object_ctor(nv_object(device), NULL,
+                                         oclass, NULL, i,
+                                         &devobj->subdev[i]);
+               if (ret == -ENODEV)
+                       continue;
+               if (ret)
+                       return ret;
+
                /* note: can't init *any* subdevs until devinit has been run
                 * due to not knowing exactly what the vbios init tables will
                 * mess with.  devinit also can't be run until all of its
@@ -273,6 +271,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
                                ret = nouveau_object_inc(subdev);
                                if (ret)
                                        return ret;
+                               atomic_dec(&nv_object(device)->usecount);
+                       } else
+                       if (subdev) {
+                               nouveau_subdev_reset(subdev);
                        }
                }
        }
@@ -292,74 +294,6 @@ nouveau_devobj_dtor(struct nouveau_object *object)
        nouveau_parent_destroy(&devobj->base);
 }
 
-static int
-nouveau_devobj_init(struct nouveau_object *object)
-{
-       struct nouveau_devobj *devobj = (void *)object;
-       struct nouveau_object *subdev;
-       int ret, i;
-
-       ret = nouveau_parent_init(&devobj->base);
-       if (ret)
-               return ret;
-
-       for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
-               if ((subdev = devobj->subdev[i])) {
-                       if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-                               ret = nouveau_object_inc(subdev);
-                               if (ret)
-                                       goto fail;
-                       }
-               }
-       }
-
-       devobj->created = true;
-       return 0;
-
-fail:
-       for (--i; i >= 0; i--) {
-               if ((subdev = devobj->subdev[i])) {
-                       if (!nv_iclass(subdev, NV_ENGINE_CLASS))
-                               nouveau_object_dec(subdev, false);
-               }
-       }
-
-       return ret;
-}
-
-static int
-nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nouveau_devobj *devobj = (void *)object;
-       struct nouveau_object *subdev;
-       int ret, i;
-
-       for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
-               if ((subdev = devobj->subdev[i])) {
-                       if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-                               ret = nouveau_object_dec(subdev, suspend);
-                               if (ret && suspend)
-                                       goto fail;
-                       }
-               }
-       }
-
-       ret = nouveau_parent_fini(&devobj->base, suspend);
-fail:
-       for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
-               if ((subdev = devobj->subdev[i])) {
-                       if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-                               ret = nouveau_object_inc(subdev);
-                               if (ret) {
-                                       /* XXX */
-                               }
-                       }
-               }
-       }
-
-       return ret;
-}
-
 static u8
 nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
 {
@@ -400,8 +334,8 @@ static struct nouveau_ofuncs
 nouveau_devobj_ofuncs = {
        .ctor = nouveau_devobj_ctor,
        .dtor = nouveau_devobj_dtor,
-       .init = nouveau_devobj_init,
-       .fini = nouveau_devobj_fini,
+       .init = _nouveau_parent_init,
+       .fini = _nouveau_parent_fini,
        .rd08 = nouveau_devobj_rd08,
        .rd16 = nouveau_devobj_rd16,
        .rd32 = nouveau_devobj_rd32,
@@ -413,12 +347,76 @@ nouveau_devobj_ofuncs = {
 /******************************************************************************
  * nouveau_device: engine functions
  *****************************************************************************/
-struct nouveau_oclass
+static struct nouveau_oclass
 nouveau_device_sclass[] = {
        { 0x0080, &nouveau_devobj_ofuncs },
        {}
 };
 
+static int
+nouveau_device_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nouveau_device *device = (void *)object;
+       struct nouveau_object *subdev;
+       int ret, i;
+
+       for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
+               if ((subdev = device->subdev[i])) {
+                       if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+                               ret = nouveau_object_dec(subdev, suspend);
+                               if (ret && suspend)
+                                       goto fail;
+                       }
+               }
+       }
+
+       ret = 0;
+fail:
+       for (; ret && i < NVDEV_SUBDEV_NR; i++) {
+               if ((subdev = device->subdev[i])) {
+                       if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+                               ret = nouveau_object_inc(subdev);
+                               if (ret) {
+                                       /* XXX */
+                               }
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int
+nouveau_device_init(struct nouveau_object *object)
+{
+       struct nouveau_device *device = (void *)object;
+       struct nouveau_object *subdev;
+       int ret, i;
+
+       for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+               if ((subdev = device->subdev[i])) {
+                       if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+                               ret = nouveau_object_inc(subdev);
+                               if (ret)
+                                       goto fail;
+                       } else {
+                               nouveau_subdev_reset(subdev);
+                       }
+               }
+       }
+
+       ret = 0;
+fail:
+       for (--i; ret && i >= 0; i--) {
+               if ((subdev = device->subdev[i])) {
+                       if (!nv_iclass(subdev, NV_ENGINE_CLASS))
+                               nouveau_object_dec(subdev, false);
+               }
+       }
+
+       return ret;
+}
+
 static void
 nouveau_device_dtor(struct nouveau_object *object)
 {
@@ -428,17 +426,19 @@ nouveau_device_dtor(struct nouveau_object *object)
        list_del(&device->head);
        mutex_unlock(&nv_devices_mutex);
 
-       if (device->base.mmio)
-               iounmap(device->base.mmio);
+       if (nv_subdev(device)->mmio)
+               iounmap(nv_subdev(device)->mmio);
 
-       nouveau_subdev_destroy(&device->base);
+       nouveau_engine_destroy(&device->base);
 }
 
 static struct nouveau_oclass
 nouveau_device_oclass = {
-       .handle = NV_SUBDEV(DEVICE, 0x00),
+       .handle = NV_ENGINE(DEVICE, 0x00),
        .ofuncs = &(struct nouveau_ofuncs) {
                .dtor = nouveau_device_dtor,
+               .init = nouveau_device_init,
+               .fini = nouveau_device_fini,
        },
 };
 
@@ -456,13 +456,12 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
                        goto done;
        }
 
-       ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
+       ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
                                     "DEVICE", "device", length, pobject);
        device = *pobject;
        if (ret)
                goto done;
 
-       atomic_set(&nv_object(device)->usecount, 2);
        device->pdev = pdev;
        device->handle = name;
        device->cfgopt = cfg;
@@ -470,6 +469,7 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
        device->name = sname;
 
        nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
+       nv_engine(device)->sclass = nouveau_device_sclass;
        list_add(&device->head, &nv_devices);
 done:
        mutex_unlock(&nv_devices_mutex);
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/i2c.h>
@@ -34,6 +33,7 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
@@ -35,6 +34,7 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
@@ -36,6 +35,7 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
@@ -35,6 +34,7 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/vm.h>
@@ -37,6 +36,7 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
@@ -38,6 +37,7 @@
 #include <subdev/vm.h>
 #include <subdev/bar.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -83,7 +83,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
@@ -109,7 +109,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
@@ -135,7 +135,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
@@ -161,7 +161,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
@@ -187,7 +187,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
@@ -213,7 +213,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
@@ -265,7 +265,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
@@ -291,7 +291,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
@@ -40,6 +39,7 @@
 #include <subdev/vm.h>
 #include <subdev/bar.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -285,6 +285,34 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
                break;
+       case 0xd7:
+               device->cname = "GF117";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+               break;
        default:
                nv_fatal(device, "unknown Fermi chipset\n");
                return -EINVAL;
@@ -22,7 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/device.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
@@ -40,6 +39,7 @@
 #include <subdev/vm.h>
 #include <subdev/bar.h>
 
+#include <engine/device.h>
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 #include <engine/software.h>
@@ -141,6 +141,40 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                break;
+       case 0xf0:
+               device->cname = "GK110";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+#if 0
+               device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+#endif
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
+#if 0
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+#endif
+               break;
        default:
                nv_fatal(device, "unknown Kepler chipset\n");
                return -EINVAL;
index fa27b02..31cc8fe 100644
@@ -191,7 +191,7 @@ dp_link_train_cr(struct dp_state *dp)
 static int
 dp_link_train_eq(struct dp_state *dp)
 {
-       bool eq_done, cr_done = true;
+       bool eq_done = false, cr_done = true;
        int tries = 0, i;
 
        dp_set_training_pattern(dp, 2);
index 02e369f..6a38402 100644
@@ -572,7 +572,8 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
        priv->base.vblank->priv = priv;
        priv->base.vblank->enable = nv50_disp_base_vblank_enable;
        priv->base.vblank->disable = nv50_disp_base_vblank_disable;
-       return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+       return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+                               &base->ramht);
 }
 
 static void
@@ -719,7 +720,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
        if (nv_mclass(parent) != NV_DEVICE_CLASS) {
                atomic_inc(&parent->refcount);
                *pobject = parent;
-               return 0;
+               return 1;
        }
 
        /* allocate display hardware to client */
index 788dd34..019eacd 100644
@@ -473,7 +473,8 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
        priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
        priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
 
-       return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+       return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+                               &base->ramht);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
new file mode 100644
index 0000000..a488c36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nvf0_disp_sclass[] = {
+       { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+       { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+       { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+       { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+       { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+       {}
+};
+
+static struct nouveau_oclass
+nvf0_disp_base_oclass[] = {
+       { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
+};
+
+static int
+nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int heads = nv_rd32(parent, 0x022448);
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, heads,
+                                 "PDISP", "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nvd0_disp_intr;
+       INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+       priv->sclass = nvf0_disp_sclass;
+       priv->head.nr = heads;
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nvd0_hda_eld;
+       priv->sor.hdmi = nvd0_hdmi_ctrl;
+       priv->sor.dp = &nvd0_sor_dp_func;
+       return 0;
+}
+
+struct nouveau_oclass
+nvf0_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x92),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvf0_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
index d152875..944e73a 100644
@@ -50,6 +50,9 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
                case NVE0_DISP_MAST_CLASS:
                case NVE0_DISP_SYNC_CLASS:
                case NVE0_DISP_OVLY_CLASS:
+               case NVF0_DISP_MAST_CLASS:
+               case NVF0_DISP_SYNC_CLASS:
+               case NVF0_DISP_OVLY_CLASS:
                        break;
                default:
                        return -EINVAL;
index 7341ebe..d3ec436 100644
@@ -91,6 +91,8 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
        if (!chan->user)
                return -EFAULT;
 
+       nouveau_event_trigger(priv->cevent, 0);
+
        chan->size = size;
        return 0;
 }
@@ -167,6 +169,7 @@ nouveau_fifo_destroy(struct nouveau_fifo *priv)
 {
        kfree(priv->channel);
        nouveau_event_destroy(&priv->uevent);
+       nouveau_event_destroy(&priv->cevent);
        nouveau_engine_destroy(&priv->base);
 }
 
@@ -191,6 +194,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
        if (!priv->channel)
                return -ENOMEM;
 
+       ret = nouveau_event_create(1, &priv->cevent);
+       if (ret)
+               return ret;
+
        ret = nouveau_event_create(1, &priv->uevent);
        if (ret)
                return ret;
index 840af61..ddaeb55 100644
@@ -210,7 +210,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
        nv_parent(chan)->object_attach = nv50_fifo_object_attach;
        nv_parent(chan)->object_detach = nv50_fifo_object_detach;
 
-       ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+       ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+                               &chan->ramht);
        if (ret)
                return ret;
 
@@ -263,7 +264,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
        nv_parent(chan)->object_attach = nv50_fifo_object_attach;
        nv_parent(chan)->object_detach = nv50_fifo_object_detach;
 
-       ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+       ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+                              &chan->ramht);
        if (ret)
                return ret;
 
@@ -373,17 +375,17 @@ nv50_fifo_context_ctor(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
-                                NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
+                                0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
                                 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
                                &base->pgd);
        if (ret)
                return ret;
@@ -437,12 +439,12 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
                                &priv->playlist[0]);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
                                &priv->playlist[1]);
        if (ret)
                return ret;
index 094000e..35b94bd 100644
@@ -180,7 +180,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+       ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+                              &chan->ramht);
        if (ret)
                return ret;
 
@@ -242,7 +243,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+       ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+                              &chan->ramht);
        if (ret)
                return ret;
 
@@ -336,12 +338,12 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
                                 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
                                 0, &base->pgd);
        if (ret)
                return ret;
@@ -350,13 +352,13 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
-                                NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
+                                0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
-                                NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+       ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
+                                0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
        if (ret)
                return ret;
 
@@ -407,12 +409,12 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
                                &priv->playlist[0]);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
                                &priv->playlist[1]);
        if (ret)
                return ret;
index 4f226af..4d4a6b9 100644
@@ -292,7 +292,8 @@ nvc0_fifo_context_ctor(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+       ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+                               &base->pgd);
        if (ret)
                return ret;
 
@@ -623,17 +624,17 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
                                &priv->playlist[0]);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
                                &priv->playlist[1]);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
                                &priv->user.mem);
        if (ret)
                return ret;
index 4419e40..9151919 100644
@@ -96,7 +96,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
 
        cur = engn->playlist[engn->cur_playlist];
        if (unlikely(cur == NULL)) {
-               int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+               int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                             0x8000, 0x1000, 0, &cur);
                if (ret) {
                        nv_error(priv, "playlist alloc failed\n");
@@ -333,7 +333,8 @@ nve0_fifo_context_ctor(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+       ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+                               &base->pgd);
        if (ret)
                return ret;
 
@@ -595,7 +596,7 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
        if (ret)
                return ret;
index 0b7951a..4cc6269 100644
@@ -36,7 +36,6 @@ int
 nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 {
        struct nouveau_bar *bar = nouveau_bar(priv);
-       struct nouveau_object *parent = nv_object(priv);
        struct nouveau_gpuobj *chan;
        u32 size = (0x80000 + priv->size + 4095) & ~4095;
        int ret, i;
@@ -44,7 +43,7 @@ nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
        /* allocate memory to for a "channel", which we'll use to generate
         * the default context values
         */
-       ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
        chan = info->chan;
        if (ret) {
@@ -1399,7 +1398,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
 {
        int i;
 
-       for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
+       for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
                nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
                nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
                nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@@ -1415,7 +1414,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
        nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
        nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
        nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
-       for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
+       for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
                nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
                nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
                nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@@ -1615,7 +1614,7 @@ static void
 nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
 {
 
-       if (nv_device(priv)->chipset == 0xd9) {
+       if (nv_device(priv)->chipset >= 0xd0) {
                nv_wr32(priv, 0x405800, 0x0f8000bf);
                nv_wr32(priv, 0x405830, 0x02180218);
                nv_wr32(priv, 0x405834, 0x08000000);
@@ -1658,10 +1657,10 @@ nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x4064ac, 0x00003fff);
        nv_wr32(priv, 0x4064b4, 0x00000000);
        nv_wr32(priv, 0x4064b8, 0x00000000);
-       if (nv_device(priv)->chipset == 0xd9)
+       if (nv_device(priv)->chipset >= 0xd0)
                nv_wr32(priv, 0x4064bc, 0x00000000);
        if (nv_device(priv)->chipset == 0xc1 ||
-           nv_device(priv)->chipset == 0xd9) {
+           nv_device(priv)->chipset >= 0xd0) {
                nv_wr32(priv, 0x4064c0, 0x80140078);
                nv_wr32(priv, 0x4064c4, 0x0086ffff);
        }
@@ -1701,7 +1700,7 @@ nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
        /* ROPC_BROADCAST */
        nv_wr32(priv, 0x408800, 0x02802a3c);
        nv_wr32(priv, 0x408804, 0x00000040);
-       if (chipset == 0xd9) {
+       if (chipset >= 0xd0) {
                nv_wr32(priv, 0x408808, 0x1043e005);
                nv_wr32(priv, 0x408900, 0x3080b801);
                nv_wr32(priv, 0x408904, 0x1043e005);
@@ -1735,7 +1734,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x418408, 0x00000000);
        nv_wr32(priv, 0x41840c, 0x00001008);
        nv_wr32(priv, 0x418410, 0x0fff0fff);
-       nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+       nv_wr32(priv, 0x418414, chipset < 0xd0 ? 0x00200fff : 0x02200fff);
        nv_wr32(priv, 0x418450, 0x00000000);
        nv_wr32(priv, 0x418454, 0x00000000);
        nv_wr32(priv, 0x418458, 0x00000000);
@@ -1750,14 +1749,14 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x418700, 0x00000002);
        nv_wr32(priv, 0x418704, 0x00000080);
        nv_wr32(priv, 0x418708, 0x00000000);
-       nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+       nv_wr32(priv, 0x41870c, chipset < 0xd0 ? 0x07c80000 : 0x00000000);
        nv_wr32(priv, 0x418710, 0x00000000);
-       nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+       nv_wr32(priv, 0x418800, chipset < 0xd0 ? 0x0006860a : 0x7006860a);
        nv_wr32(priv, 0x418808, 0x00000000);
        nv_wr32(priv, 0x41880c, 0x00000000);
        nv_wr32(priv, 0x418810, 0x00000000);
        nv_wr32(priv, 0x418828, 0x00008442);
-       if (chipset == 0xc1 || chipset == 0xd9)
+       if (chipset == 0xc1 || chipset >= 0xd0)
                nv_wr32(priv, 0x418830, 0x10000001);
        else
                nv_wr32(priv, 0x418830, 0x00000001);
@@ -1768,7 +1767,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x4188f0, 0x00000000);
        nv_wr32(priv, 0x4188f4, 0x00000000);
        nv_wr32(priv, 0x4188f8, 0x00000000);
-       if (chipset == 0xd9)
+       if (chipset >= 0xd0)
                nv_wr32(priv, 0x4188fc, 0x20100008);
        else if (chipset == 0xc1)
                nv_wr32(priv, 0x4188fc, 0x00100018);
@@ -1787,7 +1786,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
                nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
                nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
        }
-       nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+       nv_wr32(priv, 0x418b00, chipset < 0xd0 ? 0x00000000 : 0x00000006);
        nv_wr32(priv, 0x418b08, 0x0a418820);
        nv_wr32(priv, 0x418b0c, 0x062080e6);
        nv_wr32(priv, 0x418b10, 0x020398a4);
@@ -1804,7 +1803,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x418c24, 0x00000000);
        nv_wr32(priv, 0x418c28, 0x00000000);
        nv_wr32(priv, 0x418c2c, 0x00000000);
-       if (chipset == 0xc1 || chipset == 0xd9)
+       if (chipset == 0xc1 || chipset >= 0xd0)
                nv_wr32(priv, 0x418c6c, 0x00000001);
        nv_wr32(priv, 0x418c80, 0x20200004);
        nv_wr32(priv, 0x418c8c, 0x00000001);
@@ -1823,7 +1822,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x419818, 0x00000000);
        nv_wr32(priv, 0x41983c, 0x00038bc7);
        nv_wr32(priv, 0x419848, 0x00000000);
-       if (chipset == 0xc1 || chipset == 0xd9)
+       if (chipset == 0xc1 || chipset >= 0xd0)
                nv_wr32(priv, 0x419864, 0x00000129);
        else
                nv_wr32(priv, 0x419864, 0x0000012a);
@@ -1836,7 +1835,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x419a14, 0x00000200);
        nv_wr32(priv, 0x419a1c, 0x00000000);
        nv_wr32(priv, 0x419a20, 0x00000800);
-       if (chipset == 0xd9)
+       if (chipset >= 0xd0)
                nv_wr32(priv, 0x00419ac4, 0x0017f440);
        else if (chipset != 0xc0 && chipset != 0xc8)
                nv_wr32(priv, 0x00419ac4, 0x0007f440);
@@ -1847,16 +1846,16 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x419b10, 0x0a418820);
        nv_wr32(priv, 0x419b14, 0x000000e6);
        nv_wr32(priv, 0x419bd0, 0x00900103);
-       if (chipset == 0xc1 || chipset == 0xd9)
+       if (chipset == 0xc1 || chipset >= 0xd0)
                nv_wr32(priv, 0x419be0, 0x00400001);
        else
                nv_wr32(priv, 0x419be0, 0x00000001);
        nv_wr32(priv, 0x419be4, 0x00000000);
-       nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+       nv_wr32(priv, 0x419c00, chipset < 0xd0 ? 0x00000002 : 0x0000000a);
        nv_wr32(priv, 0x419c04, 0x00000006);
        nv_wr32(priv, 0x419c08, 0x00000002);
        nv_wr32(priv, 0x419c20, 0x00000000);
-       if (nv_device(priv)->chipset == 0xd9) {
+       if (nv_device(priv)->chipset >= 0xd0) {
                nv_wr32(priv, 0x419c24, 0x00084210);
                nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
                nv_wr32(priv, 0x419cb0, 0x00020048);
@@ -1868,12 +1867,12 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
        }
        nv_wr32(priv, 0x419ce8, 0x00000000);
        nv_wr32(priv, 0x419cf4, 0x00000183);
-       if (chipset == 0xc1 || chipset == 0xd9)
+       if (chipset == 0xc1 || chipset >= 0xd0)
                nv_wr32(priv, 0x419d20, 0x12180000);
        else
                nv_wr32(priv, 0x419d20, 0x02180000);
        nv_wr32(priv, 0x419d24, 0x00001fff);
-       if (chipset == 0xc1 || chipset == 0xd9)
+       if (chipset == 0xc1 || chipset >= 0xd0)
                nv_wr32(priv, 0x419d44, 0x02180218);
        nv_wr32(priv, 0x419e04, 0x00000000);
        nv_wr32(priv, 0x419e08, 0x00000000);
@@ -2210,7 +2209,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
        nv_icmd(priv, 0x00000215, 0x00000040);
        nv_icmd(priv, 0x00000216, 0x00000040);
        nv_icmd(priv, 0x00000217, 0x00000040);
-       if (nv_device(priv)->chipset == 0xd9) {
+       if (nv_device(priv)->chipset >= 0xd0) {
                for (i = 0x0400; i <= 0x0417; i++)
                        nv_icmd(priv, i, 0x00000040);
        }
@@ -2222,7 +2221,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
        nv_icmd(priv, 0x0000021d, 0x0000c080);
        nv_icmd(priv, 0x0000021e, 0x0000c080);
        nv_icmd(priv, 0x0000021f, 0x0000c080);
-       if (nv_device(priv)->chipset == 0xd9) {
+       if (nv_device(priv)->chipset >= 0xd0) {
                for (i = 0x0440; i <= 0x0457; i++)
                        nv_icmd(priv, i, 0x0000c080);
        }
@@ -2789,7 +2788,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
        nv_icmd(priv, 0x00000585, 0x0000003f);
        nv_icmd(priv, 0x00000576, 0x00000003);
        if (nv_device(priv)->chipset == 0xc1 ||
-           nv_device(priv)->chipset == 0xd9)
+           nv_device(priv)->chipset >= 0xd0)
                nv_icmd(priv, 0x0000057b, 0x00000059);
        nv_icmd(priv, 0x00000586, 0x00000040);
        nv_icmd(priv, 0x00000582, 0x00000080);
@@ -2891,7 +2890,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
        nv_icmd(priv, 0x00000957, 0x00000003);
        nv_icmd(priv, 0x0000095e, 0x20164010);
        nv_icmd(priv, 0x0000095f, 0x00000020);
-       if (nv_device(priv)->chipset == 0xd9)
+       if (nv_device(priv)->chipset >= 0xd0)
                nv_icmd(priv, 0x0000097d, 0x00000020);
        nv_icmd(priv, 0x00000683, 0x00000006);
        nv_icmd(priv, 0x00000685, 0x003fffff);
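
All of the context-generation hunks above relax `chipset == 0xd9` to `chipset >= 0xd0`, so GF117 (0xd7) follows the same register paths as GF119 (0xd9). A standalone sketch of the predicate change; the chipset values come from the diff, the helper name is invented:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Old check: exactly 0xd9 (GF119). New check: any chipset from 0xd0 up,
     * which also covers the 0xd7 (GF117) support added in this series. */
    static bool gf119_class(uint8_t chipset)
    {
            return chipset >= 0xd0;
    }

    int main(void)
    {
            const uint8_t chips[] = { 0xc1, 0xc8, 0xd7, 0xd9 };

            for (size_t i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
                    printf("0x%02x -> %s\n", chips[i],
                           gf119_class(chips[i]) ? "0xd0+ path" : "older path");
            return 0;
    }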
index 6d8c639..ae27dae 100644 (file)
@@ -2772,10 +2772,15 @@ nve0_grctx_generate(struct nvc0_graph_priv *priv)
        for (i = 0; i < 8; i++)
                nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
 
-       nv_wr32(priv, 0x405b00, 0x201);
-       nv_wr32(priv, 0x408850, 0x2);
-       nv_wr32(priv, 0x408958, 0x2);
-       nv_wr32(priv, 0x419f78, 0xa);
+       nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+       if (priv->gpc_nr == 1) {
+               nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
+               nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
+       } else {
+               nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
+               nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
+       }
+       nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
 
        nve0_grctx_generate_icmd(priv);
        nve0_grctx_generate_a097(priv);
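
The hunk above replaces hardcoded unit counts (0x201, 0x2, 0xa) with values derived from the probed configuration. A worked example of the 0x405b00 packing, with plain variables standing in for the nvc0_graph_priv fields and the bit layout read off the expression in the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t gpc_nr = 4, tpc_total = 8;

            /* total TPC count in bits 8..15, GPC count in bits 0..7;
             * the old constant 0x201 therefore meant 2 TPCs on 1 GPC */
            uint32_t r405b00 = (tpc_total << 8) | gpc_nr;

            printf("0x405b00 = 0x%03x\n", r405b00);   /* prints 0x804 */
            return 0;
    }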
index b86cc60..f7055af 100644 (file)
@@ -87,6 +87,11 @@ chipsets:
 .b16 #nvd9_gpc_mmio_tail
 .b16 #nvd9_tpc_mmio_head
 .b16 #nvd9_tpc_mmio_tail
+.b8  0xd7 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
 .b8  0 0 0 0
 
 // GPC mmio lists
index 0bcfa4d..7fbdebb 100644 (file)
@@ -62,6 +62,9 @@ chipsets:
 .b8  0xd9 0 0 0
 .b16 #nvd9_hub_mmio_head
 .b16 #nvd9_hub_mmio_tail
+.b8  0xd7 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
 .b8  0 0 0 0
 
 nvc0_hub_mmio_head:
index 0607b98..b245593 100644 (file)
@@ -254,7 +254,7 @@ nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
        if (ret)
                return ret;
index b2b650d..7a80d00 100644 (file)
@@ -142,7 +142,7 @@ nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
        if (ret)
                return ret;
index 700462f..3e1f32e 100644 (file)
@@ -109,7 +109,7 @@ nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
        if (ret)
                return ret;
index cedadaa..e451db3 100644 (file)
@@ -143,7 +143,7 @@ nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
        if (ret)
                return ret;
index 273f632..9385ac7 100644 (file)
@@ -143,7 +143,7 @@ nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
        if (ret)
                return ret;
index f40ee21..9ce84b7 100644 (file)
@@ -141,7 +141,7 @@ nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
        if (ret)
                return ret;
index 17049d5..193a5de 100644 (file)
@@ -46,6 +46,14 @@ struct nv40_graph_chan {
        struct nouveau_graph_chan base;
 };
 
+static u64
+nv40_graph_units(struct nouveau_graph *graph)
+{
+       struct nv40_graph_priv *priv = (void *)graph;
+
+       return nv_rd32(priv, 0x1540);
+}
+
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -359,6 +367,8 @@ nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        else
                nv_engine(priv)->sclass = nv40_graph_sclass;
        nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
+
+       priv->base.units = nv40_graph_units;
        return 0;
 }
 
index f2b1a7a..1ac3611 100644 (file)
@@ -48,6 +48,14 @@ struct nv50_graph_chan {
        struct nouveau_graph_chan base;
 };
 
+static u64
+nv50_graph_units(struct nouveau_graph *graph)
+{
+       struct nv50_graph_priv *priv = (void *)graph;
+
+       return nv_rd32(priv, 0x1540);
+}
+
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -819,6 +827,8 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_subdev(priv)->intr = nv50_graph_intr;
        nv_engine(priv)->cclass = &nv50_graph_cclass;
 
+       priv->base.units = nv50_graph_units;
+
        switch (nv_device(priv)->chipset) {
        case 0x50:
                nv_engine(priv)->sclass = nv50_graph_sclass;
index 0de0dd7..f9b9d82 100644 (file)
@@ -60,6 +60,19 @@ nvc8_graph_sclass[] = {
        {}
 };
 
+u64
+nvc0_graph_units(struct nouveau_graph *graph)
+{
+       struct nvc0_graph_priv *priv = (void *)graph;
+       u64 cfg;
+
+       cfg  = (u32)priv->gpc_nr;
+       cfg |= (u32)priv->tpc_total << 8;
+       cfg |= (u64)priv->rop_nr << 32;
+
+       return cfg;
+}
+
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
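
nvc0_graph_units() above packs the probed unit counts into a single u64 for the new units() hook. A small sketch of producing and unpacking that value; the field positions follow the packing code, while the unpacking side is an assumed consumer, not something shown in this diff:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t gpc_nr = 4, tpc_total = 8, rop_nr = 2;

            /* pack exactly as nvc0_graph_units() does */
            uint64_t cfg = ((uint64_t)rop_nr << 32) |
                           ((uint64_t)tpc_total << 8) | gpc_nr;

            /* unpack on the (assumed) consumer side */
            printf("gpc=%u tpc=%u rop=%u\n",
                   (unsigned)(cfg & 0xff),
                   (unsigned)((cfg >> 8) & 0xff),
                   (unsigned)((cfg >> 32) & 0xff));
            return 0;
    }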
@@ -89,7 +102,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
         * fuc to modify some per-context register settings on first load
         * of the context.
         */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
+       ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
+                               &chan->mmio);
        if (ret)
                return ret;
 
@@ -101,8 +115,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
 
        /* allocate buffers referenced by mmio list */
        for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
-               ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
-                                        0, &chan->data[i].mem);
+               ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
+                                        data->align, 0, &chan->data[i].mem);
                if (ret)
                        return ret;
 
@@ -518,9 +532,10 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 {
        struct nouveau_device *device = nv_device(parent);
        struct nvc0_graph_priv *priv;
+       bool enable = device->chipset != 0xd7;
        int ret, i;
 
-       ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+       ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -529,6 +544,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_subdev(priv)->intr = nvc0_graph_intr;
        nv_engine(priv)->cclass = &nvc0_graph_cclass;
 
+       priv->base.units = nvc0_graph_units;
+
        if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
                nv_info(priv, "using external firmware\n");
                if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -551,11 +568,13 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                break;
        }
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+                               &priv->unk4188b4);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+                               &priv->unk4188b8);
        if (ret)
                return ret;
 
index a1e78de..c870dad 100644 (file)
@@ -118,6 +118,7 @@ nvc0_graph_class(void *obj)
                return 0x9197;
        case 0xc8:
        case 0xd9:
+       case 0xd7:
                return 0x9297;
        case 0xe4:
        case 0xe7:
@@ -169,4 +170,6 @@ int  nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
                             struct nouveau_object **);
 void nvc0_graph_context_dtor(struct nouveau_object *);
 
+u64 nvc0_graph_units(struct nouveau_graph *);
+
 #endif
index 4857f91..678c16f 100644 (file)
@@ -77,11 +77,207 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
        nv_wr32(priv, 0x409c20, ustat);
 }
 
+static const struct nouveau_enum nve0_mp_warp_error[] = {
+       { 0x00, "NO_ERROR" },
+       { 0x01, "STACK_MISMATCH" },
+       { 0x05, "MISALIGNED_PC" },
+       { 0x08, "MISALIGNED_GPR" },
+       { 0x09, "INVALID_OPCODE" },
+       { 0x0d, "GPR_OUT_OF_BOUNDS" },
+       { 0x0e, "MEM_OUT_OF_BOUNDS" },
+       { 0x0f, "UNALIGNED_MEM_ACCESS" },
+       { 0x11, "INVALID_PARAM" },
+       {}
+};
+
+static const struct nouveau_enum nve0_mp_global_error[] = {
+       { 2, "MULTIPLE_WARP_ERRORS" },
+       { 3, "OUT_OF_STACK_SPACE" },
+       {}
+};
+
+static const struct nouveau_enum nve0_gpc_rop_error[] = {
+       { 1, "RT_PITCH_OVERRUN" },
+       { 4, "RT_WIDTH_OVERRUN" },
+       { 5, "RT_HEIGHT_OVERRUN" },
+       { 7, "ZETA_STORAGE_TYPE_MISMATCH" },
+       { 8, "RT_STORAGE_TYPE_MISMATCH" },
+       { 10, "RT_LINEAR_MISMATCH" },
+       {}
+};
+
+static const struct nouveau_enum nve0_sked_error[] = {
+       { 7, "CONSTANT_BUFFER_SIZE" },
+       { 9, "LOCAL_MEMORY_SIZE_POS" },
+       { 10, "LOCAL_MEMORY_SIZE_NEG" },
+       { 11, "WARP_CSTACK_SIZE" },
+       { 12, "TOTAL_TEMP_SIZE" },
+       { 13, "REGISTER_COUNT" },
+       { 18, "TOTAL_THREADS" },
+       { 20, "PROGRAM_OFFSET" },
+       { 21, "SHARED_MEMORY_SIZE" },
+       { 25, "SHARED_CONFIG_TOO_SMALL" },
+       { 26, "TOTAL_REGISTER_COUNT" },
+       {}
+};
+
+static void
+nve0_graph_mp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
+{
+       int i;
+       u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x648));
+       u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x650));
+
+       nv_error(priv, "GPC%i/TP%i/MP trap:", gpc, tp);
+
+       for (i = 0; i <= 31; ++i) {
+               if (!(gerr & (1 << i)))
+                       continue;
+               pr_cont(" ");
+               nouveau_enum_print(nve0_mp_global_error, i);
+       }
+       if (werr) {
+               pr_cont(" ");
+               nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff);
+       }
+       pr_cont("\n");
+
+       /* disable MP trap to avoid spam */
+       nv_mask(priv, TPC_UNIT(gpc, tp, 0x50c), 0x2, 0x0);
+
+       /* TODO: figure out how to resume after an MP trap */
+}
+
+static void
+nve0_graph_tp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
+{
+       u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x508));
+
+       if (stat & 0x1) {
+               u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x224));
+               nv_error(priv, "GPC%i/TP%i/TEX trap: %08x\n",
+                        gpc, tp, trap);
+
+               nv_wr32(priv, TPC_UNIT(gpc, tp, 0x224), 0xc0000000);
+               stat &= ~0x1;
+       }
+
+       if (stat & 0x2) {
+               nve0_graph_mp_trap(priv, gpc, tp);
+               stat &= ~0x2;
+       }
+
+       if (stat & 0x4) {
+               u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x084));
+               nv_error(priv, "GPC%i/TP%i/POLY trap: %08x\n",
+                        gpc, tp, trap);
+
+               nv_wr32(priv, TPC_UNIT(gpc, tp, 0x084), 0xc0000000);
+               stat &= ~0x4;
+       }
+
+       if (stat & 0x8) {
+               u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x48c));
+               nv_error(priv, "GPC%i/TP%i/L1C trap: %08x\n",
+                        gpc, tp, trap);
+
+               nv_wr32(priv, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000);
+               stat &= ~0x8;
+       }
+
+       if (stat) {
+               nv_error(priv, "GPC%i/TP%i: unknown stat %08x\n",
+                        gpc, tp, stat);
+       }
+}
+
+static void
+nve0_graph_gpc_trap(struct nvc0_graph_priv *priv)
+{
+       const u32 mask = nv_rd32(priv, 0x400118);
+       int gpc;
+
+       for (gpc = 0; gpc < 4; ++gpc) {
+               u32 stat;
+               int tp;
+
+               if (!(mask & (1 << gpc)))
+                       continue;
+               stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+
+               if (stat & 0x0001) {
+                       u32 trap[4];
+                       int i;
+
+                       trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
+                       trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
+                       trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
+                       trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+
+                       nv_error(priv, "GPC%i/PROP trap:", gpc);
+                       for (i = 0; i <= 29; ++i) {
+                               if (!(trap[0] & (1 << i)))
+                                       continue;
+                               pr_cont(" ");
+                               nouveau_enum_print(nve0_gpc_rop_error, i);
+                       }
+                       pr_cont("\n");
+
+                       nv_error(priv, "x = %u, y = %u, "
+                                "format = %x, storage type = %x\n",
+                                trap[1] & 0xffff,
+                                trap[1] >> 16,
+                                (trap[2] >> 8) & 0x3f,
+                                trap[3] & 0xff);
+
+                       nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+                       stat &= ~0x0001;
+               }
+
+               if (stat & 0x0002) {
+                       u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
+                       nv_error(priv, "GPC%i/ZCULL trap: %08x\n", gpc,
+                                trap);
+                       nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+                       stat &= ~0x0002;
+               }
+
+               if (stat & 0x0004) {
+                       u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
+                       nv_error(priv, "GPC%i/CCACHE trap: %08x\n", gpc,
+                                trap);
+                       nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+                       stat &= ~0x0004;
+               }
+
+               if (stat & 0x0008) {
+                       u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
+                       nv_error(priv, "GPC%i/ESETUP trap %08x\n", gpc,
+                                trap);
+                       nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+                       stat &= ~0x0008;
+               }
+
+               for (tp = 0; tp < 8; ++tp) {
+                       if (stat & (1 << (16 + tp)))
+                               nve0_graph_tp_trap(priv, gpc, tp);
+               }
+               stat &= ~0xff0000;
+
+               if (stat) {
+                       nv_error(priv, "GPC%i: unknown stat %08x\n",
+                                gpc, stat);
+               }
+       }
+}
+
+
 static void
 nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
                struct nouveau_object *engctx)
 {
        u32 trap = nv_rd32(priv, 0x400108);
+       int i;
        int rop;
 
        if (trap & 0x00000001) {
@@ -102,6 +298,32 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
                trap &= ~0x00000010;
        }
 
+       if (trap & 0x00000100) {
+               u32 stat = nv_rd32(priv, 0x407020);
+               nv_error(priv, "SKED ch %d [0x%010llx %s]:",
+                        chid, inst, nouveau_client_name(engctx));
+
+               for (i = 0; i <= 29; ++i) {
+                       if (!(stat & (1 << i)))
+                               continue;
+                       pr_cont(" ");
+                       nouveau_enum_print(nve0_sked_error, i);
+               }
+               pr_cont("\n");
+
+               if (stat & 0x3fffffff)
+                       nv_wr32(priv, 0x407020, 0x40000000);
+               nv_wr32(priv, 0x400108, 0x00000100);
+               trap &= ~0x00000100;
+       }
+
+       if (trap & 0x01000000) {
+               nv_error(priv, "GPC ch %d [0x%010llx %s]:\n",
+                        chid, inst, nouveau_client_name(engctx));
+               nve0_graph_gpc_trap(priv);
+               trap &= ~0x01000000;
+       }
+
        if (trap & 0x02000000) {
                for (rop = 0; rop < priv->rop_nr; rop++) {
                        u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
@@ -217,6 +439,8 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_engine(priv)->cclass = &nve0_graph_cclass;
        nv_engine(priv)->sclass = nve0_graph_sclass;
 
+       priv->base.units = nvc0_graph_units;
+
        if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
                nv_info(priv, "using external firmware\n");
                if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -227,11 +451,13 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                priv->firmware = true;
        }
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+                               &priv->unk4188b4);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+                               &priv->unk4188b8);
        if (ret)
                return ret;
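
The new nve0 trap handlers above walk status bitmasks and print a symbolic name per set bit via nouveau_enum_print(). A standalone sketch of that decode loop; the two table entries are copied from nve0_mp_global_error, the struct and function names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct err_name { int bit; const char *name; };

    static const struct err_name mp_global[] = {
            { 2, "MULTIPLE_WARP_ERRORS" },
            { 3, "OUT_OF_STACK_SPACE" },
            { -1, NULL },
    };

    /* print a name for every set bit, mirroring the loops in the handlers */
    static void decode(uint32_t stat, const struct err_name *tbl)
    {
            for (int i = 0; i < 32; i++) {
                    const char *name = "UNKNOWN";
                    const struct err_name *e;

                    if (!(stat & (1u << i)))
                            continue;
                    for (e = tbl; e->name; e++)
                            if (e->bit == i)
                                    name = e->name;
                    printf(" %s(%d)", name, i);
            }
            printf("\n");
    }

    int main(void)
    {
            decode(0x0000000c, mp_global);   /* bits 2 and 3 set */
            return 0;
    }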
 
index a523eaa..d698e71 100644 (file)
@@ -94,6 +94,32 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
        return -EINVAL;
 }
 
+static int
+nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
+                              void *args, u32 size)
+{
+       struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+       struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+       u32 data = *(u32 *)args;
+
+       switch (mthd) {
+       case 0x600:
+               nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
+               break;
+       case 0x644:
+               if (data & ~0x1ffffe)
+                       return -EINVAL;
+               nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
+               break;
+       case 0x6ac:
+               nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static struct nouveau_omthds
 nvc0_software_omthds[] = {
        { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
@@ -101,6 +127,9 @@ nvc0_software_omthds[] = {
        { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
        { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
        { 0x0500, 0x0500, nvc0_software_mthd_flip },
+       { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
+       { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
+       { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
        {}
 };
 
index 92d3ab1..0a393f7 100644 (file)
@@ -169,6 +169,7 @@ struct nv04_display_class {
  * 8570: NVA3_DISP
  * 9070: NVD0_DISP
  * 9170: NVE0_DISP
+ * 9270: NVF0_DISP
  */
 
 #define NV50_DISP_CLASS                                              0x00005070
@@ -178,6 +179,7 @@ struct nv04_display_class {
 #define NVA3_DISP_CLASS                                              0x00008570
 #define NVD0_DISP_CLASS                                              0x00009070
 #define NVE0_DISP_CLASS                                              0x00009170
+#define NVF0_DISP_CLASS                                              0x00009270
 
 #define NV50_DISP_SOR_MTHD                                           0x00010000
 #define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
@@ -246,6 +248,7 @@ struct nv50_display_class {
  * 857a: NVA3_DISP_CURS
  * 907a: NVD0_DISP_CURS
  * 917a: NVE0_DISP_CURS
+ * 927a: NVF0_DISP_CURS
  */
 
 #define NV50_DISP_CURS_CLASS                                         0x0000507a
@@ -255,6 +258,7 @@ struct nv50_display_class {
 #define NVA3_DISP_CURS_CLASS                                         0x0000857a
 #define NVD0_DISP_CURS_CLASS                                         0x0000907a
 #define NVE0_DISP_CURS_CLASS                                         0x0000917a
+#define NVF0_DISP_CURS_CLASS                                         0x0000927a
 
 struct nv50_display_curs_class {
        u32 head;
@@ -267,6 +271,7 @@ struct nv50_display_curs_class {
  * 857b: NVA3_DISP_OIMM
  * 907b: NVD0_DISP_OIMM
  * 917b: NVE0_DISP_OIMM
+ * 927b: NVF0_DISP_OIMM
  */
 
 #define NV50_DISP_OIMM_CLASS                                         0x0000507b
@@ -276,6 +281,7 @@ struct nv50_display_curs_class {
 #define NVA3_DISP_OIMM_CLASS                                         0x0000857b
 #define NVD0_DISP_OIMM_CLASS                                         0x0000907b
 #define NVE0_DISP_OIMM_CLASS                                         0x0000917b
+#define NVF0_DISP_OIMM_CLASS                                         0x0000927b
 
 struct nv50_display_oimm_class {
        u32 head;
@@ -288,6 +294,7 @@ struct nv50_display_oimm_class {
  * 857c: NVA3_DISP_SYNC
  * 907c: NVD0_DISP_SYNC
  * 917c: NVE0_DISP_SYNC
+ * 927c: NVF0_DISP_SYNC
  */
 
 #define NV50_DISP_SYNC_CLASS                                         0x0000507c
@@ -297,6 +304,7 @@ struct nv50_display_oimm_class {
 #define NVA3_DISP_SYNC_CLASS                                         0x0000857c
 #define NVD0_DISP_SYNC_CLASS                                         0x0000907c
 #define NVE0_DISP_SYNC_CLASS                                         0x0000917c
+#define NVF0_DISP_SYNC_CLASS                                         0x0000927c
 
 struct nv50_display_sync_class {
        u32 pushbuf;
@@ -310,6 +318,7 @@ struct nv50_display_sync_class {
  * 857d: NVA3_DISP_MAST
  * 907d: NVD0_DISP_MAST
  * 917d: NVE0_DISP_MAST
+ * 927d: NVF0_DISP_MAST
  */
 
 #define NV50_DISP_MAST_CLASS                                         0x0000507d
@@ -319,6 +328,7 @@ struct nv50_display_sync_class {
 #define NVA3_DISP_MAST_CLASS                                         0x0000857d
 #define NVD0_DISP_MAST_CLASS                                         0x0000907d
 #define NVE0_DISP_MAST_CLASS                                         0x0000917d
+#define NVF0_DISP_MAST_CLASS                                         0x0000927d
 
 struct nv50_display_mast_class {
        u32 pushbuf;
@@ -331,6 +341,7 @@ struct nv50_display_mast_class {
  * 857e: NVA3_DISP_OVLY
  * 907e: NVD0_DISP_OVLY
  * 917e: NVE0_DISP_OVLY
+ * 927e: NVF0_DISP_OVLY
  */
 
 #define NV50_DISP_OVLY_CLASS                                         0x0000507e
@@ -340,6 +351,7 @@ struct nv50_display_mast_class {
 #define NVA3_DISP_OVLY_CLASS                                         0x0000857e
 #define NVD0_DISP_OVLY_CLASS                                         0x0000907e
 #define NVE0_DISP_OVLY_CLASS                                         0x0000917e
+#define NVF0_DISP_OVLY_CLASS                                         0x0000927e
 
 struct nv50_display_ovly_class {
        u32 pushbuf;
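
The additions above give every display channel type an NVF0 (0x92xx) class. A quick illustration of the numbering convention visible in the comments, where the low byte selects the channel type (7a cursor, 7b overlay-immediate, 7c sync, 7d master, 7e overlay) and the upper bytes the display generation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t nvf0[] = { 0x927a, 0x927b, 0x927c, 0x927d, 0x927e };
            const char *type[]   = { "CURS", "OIMM", "SYNC", "MAST", "OVLY" };

            for (int i = 0; i < 5; i++)
                    printf("NVF0_DISP_%s_CLASS = 0x%04x (channel type 0x%02x)\n",
                           type[i], (unsigned)nvf0[i], (unsigned)(nvf0[i] & 0xff));
            return 0;
    }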
index d351a4e..05840f3 100644 (file)
@@ -6,7 +6,7 @@
 #include <core/engine.h>
 
 enum nv_subdev_type {
-       NVDEV_SUBDEV_DEVICE,
+       NVDEV_ENGINE_DEVICE,
        NVDEV_SUBDEV_VBIOS,
 
        /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
@@ -57,7 +57,7 @@ enum nv_subdev_type {
 };
 
 struct nouveau_device {
-       struct nouveau_subdev base;
+       struct nouveau_engine base;
        struct list_head head;
 
        struct pci_dev *pdev;
@@ -99,7 +99,7 @@ nv_device(void *obj)
 
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
        if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
-                    (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) {
+                    (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) {
                nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
                          nv_hclass(object), nv_hclass(device));
        }
index 31cd852..9f5ea90 100644 (file)
@@ -51,8 +51,8 @@ int  nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
 void nouveau_parent_destroy(struct nouveau_parent *);
 
 void _nouveau_parent_dtor(struct nouveau_object *);
-#define _nouveau_parent_init _nouveau_object_init
-#define _nouveau_parent_fini _nouveau_object_fini
+#define _nouveau_parent_init nouveau_object_init
+#define _nouveau_parent_fini nouveau_object_fini
 
 int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
                          struct nouveau_object **pengine,
@@ -18,7 +18,6 @@ int nv50_identify(struct nouveau_device *);
 int nvc0_identify(struct nouveau_device *);
 int nve0_identify(struct nouveau_device *);
 
-extern struct nouveau_oclass nouveau_device_sclass[];
 struct nouveau_device *nouveau_device_find(u64 name);
 
 #endif
index 28da677..4b21fab 100644 (file)
@@ -44,5 +44,6 @@ extern struct nouveau_oclass nv94_disp_oclass;
 extern struct nouveau_oclass nva3_disp_oclass;
 extern struct nouveau_oclass nvd0_disp_oclass;
 extern struct nouveau_oclass nve0_disp_oclass;
+extern struct nouveau_oclass nvf0_disp_oclass;
 
 #endif
index b46c197..633c2f8 100644 (file)
@@ -65,7 +65,8 @@ struct nouveau_fifo_base {
 struct nouveau_fifo {
        struct nouveau_engine base;
 
-       struct nouveau_event *uevent;
+       struct nouveau_event *cevent; /* channel creation event */
+       struct nouveau_event *uevent; /* async user trigger */
 
        struct nouveau_object **channel;
        spinlock_t lock;
index 6943b40..5d39243 100644 (file)
@@ -26,6 +26,10 @@ struct nouveau_graph_chan {
 
 struct nouveau_graph {
        struct nouveau_engine base;
+
+       /* Returns chipset-specific counts of units packed into a u64.
+        */
+       u64 (*units)(struct nouveau_graph *);
 };
 
 static inline struct nouveau_graph *
index f351f63..a1985ed 100644 (file)
@@ -4,8 +4,15 @@
 #include <core/subdev.h>
 #include <core/device.h>
 
+struct nouveau_mm_node;
+
 struct nouveau_ltcg {
        struct nouveau_subdev base;
+
+       int  (*tags_alloc)(struct nouveau_ltcg *, u32 count,
+                          struct nouveau_mm_node **);
+       void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
+       void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
 };
 
 static inline struct nouveau_ltcg *
index fded97c..d550226 100644 (file)
@@ -21,18 +21,22 @@ nouveau_mc(void *obj)
 }
 
 #define nouveau_mc_create(p,e,o,d)                                             \
-       nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master",              \
-                              sizeof(**d), (void **)d)
-#define nouveau_mc_destroy(p)                                                  \
-       nouveau_subdev_destroy(&(p)->base)
-#define nouveau_mc_init(p)                                                     \
-       nouveau_subdev_init(&(p)->base)
-#define nouveau_mc_fini(p,s)                                                   \
-       nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_mc_dtor _nouveau_subdev_dtor
-#define _nouveau_mc_init _nouveau_subdev_init
-#define _nouveau_mc_fini _nouveau_subdev_fini
+       nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p) ({                                               \
+       struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc));        \
+})
+#define nouveau_mc_init(p) ({                                                  \
+       struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc));        \
+})
+#define nouveau_mc_fini(p,s) ({                                                \
+       struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s));   \
+})
+
+int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
+                       struct nouveau_oclass *, int, void **);
+void _nouveau_mc_dtor(struct nouveau_object *);
+int  _nouveau_mc_init(struct nouveau_object *);
+int  _nouveau_mc_fini(struct nouveau_object *, bool);
 
 extern struct nouveau_oclass nv04_mc_oclass;
 extern struct nouveau_oclass nv44_mc_oclass;
@@ -40,8 +44,6 @@ extern struct nouveau_oclass nv50_mc_oclass;
 extern struct nouveau_oclass nv98_mc_oclass;
 extern struct nouveau_oclass nvc0_mc_oclass;
 
-void nouveau_mc_intr(struct nouveau_subdev *);
-
 extern const struct nouveau_mc_intr nv04_mc_intr[];
 int nv04_mc_init(struct nouveau_object *);
 int nv50_mc_init(struct nouveau_object *);
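
The reworked nouveau_mc_destroy/init/fini macros above now expand to GNU statement expressions that evaluate their argument once and forward to the new _nouveau_mc_* helpers. A minimal standalone example of the ({ ... }) pattern with made-up widget names (requires GCC or Clang):

    #include <stdio.h>

    struct widget { int id; };

    static int widget_init(struct widget *w) { return w->id; }

    /* evaluate the argument once into a local, then call the real helper;
     * the statement expression's value is the last expression inside it */
    #define WIDGET_INIT(p) ({                                 \
            struct widget *_w = (p); widget_init(_w);         \
    })

    int main(void)
    {
            struct widget w = { .id = 7 };

            printf("%d\n", WIDGET_INIT(&w));   /* prints 7 */
            return 0;
    }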
index 0b20fc0..c075998 100644 (file)
@@ -73,6 +73,7 @@ int  _nouveau_therm_fini(struct nouveau_object *, bool);
 
 extern struct nouveau_oclass nv40_therm_oclass;
 extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nv84_therm_oclass;
 extern struct nouveau_oclass nva3_therm_oclass;
 extern struct nouveau_oclass nvd0_therm_oclass;
 
index eb49603..3bd9be2 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/reboot.h>
+#include <linux/interrupt.h>
 
 #include <asm/unaligned.h>
 
index c3acf5b..649f1ce 100644 (file)
@@ -122,18 +122,20 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
-                               &priv->mem);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+                                NVOBJ_FLAG_HEAP, &priv->mem);
        heap = nv_object(priv->mem);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
-                                0x1400 : 0x0200, 0, 0, &priv->pad);
+       ret = nouveau_gpuobj_new(nv_object(priv), heap,
+                               (device->chipset == 0x50) ? 0x1400 : 0x0200,
+                                0, 0, &priv->pad);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
+       ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
+                                0, &priv->pgd);
        if (ret)
                return ret;
 
@@ -145,9 +147,9 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
-                                0x1000, NVOBJ_FLAG_ZERO_ALLOC,
-                                &vm->pgt[0].obj[0]);
+       ret = nouveau_gpuobj_new(nv_object(priv), heap,
+                                ((limit-- - start) >> 12) * 8, 0x1000,
+                                NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
        vm->pgt[0].refcount[0] = 1;
        if (ret)
                return ret;
@@ -157,7 +159,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
+       ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
        if (ret)
                return ret;
 
@@ -182,7 +184,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
+       ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
        if (ret)
                return ret;
 
index 77a6fb7..f8a4495 100644 (file)
@@ -101,12 +101,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        /* BAR3 */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+                               &priv->bar[0].mem);
        mem = priv->bar[0].mem;
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+                               &priv->bar[0].pgd);
        if (ret)
                return ret;
 
@@ -114,7 +116,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                 (pci_resource_len(pdev, 3) >> 12) * 8,
                                 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
                                 &vm->pgt[0].obj[0]);
@@ -133,12 +135,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
 
        /* BAR1 */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+                               &priv->bar[1].mem);
        mem = priv->bar[1].mem;
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+                               &priv->bar[1].pgd);
        if (ret)
                return ret;
 
index 9c41b58..c300b5e 100644 (file)
@@ -64,27 +64,33 @@ init_exec_force(struct nvbios_init *init, bool exec)
 static inline int
 init_or(struct nvbios_init *init)
 {
-       if (init->outp)
-               return ffs(init->outp->or) - 1;
-       error("script needs OR!!\n");
+       if (init_exec(init)) {
+               if (init->outp)
+                       return ffs(init->outp->or) - 1;
+               error("script needs OR!!\n");
+       }
        return 0;
 }
 
 static inline int
 init_link(struct nvbios_init *init)
 {
-       if (init->outp)
-               return !(init->outp->sorconf.link & 1);
-       error("script needs OR link\n");
+       if (init_exec(init)) {
+               if (init->outp)
+                       return !(init->outp->sorconf.link & 1);
+               error("script needs OR link\n");
+       }
        return 0;
 }
 
 static inline int
 init_crtc(struct nvbios_init *init)
 {
-       if (init->crtc >= 0)
-               return init->crtc;
-       error("script needs crtc\n");
+       if (init_exec(init)) {
+               if (init->crtc >= 0)
+                       return init->crtc;
+               error("script needs crtc\n");
+       }
        return 0;
 }
 
@@ -92,16 +98,21 @@ static u8
 init_conn(struct nvbios_init *init)
 {
        struct nouveau_bios *bios = init->bios;
+       u8  ver, len;
+       u16 conn;
 
-       if (init->outp) {
-               u8  ver, len;
-               u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len);
-               if (conn)
-                       return nv_ro08(bios, conn);
+       if (init_exec(init)) {
+               if (init->outp) {
+                       conn = init->outp->connector;
+                       conn = dcb_conn(bios, conn, &ver, &len);
+                       if (conn)
+                               return nv_ro08(bios, conn);
+               }
+
+               error("script needs connector type\n");
        }
 
-       error("script needs connector type\n");
-       return 0x00;
+       return 0xff;
 }
 
 static inline u32
@@ -227,7 +238,8 @@ init_i2c(struct nvbios_init *init, int index)
        } else
        if (index < 0) {
                if (!init->outp) {
-                       error("script needs output for i2c\n");
+                       if (init_exec(init))
+                               error("script needs output for i2c\n");
                        return NULL;
                }
 
@@ -544,7 +556,8 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
                        return 0x6808b0 + dacoffset;
                }
 
-               error("tmds opcodes need dcb\n");
+               if (init_exec(init))
+                       error("tmds opcodes need dcb\n");
        } else {
                if (tmds < ARRAY_SIZE(pramdac_table))
                        return pramdac_table[tmds];
@@ -792,7 +805,8 @@ init_dp_condition(struct nvbios_init *init)
                        break;
                }
 
-               warn("script needs dp output table data\n");
+               if (init_exec(init))
+                       warn("script needs dp output table data\n");
                break;
        case 5:
                if (!(init_rdauxr(init, 0x0d) & 1))
@@ -816,7 +830,7 @@ init_io_mask_or(struct nvbios_init *init)
        u8    or = init_or(init);
        u8  data;
 
-       trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)", index, or);
+       trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
        init->offset += 2;
 
        data = init_rdvgai(init, 0x03d4, index);
@@ -835,7 +849,7 @@ init_io_or(struct nvbios_init *init)
        u8    or = init_or(init);
        u8  data;
 
-       trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)", index, or);
+       trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
        init->offset += 2;
 
        data = init_rdvgai(init, 0x03d4, index);
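
Most of the vbios init hunks above wrap their error reporting in init_exec(), so scripts that are merely parsed (execution disabled) no longer warn about missing ORs, links, CRTCs or connectors; init_conn() additionally returns 0xff instead of 0x00 when it has nothing to report. A toy version of that gating pattern, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct init { bool exec; int crtc; };

    /* only complain when the script is actually being executed */
    static int init_crtc(struct init *init)
    {
            if (init->exec) {
                    if (init->crtc >= 0)
                            return init->crtc;
                    fprintf(stderr, "script needs crtc\n");
            }
            return 0;
    }

    int main(void)
    {
            struct init parse_only = { .exec = false, .crtc = -1 };
            struct init running    = { .exec = true,  .crtc = -1 };

            printf("%d\n", init_crtc(&parse_only));   /* silent */
            printf("%d\n", init_crtc(&running));      /* warns */
            return 0;
    }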
index 7606ed1..86ad592 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include <subdev/fb.h>
+#include <subdev/ltcg.h>
 #include <subdev/bios.h>
 
 struct nvc0_fb_priv {
@@ -31,34 +32,14 @@ struct nvc0_fb_priv {
        dma_addr_t r100c10;
 };
 
-/* 0 = unsupported
- * 1 = non-compressed
- * 3 = compressed
- */
-static const u8 types[256] = {
-       1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-       0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
-       3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
-       0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-       0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
-       3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
-       3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
-       3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
-       3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
-};
+extern const u8 nvc0_pte_storage_type_map[256];
+
 
 static bool
 nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
        u8 memtype = (tile_flags & 0x0000ff00) >> 8;
-       return likely((types[memtype] == 1));
+       return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
 static int
@@ -130,6 +111,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
        int type = (memtype & 0x0ff);
        int back = (memtype & 0x800);
        int ret;
+       const bool comp = nvc0_pte_storage_type_map[type] != type;
 
        size  >>= 12;
        align >>= 12;
@@ -142,10 +124,22 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                return -ENOMEM;
 
        INIT_LIST_HEAD(&mem->regions);
-       mem->memtype = type;
        mem->size = size;
 
        mutex_lock(&pfb->base.mutex);
+       if (comp) {
+               struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
+
+               /* compression only works with lpages */
+               if (align == (1 << (17 - 12))) {
+                       int n = size >> 5;
+                       ltcg->tags_alloc(ltcg, n, &mem->tag);
+               }
+               if (unlikely(!mem->tag))
+                       type = nvc0_pte_storage_type_map[type];
+       }
+       mem->memtype = type;
+
        do {
                if (back)
                        ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
@@ -168,6 +162,17 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
        return 0;
 }
 
+static void
+nvc0_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+       struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
+
+       if ((*pmem)->tag)
+               ltcg->tags_free(ltcg, &(*pmem)->tag);
+
+       nv50_fb_vram_del(pfb, pmem);
+}
+
 static int
 nvc0_fb_init(struct nouveau_object *object)
 {
@@ -178,7 +183,8 @@ nvc0_fb_init(struct nouveau_object *object)
        if (ret)
                return ret;
 
-       nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+       if (priv->r100c10_page)
+               nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
        return 0;
 }
 
@@ -214,16 +220,16 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        priv->base.memtype_valid = nvc0_fb_memtype_valid;
        priv->base.ram.init = nvc0_fb_vram_init;
        priv->base.ram.get = nvc0_fb_vram_new;
-       priv->base.ram.put = nv50_fb_vram_del;
+       priv->base.ram.put = nvc0_fb_vram_del;
 
        priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!priv->r100c10_page)
-               return -ENOMEM;
-
-       priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0,
-                                    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(device->pdev, priv->r100c10))
-               return -EFAULT;
+       if (priv->r100c10_page) {
+               priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
+                                            0, PAGE_SIZE,
+                                            PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(device->pdev, priv->r100c10))
+                       return -EFAULT;
+       }
 
        return nouveau_fb_preinit(&priv->base);
 }
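
In nvc0_fb_vram_new() above, compression tags are only requested for 128 KiB large-page allocations, with `n = size >> 5`. A worked example of that count; the interpretation (size already shifted to 4 KiB units, one tag per 128 KiB page) is my reading of the code, not stated in the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bytes = 8ull << 20;      /* an 8 MiB allocation */
            uint64_t size  = bytes >> 12;     /* 2048 units of 4 KiB */
            uint64_t ntags = size >> 5;       /* one tag per 32 units = 128 KiB */

            printf("%llu tags for %llu bytes\n",
                   (unsigned long long)ntags, (unsigned long long)bytes);
            return 0;
    }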
index 2e98e8a..8ae2625 100644 (file)
@@ -140,12 +140,8 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
        }
 
        /* drop port's i2c subdev refcount, i2c handles this itself */
-       if (ret == 0) {
+       if (ret == 0)
                list_add_tail(&port->head, &i2c->ports);
-               atomic_dec(&parent->refcount);
-               atomic_dec(&engine->refcount);
-       }
-
        return ret;
 }
 
index f5bbd38..795393d 100644 (file)
@@ -93,7 +93,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
                   u32 size, u32 align, struct nouveau_object **pobject)
 {
        struct nouveau_object *engine = nv_object(imem);
-       struct nv04_instmem_priv *priv = (void *)(imem);
        int ret;
 
        ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
@@ -101,14 +100,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       /* INSTMEM itself creates objects to reserve (and preserve across
-        * suspend/resume) various fixed data locations, each one of these
-        * takes a reference on INSTMEM itself, causing it to never be
-        * freed.  We drop all the self-references here to avoid this.
-        */
-       if (unlikely(!priv->created))
-               atomic_dec(&engine->refcount);
-
        return 0;
 }
 
@@ -134,27 +125,28 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        /* 0x00000-0x10000: reserve for probable vbios image */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+                               &priv->vbios);
        if (ret)
                return ret;
 
        /* 0x10000-0x18000: reserve for RAMHT */
-       ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+       ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
        if (ret)
                return ret;
 
        /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
        if (ret)
                return ret;
 
        /* 0x18800-0x18a00: reserve for RAMRO */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
+                               &priv->ramro);
        if (ret)
                return ret;
 
-       priv->created = true;
        return 0;
 }
 
index 7983d8d..b15b613 100644 (file)
@@ -9,7 +9,6 @@
 
 struct nv04_instmem_priv {
        struct nouveau_instmem base;
-       bool created;
 
        void __iomem *iomem;
        struct nouveau_mm heap;
index da64253..716bf41 100644 (file)
@@ -82,31 +82,33 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        /* 0x00000-0x10000: reserve for probable vbios image */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+                               &priv->vbios);
        if (ret)
                return ret;
 
        /* 0x10000-0x18000: reserve for RAMHT */
-       ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+       ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
+                              &priv->ramht);
        if (ret)
                return ret;
 
        /* 0x18000-0x18200: reserve for RAMRO
         * 0x18200-0x20000: padding
         */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
+                               &priv->ramro);
        if (ret)
                return ret;
 
        /* 0x20000-0x21000: reserve for RAMFC
         * 0x21000-0x40000: padding and some unknown crap
         */
-       ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
        if (ret)
                return ret;
 
-       priv->created = true;
        return 0;
 }
 
index 078a2b9..e4940fb 100644 (file)
  */
 
 #include <subdev/ltcg.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
 
 struct nvc0_ltcg_priv {
        struct nouveau_ltcg base;
+       u32 part_nr;
+       u32 part_mask;
        u32 subp_nr;
+       struct nouveau_mm tags;
+       u32 num_tags;
+       struct nouveau_mm_node *tag_ram;
 };
 
 static void
@@ -61,12 +68,105 @@ nvc0_ltcg_intr(struct nouveau_subdev *subdev)
        nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
 }
 
+static int
+nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
+                    struct nouveau_mm_node **pnode)
+{
+       struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+       int ret;
+
+       ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+       if (ret)
+               *pnode = NULL;
+
+       return ret;
+}
+
+static void
+nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
+{
+       struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+
+       nouveau_mm_free(&priv->tags, pnode);
+}
+
+static void
+nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+{
+       struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+       u32 last = first + count - 1;
+       int p, i;
+
+       BUG_ON((first > last) || (last >= priv->num_tags));
+
+       nv_wr32(priv, 0x17e8cc, first);
+       nv_wr32(priv, 0x17e8d0, last);
+       nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
+
+       /* wait until it's finished with clearing */
+       for (p = 0; p < priv->part_nr; ++p) {
+               if (!(priv->part_mask & (1 << p)))
+                       continue;
+               for (i = 0; i < priv->subp_nr; ++i)
+                       nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
+       }
+}
+
+/* TODO: Figure out tag memory details and drop the over-cautious allocation.
+ */
+static int
+nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
+{
+       u32 tag_size, tag_margin, tag_align;
+       int ret;
+
+       nv_wr32(priv, 0x17e8d8, priv->part_nr);
+
+       /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
+       priv->num_tags = (pfb->ram.size >> 17) / 4;
+       if (priv->num_tags > (1 << 17))
+               priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
+       priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+
+       tag_align = priv->part_nr * 0x800;
+       tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
+
+       /* 4 part 4 sub: 0x2000 bytes for 56 tags */
+       /* 3 part 4 sub: 0x6000 bytes for 168 tags */
+       /*
+        * About 147 bytes per tag. Let's be safe and allocate x2, which makes
+        * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
+        *
+        * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
+        */
+       tag_size  = (priv->num_tags / 64) * 0x6000 + tag_margin;
+       tag_size += tag_align;
+       tag_size  = (tag_size + 0xfff) >> 12; /* round up */
+
+       ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+                             &priv->tag_ram);
+       if (ret) {
+               priv->num_tags = 0;
+       } else {
+               u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
+
+               tag_base += tag_align - 1;
+               ret = do_div(tag_base, tag_align);
+
+               nv_wr32(priv, 0x17e8d4, tag_base);
+       }
+       ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+
+       return ret;
+}
+
 static int
 nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
               struct nouveau_oclass *oclass, void *data, u32 size,
               struct nouveau_object **pobject)
 {
        struct nvc0_ltcg_priv *priv;
+       struct nouveau_fb *pfb = nouveau_fb(parent);
        int ret;
 
        ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
@@ -74,19 +174,44 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24;
+       priv->part_nr = nv_rd32(priv, 0x022438);
+       priv->part_mask = nv_rd32(priv, 0x022554);
+
+       priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+
        nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
 
+       ret = nvc0_ltcg_init_tag_ram(pfb, priv);
+       if (ret)
+               return ret;
+
+       priv->base.tags_alloc = nvc0_ltcg_tags_alloc;
+       priv->base.tags_free  = nvc0_ltcg_tags_free;
+       priv->base.tags_clear = nvc0_ltcg_tags_clear;
+
        nv_subdev(priv)->intr = nvc0_ltcg_intr;
        return 0;
 }
 
+static void
+nvc0_ltcg_dtor(struct nouveau_object *object)
+{
+       struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+       struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+       struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
+
+       nouveau_mm_fini(&priv->tags);
+       nouveau_mm_free(&pfb->vram, &priv->tag_ram);
+
+       nouveau_ltcg_destroy(ltcg);
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
        .handle = NV_SUBDEV(LTCG, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_ltcg_ctor,
-               .dtor = _nouveau_ltcg_dtor,
+               .dtor = nvc0_ltcg_dtor,
                .init = _nouveau_ltcg_init,
                .fini = _nouveau_ltcg_fini,
        },
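
The tag-RAM sizing in nvc0_ltcg_init_tag_ram() above is plain integer arithmetic, so it is easy to sanity-check outside the kernel. The sketch below reruns the same computation for an assumed 4 GiB board with four enabled partitions (both figures are illustrative assumptions, not values read from hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t vram  = 4ULL << 30;          /* assume 4 GiB of VRAM */
        uint32_t parts = 4;                   /* assume 4 enabled partitions */
        uint32_t num_tags, tag_align, tag_margin, tag_size;

        num_tags = (vram >> 17) / 4;          /* tags for 1/4 of VRAM */
        if (num_tags > (1 << 17))
                num_tags = 1 << 17;           /* only 17 tag bits in the PTE */
        num_tags = (num_tags + 63) & ~63;     /* round up to a multiple of 64 */

        tag_align  = parts * 0x800;
        tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;

        tag_size  = (num_tags / 64) * 0x6000 + tag_margin;
        tag_size += tag_align;
        tag_size  = (tag_size + 0xfff) >> 12; /* convert to 4 KiB pages */

        printf("%u tags, %u pages (%u KiB) of tag RAM\n",
               (unsigned)num_tags, (unsigned)tag_size, (unsigned)tag_size * 4);
        return 0;
}

With those inputs it prints 8192 tags and 776 pages (about 3 MiB), which matches the "< 0.1 %" estimate in the comment above.
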
index 8379aaf..1c0330b 100644 (file)
 
 #include <subdev/mc.h>
 
-void
-nouveau_mc_intr(struct nouveau_subdev *subdev)
+static irqreturn_t
+nouveau_mc_intr(int irq, void *arg)
 {
-       struct nouveau_mc *pmc = nouveau_mc(subdev);
+       struct nouveau_mc *pmc = arg;
        const struct nouveau_mc_intr *map = pmc->intr_map;
        struct nouveau_subdev *unit;
        u32 stat, intr;
@@ -35,7 +35,7 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
        intr = stat = nv_rd32(pmc, 0x000100);
        while (stat && map->stat) {
                if (stat & map->stat) {
-                       unit = nouveau_subdev(subdev, map->unit);
+                       unit = nouveau_subdev(pmc, map->unit);
                        if (unit && unit->intr)
                                unit->intr(unit);
                        intr &= ~map->stat;
@@ -46,4 +46,56 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
        if (intr) {
                nv_error(pmc, "unknown intr 0x%08x\n", stat);
        }
+
+       return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int
+_nouveau_mc_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nouveau_mc *pmc = (void *)object;
+       nv_wr32(pmc, 0x000140, 0x00000000);
+       return nouveau_subdev_fini(&pmc->base, suspend);
+}
+
+int
+_nouveau_mc_init(struct nouveau_object *object)
+{
+       struct nouveau_mc *pmc = (void *)object;
+       int ret = nouveau_subdev_init(&pmc->base);
+       if (ret)
+               return ret;
+       nv_wr32(pmc, 0x000140, 0x00000001);
+       return 0;
+}
+
+void
+_nouveau_mc_dtor(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nouveau_mc *pmc = (void *)object;
+       free_irq(device->pdev->irq, pmc);
+       nouveau_subdev_destroy(&pmc->base);
+}
+
+int
+nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+                  struct nouveau_oclass *oclass, int length, void **pobject)
+{
+       struct nouveau_device *device = nv_device(parent);
+       struct nouveau_mc *pmc;
+       int ret;
+
+       ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+                                    "master", length, pobject);
+       pmc = *pobject;
+       if (ret)
+               return ret;
+
+       ret = request_irq(device->pdev->irq, nouveau_mc_intr,
+                         IRQF_SHARED, "nouveau", pmc);
+       if (ret < 0)
+               return ret;
+
+       return 0;
 }
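
With the top-level interrupt moving from the DRM layer into the MC subdev, nouveau_mc_intr() now follows the usual shared-IRQ contract: walk the chipset's intr_map, hand every pending status bit to the owning subdev, and report IRQ_HANDLED only when PMC actually had something pending. Below is a standalone model of that table walk; the map entries and the test status words are illustrative only, loosely patterned after the nvXX_mc_intr[] tables:

#include <stdio.h>
#include <stdint.h>

struct intr_map { uint32_t stat; const char *unit; };

/* hypothetical map, loosely patterned after the nvXX_mc_intr[] tables */
static const struct intr_map map[] = {
        { 0x00000100, "PFIFO"  },
        { 0x00001000, "PGRAPH" },
        { 0x00100000, "PTIMER" },
        { 0x00000000, NULL     },       /* terminator, as in the real tables */
};

static int handle_intr(uint32_t stat)
{
        uint32_t intr = stat;
        const struct intr_map *m = map;

        while (stat && m->stat) {
                if (stat & m->stat) {
                        printf("dispatch to %s\n", m->unit);
                        intr &= ~m->stat;
                }
                m++;
        }
        if (intr)
                printf("unknown intr 0x%08x\n", (unsigned)intr);

        return stat ? 1 /* IRQ_HANDLED */ : 0 /* IRQ_NONE */;
}

int main(void)
{
        handle_intr(0x00001100);        /* PFIFO + PGRAPH pending */
        handle_intr(0x00000000);        /* nothing pending: report IRQ_NONE */
        return 0;
}
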
index 89da8fa..8c76971 100644 (file)
@@ -55,7 +55,6 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_subdev(priv)->intr = nouveau_mc_intr;
        priv->base.intr_map = nv04_mc_intr;
        return 0;
 }
index 397d868..5191937 100644 (file)
@@ -41,7 +41,6 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_subdev(priv)->intr = nouveau_mc_intr;
        priv->base.intr_map = nv04_mc_intr;
        return 0;
 }
index 5965add..d796924 100644 (file)
@@ -57,7 +57,6 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_subdev(priv)->intr = nouveau_mc_intr;
        priv->base.intr_map = nv50_mc_intr;
        return 0;
 }
index 3a80b29..e82fd21 100644 (file)
@@ -59,7 +59,6 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_subdev(priv)->intr = nouveau_mc_intr;
        priv->base.intr_map = nv98_mc_intr;
        return 0;
 }
index 42bbf72..737bd4b 100644 (file)
@@ -61,7 +61,6 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_subdev(priv)->intr = nouveau_mc_intr;
        priv->base.intr_map = nvc0_mc_intr;
        return 0;
 }
index a70d1b7..002e51b 100644 (file)
@@ -165,7 +165,7 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
        return 0;
 }
 
-static void
+void
 nv40_therm_intr(struct nouveau_subdev *subdev)
 {
        struct nouveau_therm *therm = nouveau_therm(subdev);
index 86632cb..8cf7597 100644 (file)
@@ -118,145 +118,36 @@ nv50_fan_pwm_clock(struct nouveau_therm *therm)
        return pwm_clock;
 }
 
-int
-nv50_temp_get(struct nouveau_therm *therm)
-{
-       return nv_rd32(therm, 0x20400);
-}
-
-static void
-nv50_therm_program_alarms(struct nouveau_therm *therm)
-{
-       struct nouveau_therm_priv *priv = (void *)therm;
-       struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
-
-       /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
-       nv_wr32(therm, 0x20000, 0x000003ff);
-
-       /* shutdown: The computer should be shutdown when reached */
-       nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
-       nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
-
-       /* THRS_1 : fan boost*/
-       nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
-
-       /* THRS_2 : critical */
-       nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
-
-       /* THRS_4 : down clock */
-       nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
-       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
-
-       nv_info(therm,
-               "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-               sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-               sensor->thrs_down_clock.temp,
-               sensor->thrs_down_clock.hysteresis,
-               sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-               sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
-
-}
-
-/* must be called with alarm_program_lock taken ! */
 static void
-nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
-                                  uint32_t thrs_reg, u8 status_bit,
-                                  const struct nvbios_therm_threshold *thrs,
-                                  enum nouveau_therm_thrs thrs_name)
+nv50_sensor_setup(struct nouveau_therm *therm)
 {
-       enum nouveau_therm_thrs_direction direction;
-       enum nouveau_therm_thrs_state prev_state, new_state;
-       int temp, cur;
-
-       prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
-       temp = nv_rd32(therm, thrs_reg);
-
-       /* program the next threshold */
-       if (temp == thrs->temp) {
-               nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
-               new_state = NOUVEAU_THERM_THRS_HIGHER;
-       } else {
-               nv_wr32(therm, thrs_reg, thrs->temp);
-               new_state = NOUVEAU_THERM_THRS_LOWER;
-       }
-
-       /* fix the state (in case someone reprogrammed the alarms) */
-       cur = therm->temp_get(therm);
-       if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
-               new_state = NOUVEAU_THERM_THRS_HIGHER;
-       else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
-               cur < thrs->temp - thrs->hysteresis)
-               new_state = NOUVEAU_THERM_THRS_LOWER;
-       nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
-
-       /* find the direction */
-       if (prev_state < new_state)
-               direction = NOUVEAU_THERM_THRS_RISING;
-       else if (prev_state > new_state)
-               direction = NOUVEAU_THERM_THRS_FALLING;
-       else
-               return;
-
-       /* advertise a change in direction */
-       nouveau_therm_sensor_event(therm, thrs_name, direction);
+       nv_mask(therm, 0x20010, 0x40000000, 0x0);
+       mdelay(20); /* wait for the temperature to stabilize */
 }
 
-static void
-nv50_therm_intr(struct nouveau_subdev *subdev)
+static int
+nv50_temp_get(struct nouveau_therm *therm)
 {
-       struct nouveau_therm *therm = nouveau_therm(subdev);
        struct nouveau_therm_priv *priv = (void *)therm;
        struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-       unsigned long flags;
-       uint32_t intr;
-
-       spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
-
-       intr = nv_rd32(therm, 0x20100);
-
-       /* THRS_4: downclock */
-       if (intr & 0x002) {
-               nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
-                                                 &sensor->thrs_down_clock,
-                                                 NOUVEAU_THERM_THRS_DOWNCLOCK);
-               intr &= ~0x002;
-       }
+       int core_temp;
 
-       /* shutdown */
-       if (intr & 0x004) {
-               nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
-                                                  &sensor->thrs_shutdown,
-                                                  NOUVEAU_THERM_THRS_SHUTDOWN);
-               intr &= ~0x004;
-       }
-
-       /* THRS_1 : fan boost */
-       if (intr & 0x008) {
-               nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
-                                                  &sensor->thrs_fan_boost,
-                                                  NOUVEAU_THERM_THRS_FANBOOST);
-               intr &= ~0x008;
-       }
+       core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
 
-       /* THRS_2 : critical */
-       if (intr & 0x010) {
-               nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
-                                                  &sensor->thrs_critical,
-                                                  NOUVEAU_THERM_THRS_CRITICAL);
-               intr &= ~0x010;
-       }
+       /* if the slope or the offset is unset, do not use the sensor */
+       if (!sensor->slope_div || !sensor->slope_mult ||
+           !sensor->offset_num || !sensor->offset_den)
+           return -ENODEV;
 
-       if (intr)
-               nv_error(therm, "unhandled intr 0x%08x\n", intr);
+       core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+       core_temp = core_temp + sensor->offset_num / sensor->offset_den;
+       core_temp = core_temp + sensor->offset_constant - 8;
 
-       /* ACK everything */
-       nv_wr32(therm, 0x20100, 0xffffffff);
-       nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+       /* reserve negative temperatures for errors */
+       if (core_temp < 0)
+               core_temp = 0;
 
-       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+       return core_temp;
 }
 
 static int
@@ -278,33 +169,29 @@ nv50_therm_ctor(struct nouveau_object *parent,
        priv->base.base.pwm_set = nv50_fan_pwm_set;
        priv->base.base.pwm_clock = nv50_fan_pwm_clock;
        priv->base.base.temp_get = nv50_temp_get;
-       priv->base.sensor.program_alarms = nv50_therm_program_alarms;
-       nv_subdev(priv)->intr = nv50_therm_intr;
-
-       /* init the thresholds */
-       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-                                                NOUVEAU_THERM_THRS_SHUTDOWN,
-                                                NOUVEAU_THERM_THRS_LOWER);
-       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-                                                NOUVEAU_THERM_THRS_FANBOOST,
-                                                NOUVEAU_THERM_THRS_LOWER);
-       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-                                                NOUVEAU_THERM_THRS_CRITICAL,
-                                                NOUVEAU_THERM_THRS_LOWER);
-       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-                                                NOUVEAU_THERM_THRS_DOWNCLOCK,
-                                                NOUVEAU_THERM_THRS_LOWER);
+       priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+       nv_subdev(priv)->intr = nv40_therm_intr;
 
        return nouveau_therm_preinit(&priv->base.base);
 }
 
+static int
+nv50_therm_init(struct nouveau_object *object)
+{
+       struct nouveau_therm *therm = (void *)object;
+
+       nv50_sensor_setup(therm);
+
+       return _nouveau_therm_init(object);
+}
+
 struct nouveau_oclass
 nv50_therm_oclass = {
        .handle = NV_SUBDEV(THERM, 0x50),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv50_therm_ctor,
                .dtor = _nouveau_therm_dtor,
-               .init = _nouveau_therm_init,
+               .init = nv50_therm_init,
                .fini = _nouveau_therm_fini,
        },
 };
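
The replacement nv50_temp_get() no longer reads a ready-made temperature from 0x20400; it takes the raw 14-bit reading at 0x20014 and applies the slope/offset calibration parsed from the VBIOS sensor table, refusing to report anything when the calibration is missing. A userspace sketch of that conversion follows; the calibration numbers are invented for the example and are not real VBIOS data:

#include <stdio.h>

struct sensor_calib {
        int slope_mult, slope_div;
        int offset_num, offset_den;
        int offset_constant;
};

static int raw_to_celsius(int raw, const struct sensor_calib *s)
{
        int t;

        /* mirror the check above (the kernel returns -ENODEV here):
         * without calibration the sensor is unusable */
        if (!s->slope_div || !s->slope_mult || !s->offset_num || !s->offset_den)
                return -1;

        t  = (raw & 0x3fff) * s->slope_mult / s->slope_div;
        t += s->offset_num / s->offset_den;
        t += s->offset_constant - 8;

        return t < 0 ? 0 : t;   /* negative values are reserved for errors */
}

int main(void)
{
        /* hypothetical calibration, NOT real VBIOS data */
        struct sensor_calib calib = { 1, 10, 1, 1, 0 };

        printf("raw 0x2a6 -> %d degC\n", raw_to_celsius(0x2a6, &calib));
        return 0;
}
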
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
new file mode 100644 (file)
index 0000000..42ba633
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ *         Martin Peres
+ */
+
+#include "priv.h"
+
+struct nv84_therm_priv {
+       struct nouveau_therm_priv base;
+};
+
+int
+nv84_temp_get(struct nouveau_therm *therm)
+{
+       return nv_rd32(therm, 0x20400);
+}
+
+static void
+nv84_therm_program_alarms(struct nouveau_therm *therm)
+{
+       struct nouveau_therm_priv *priv = (void *)therm;
+       struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+       /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+       nv_wr32(therm, 0x20000, 0x000003ff);
+
+       /* shutdown: The computer should be shut down when reached */
+       nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+       nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+       /* THRS_1 : fan boost */
+       nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+       /* THRS_2 : critical */
+       nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+       /* THRS_4 : down clock */
+       nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+       nv_debug(therm,
+                "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+                sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+                sensor->thrs_down_clock.temp,
+                sensor->thrs_down_clock.hysteresis,
+                sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+                sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+}
+
+/* must be called with alarm_program_lock taken ! */
+static void
+nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+                                  uint32_t thrs_reg, u8 status_bit,
+                                  const struct nvbios_therm_threshold *thrs,
+                                  enum nouveau_therm_thrs thrs_name)
+{
+       enum nouveau_therm_thrs_direction direction;
+       enum nouveau_therm_thrs_state prev_state, new_state;
+       int temp, cur;
+
+       prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+       temp = nv_rd32(therm, thrs_reg);
+
+       /* program the next threshold */
+       if (temp == thrs->temp) {
+               nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+               new_state = NOUVEAU_THERM_THRS_HIGHER;
+       } else {
+               nv_wr32(therm, thrs_reg, thrs->temp);
+               new_state = NOUVEAU_THERM_THRS_LOWER;
+       }
+
+       /* fix the state (in case someone reprogrammed the alarms) */
+       cur = therm->temp_get(therm);
+       if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+               new_state = NOUVEAU_THERM_THRS_HIGHER;
+       else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+               cur < thrs->temp - thrs->hysteresis)
+               new_state = NOUVEAU_THERM_THRS_LOWER;
+       nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+       /* find the direction */
+       if (prev_state < new_state)
+               direction = NOUVEAU_THERM_THRS_RISING;
+       else if (prev_state > new_state)
+               direction = NOUVEAU_THERM_THRS_FALLING;
+       else
+               return;
+
+       /* advertise a change in direction */
+       nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+nv84_therm_intr(struct nouveau_subdev *subdev)
+{
+       struct nouveau_therm *therm = nouveau_therm(subdev);
+       struct nouveau_therm_priv *priv = (void *)therm;
+       struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+       unsigned long flags;
+       uint32_t intr;
+
+       spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+       intr = nv_rd32(therm, 0x20100);
+
+       /* THRS_4: downclock */
+       if (intr & 0x002) {
+               nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+                                                 &sensor->thrs_down_clock,
+                                                 NOUVEAU_THERM_THRS_DOWNCLOCK);
+               intr &= ~0x002;
+       }
+
+       /* shutdown */
+       if (intr & 0x004) {
+               nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+                                                  &sensor->thrs_shutdown,
+                                                  NOUVEAU_THERM_THRS_SHUTDOWN);
+               intr &= ~0x004;
+       }
+
+       /* THRS_1 : fan boost */
+       if (intr & 0x008) {
+               nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+                                                  &sensor->thrs_fan_boost,
+                                                  NOUVEAU_THERM_THRS_FANBOOST);
+               intr &= ~0x008;
+       }
+
+       /* THRS_2 : critical */
+       if (intr & 0x010) {
+               nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+                                                  &sensor->thrs_critical,
+                                                  NOUVEAU_THERM_THRS_CRITICAL);
+               intr &= ~0x010;
+       }
+
+       if (intr)
+               nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+       /* ACK everything */
+       nv_wr32(therm, 0x20100, 0xffffffff);
+       nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+static int
+nv84_therm_ctor(struct nouveau_object *parent,
+               struct nouveau_object *engine,
+               struct nouveau_oclass *oclass, void *data, u32 size,
+               struct nouveau_object **pobject)
+{
+       struct nv84_therm_priv *priv;
+       int ret;
+
+       ret = nouveau_therm_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+       priv->base.base.pwm_get = nv50_fan_pwm_get;
+       priv->base.base.pwm_set = nv50_fan_pwm_set;
+       priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+       priv->base.base.temp_get = nv84_temp_get;
+       priv->base.sensor.program_alarms = nv84_therm_program_alarms;
+       nv_subdev(priv)->intr = nv84_therm_intr;
+
+       /* init the thresholds */
+       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+                                                NOUVEAU_THERM_THRS_SHUTDOWN,
+                                                NOUVEAU_THERM_THRS_LOWER);
+       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+                                                NOUVEAU_THERM_THRS_FANBOOST,
+                                                NOUVEAU_THERM_THRS_LOWER);
+       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+                                                NOUVEAU_THERM_THRS_CRITICAL,
+                                                NOUVEAU_THERM_THRS_LOWER);
+       nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+                                                NOUVEAU_THERM_THRS_DOWNCLOCK,
+                                                NOUVEAU_THERM_THRS_LOWER);
+
+       return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nv84_therm_oclass = {
+       .handle = NV_SUBDEV(THERM, 0x84),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv84_therm_ctor,
+               .dtor = _nouveau_therm_dtor,
+               .init = _nouveau_therm_init,
+               .fini = _nouveau_therm_fini,
+       },
+};
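
nv84_therm_threshold_hyst_emulation() implements hysteresis in software: each time an alarm fires, the comparison register is flipped between thrs->temp and thrs->temp - hysteresis, and the stored LOWER/HIGHER state (corrected against the current temperature) is what turns the interrupt into a RISING or FALLING event. A compact simulation of that reprogramming trick, with invented temperatures:

#include <stdio.h>

enum state { LOWER, HIGHER };

struct thrs { int temp, hyst; };

static int thrs_reg;    /* the single comparison value the hardware holds */

static void hyst_emulation(const struct thrs *t, int cur, enum state *st)
{
        enum state prev = *st, new;

        /* program the next comparison value */
        if (thrs_reg == t->temp) {
                thrs_reg = t->temp - t->hyst;
                new = HIGHER;
        } else {
                thrs_reg = t->temp;
                new = LOWER;
        }

        /* fix the state in case the current temperature disagrees */
        if (new == LOWER && cur > t->temp)
                new = HIGHER;
        else if (new == HIGHER && cur < t->temp - t->hyst)
                new = LOWER;
        *st = new;

        if (prev < new)
                printf("rising through %d, reg now %d\n", t->temp, thrs_reg);
        else if (prev > new)
                printf("falling below %d, reg now %d\n",
                       t->temp - t->hyst, thrs_reg);
}

int main(void)
{
        struct thrs fan_boost = { .temp = 90, .hyst = 3 }; /* invented values */
        enum state st = LOWER;

        thrs_reg = fan_boost.temp;              /* as programmed by the alarms */
        hyst_emulation(&fan_boost, 91, &st);    /* crossed 90 C: rising */
        hyst_emulation(&fan_boost, 86, &st);    /* dropped below 87 C: falling */
        return 0;
}
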
index 2dcc543..d11a7c4 100644 (file)
@@ -81,7 +81,7 @@ nva3_therm_ctor(struct nouveau_object *parent,
        priv->base.base.pwm_get = nv50_fan_pwm_get;
        priv->base.base.pwm_set = nv50_fan_pwm_set;
        priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-       priv->base.base.temp_get = nv50_temp_get;
+       priv->base.base.temp_get = nv84_temp_get;
        priv->base.base.fan_sense = nva3_therm_fan_sense;
        priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
        return nouveau_therm_preinit(&priv->base.base);
index d7d30ee..54c28bd 100644 (file)
@@ -135,7 +135,7 @@ nvd0_therm_ctor(struct nouveau_object *parent,
        priv->base.base.pwm_get = nvd0_fan_pwm_get;
        priv->base.base.pwm_set = nvd0_fan_pwm_set;
        priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
-       priv->base.base.temp_get = nv50_temp_get;
+       priv->base.base.temp_get = nv84_temp_get;
        priv->base.base.fan_sense = nva3_therm_fan_sense;
        priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
        return nouveau_therm_preinit(&priv->base.base);
index 438d982..15ca64e 100644 (file)
@@ -134,11 +134,12 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
                                enum nouveau_therm_thrs_direction dir);
 void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
 
+void nv40_therm_intr(struct nouveau_subdev *);
 int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
 int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
 int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
 int nv50_fan_pwm_clock(struct nouveau_therm *);
-int nv50_temp_get(struct nouveau_therm *therm);
+int nv84_temp_get(struct nouveau_therm *therm);
 
 int nva3_therm_fan_sense(struct nouveau_therm *);
 
index 470f6a4..dde746c 100644 (file)
@@ -205,13 +205,13 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
        struct nouveau_therm_priv *priv = (void *)therm;
        struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
 
-       nv_info(therm,
-               "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-               sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-               sensor->thrs_down_clock.temp,
-               sensor->thrs_down_clock.hysteresis,
-               sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-               sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+       nv_debug(therm,
+                "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+                sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+                sensor->thrs_down_clock.temp,
+                sensor->thrs_down_clock.hysteresis,
+                sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+                sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
 
        alarm_timer_callback(&priv->sensor.therm_poll_alarm);
 }
index 8e1bae4..9469b82 100644 (file)
@@ -96,11 +96,16 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
 
        /* append new alarm to list, in soonest-alarm-first order */
        spin_lock_irqsave(&priv->lock, flags);
-       list_for_each_entry(list, &priv->alarms, head) {
-               if (list->timestamp > alarm->timestamp)
-                       break;
+       if (!time) {
+               if (!list_empty(&alarm->head))
+                       list_del(&alarm->head);
+       } else {
+               list_for_each_entry(list, &priv->alarms, head) {
+                       if (list->timestamp > alarm->timestamp)
+                               break;
+               }
+               list_add_tail(&alarm->head, &list->head);
        }
-       list_add_tail(&alarm->head, &list->head);
        spin_unlock_irqrestore(&priv->lock, flags);
 
        /* process pending alarms */
index 6adbbc9..ed45437 100644 (file)
@@ -110,7 +110,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
                                 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
                                 &priv->vm->pgt[0].obj[0]);
index 9474cfc..064c762 100644 (file)
@@ -119,7 +119,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
                                 16, NVOBJ_FLAG_ZERO_ALLOC,
                                 &priv->vm->pgt[0].obj[0]);
index aa81314..fae1f67 100644 (file)
@@ -196,7 +196,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(parent, NULL,
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
                                 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
                                 &priv->vm->pgt[0].obj[0]);
index 30c61e6..4c3b0a2 100644 (file)
 #include <subdev/timer.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
+#include <subdev/ltcg.h>
 
 struct nvc0_vmmgr_priv {
        struct nouveau_vmmgr base;
        spinlock_t lock;
 };
 
+
+/* Map from compressed to corresponding uncompressed storage type.
+ * The value 0xff represents an invalid storage type.
+ */
+const u8 nvc0_pte_storage_type_map[256] =
+{
+       0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+       0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+       0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+       0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+       0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+       0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+       0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+       0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+       0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+       0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+       0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+       0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+       0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+       0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+       0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+};
+
+
 static void
 nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
                struct nouveau_gpuobj *pgt[2])
@@ -68,10 +110,20 @@ static void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
            struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
-       u32 next = 1 << (vma->node->type - 8);
+       u64 next = 1 << (vma->node->type - 8);
 
        phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
        pte <<= 3;
+
+       if (mem->tag) {
+               struct nouveau_ltcg *ltcg =
+                       nouveau_ltcg(vma->vm->vmm->base.base.parent);
+               u32 tag = mem->tag->offset + (delta >> 17);
+               phys |= (u64)tag << (32 + 12);
+               next |= (u64)1   << (32 + 12);
+               ltcg->tags_clear(ltcg, tag, cnt);
+       }
+
        while (cnt--) {
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
@@ -85,10 +137,12 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
        u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
+       /* compressed storage types are invalid for system memory */
+       u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];
 
        pte <<= 3;
        while (cnt--) {
-               u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
+               u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                pte += 8;
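
For compressed VRAM mappings, nvc0_vm_map() now folds a compression-tag index into each PTE: one tag covers 128 KiB of the mapping (hence delta >> 17), the index is placed at bit 44 (32 + 12) of the 64-bit descriptor, and the same bit is set in next so the tag field advances along with the address as successive entries are written. The sketch below shows just the packing step; the base descriptor and the tag offset are placeholders, since nvc0_vm_addr() is not part of this diff:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t phys     = 0x0000000020000001ULL; /* placeholder descriptor */
        uint32_t tag_base = 0x40;                  /* stand-in for mem->tag->offset */
        uint64_t delta    = 3 << 17;               /* fourth 128 KiB block of the map */
        uint32_t tag      = tag_base + (uint32_t)(delta >> 17);

        phys |= (uint64_t)tag << (32 + 12);        /* tag field starts at bit 44 */

        /* split as lower_32_bits / upper_32_bits, like the nv_wo32() pair */
        printf("pte lo 0x%08x  pte hi 0x%08x\n",
               (unsigned)phys, (unsigned)(phys >> 32));
        return 0;
}
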
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
new file mode 100644 (file)
index 0000000..ea3f5b8
--- /dev/null
@@ -0,0 +1,10 @@
+nouveau-y += dispnv04/arb.o
+nouveau-y += dispnv04/crtc.o
+nouveau-y += dispnv04/cursor.o
+nouveau-y += dispnv04/dac.o
+nouveau-y += dispnv04/dfp.o
+nouveau-y += dispnv04/disp.o
+nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/tvmodesnv17.o
+nouveau-y += dispnv04/tvnv04.o
+nouveau-y += dispnv04/tvnv17.o
similarity index 99%
rename from drivers/gpu/drm/nouveau/nouveau_calc.c
rename to drivers/gpu/drm/nouveau/dispnv04/arb.c
index 6da5764..2e70462 100644 (file)
@@ -25,7 +25,7 @@
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 
 /****************************************************************************\
 *                                                                            *
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv04_crtc.c
rename to drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6578cd2..0782bd2 100644 (file)
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nvreg.h"
 #include "nouveau_fbcon.h"
-#include "nv04_display.h"
+#include "disp.h"
 
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
@@ -1070,4 +1070,3 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 
        return 0;
 }
-
similarity index 98%
rename from drivers/gpu/drm/nouveau/nv04_cursor.c
rename to drivers/gpu/drm/nouveau/dispnv04/cursor.c
index fe86f0d..a810303 100644 (file)
@@ -3,7 +3,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 
 static void
 nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
@@ -68,4 +68,3 @@ nv04_cursor_init(struct nouveau_crtc *crtc)
        crtc->cursor.show = nv04_cursor_show;
        return 0;
 }
-
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv04_dac.c
rename to drivers/gpu/drm/nouveau/dispnv04/dac.c
index 64f7020..434b920 100644 (file)
@@ -31,7 +31,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nvreg.h"
 
 #include <subdev/bios/gpio.h>
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv04_dfp.c
rename to drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7e24cdf..93dd23f 100644 (file)
@@ -32,7 +32,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nvreg.h"
 
 #include <drm/i2c/sil164.h>
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv04_display.c
rename to drivers/gpu/drm/nouveau/dispnv04/disp.c
index ad48444..4908d3f 100644 (file)
@@ -30,7 +30,7 @@
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
similarity index 99%
rename from drivers/gpu/drm/nouveau/nouveau_hw.c
rename to drivers/gpu/drm/nouveau/dispnv04/hw.c
index 617a06f..973056b 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <drm/drmP.h>
 #include "nouveau_drm.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
similarity index 99%
rename from drivers/gpu/drm/nouveau/nouveau_hw.h
rename to drivers/gpu/drm/nouveau/dispnv04/hw.h
index 7dff102..eeb70d9 100644 (file)
@@ -24,7 +24,8 @@
 #define __NOUVEAU_HW_H__
 
 #include <drm/drmP.h>
-#include "nv04_display.h"
+#include "disp.h"
+#include "nvreg.h"
 
 #include <subdev/bios/pll.h>
 
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv17_tv_modes.c
rename to drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 1cdfe2a..08c6f5e 100644 (file)
@@ -29,8 +29,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
-#include "nv17_tv.h"
+#include "hw.h"
+#include "tvnv17.h"
 
 char *nv17_tv_norm_names[NUM_TV_NORMS] = {
        [TV_NORM_PAL] = "PAL",
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv04_tv.c
rename to drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 4a69ccd..bf13db4 100644 (file)
@@ -30,7 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include <drm/drm_crtc_helper.h>
 
 #include <drm/i2c/ch7006.h>
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv17_tv.c
rename to drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 977e42b..acef48f 100644 (file)
@@ -31,8 +31,8 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
-#include "nv17_tv.h"
+#include "hw.h"
+#include "tvnv17.h"
 
 #include <core/device.h>
 
index 3b6dc88..1c4c6c9 100644 (file)
@@ -30,6 +30,7 @@
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <subdev/instmem.h>
+#include <engine/graph.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
@@ -168,6 +169,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_device *device = nv_device(drm->device);
        struct nouveau_timer *ptimer = nouveau_timer(device);
+       struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
        struct drm_nouveau_getparam *getparam = data;
 
        switch (getparam->param) {
@@ -208,14 +210,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
                getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_GRAPH_UNITS:
-               /* NV40 and NV50 versions are quite different, but register
-                * address is the same. User is supposed to know the card
-                * family anyway... */
-               if (device->chipset >= 0x40) {
-                       getparam->value = nv_rd32(device, 0x001540);
-                       break;
-               }
-               /* FALLTHRU */
+               getparam->value = graph->units ? graph->units(graph) : 0;
+               break;
        default:
                nv_debug(device, "unknown parameter %lld\n", getparam->param);
                return -EINVAL;
@@ -391,7 +387,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_device *device = nv_device(drm->device);
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-       struct nouveau_abi16_chan *chan, *temp;
+       struct nouveau_abi16_chan *chan = NULL, *temp;
        struct nouveau_abi16_ntfy *ntfy;
        struct nouveau_object *object;
        struct nv_dma_class args = {};
@@ -404,10 +400,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
                return nouveau_abi16_put(abi16, -EINVAL);
 
-       list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
-               if (chan->chan->handle == (NVDRM_CHAN | info->channel))
+       list_for_each_entry(temp, &abi16->channels, head) {
+               if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
+                       chan = temp;
                        break;
-               chan = NULL;
+               }
        }
 
        if (!chan)
@@ -459,17 +456,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
        struct drm_nouveau_gpuobj_free *fini = data;
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-       struct nouveau_abi16_chan *chan, *temp;
+       struct nouveau_abi16_chan *chan = NULL, *temp;
        struct nouveau_abi16_ntfy *ntfy;
        int ret;
 
        if (unlikely(!abi16))
                return -ENOMEM;
 
-       list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
-               if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
+       list_for_each_entry(temp, &abi16->channels, head) {
+               if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
+                       chan = temp;
                        break;
-               chan = NULL;
+               }
        }
 
        if (!chan)
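
Both ioctls above replace the list_for_each_entry_safe() cursor trick with an explicit found pointer: in the old form chan was cleared to NULL inside the body, but the loop increment reassigns the cursor, so falling off the end of the list could leave chan pointing at the list head instead of NULL. The corrected search pattern, reduced to plain C with a toy channel list (the handles are made up):

#include <stdio.h>
#include <stdint.h>

struct chan {
        uint32_t handle;
        struct chan *next;
};

/* explicit "found" pointer: stays NULL unless a handle matches */
static struct chan *find_chan(struct chan *head, uint32_t handle)
{
        struct chan *chan = NULL, *temp;

        for (temp = head; temp; temp = temp->next) {
                if (temp->handle == handle) {
                        chan = temp;
                        break;
                }
        }
        return chan;
}

int main(void)
{
        struct chan c2 = { 0xcccc0002, NULL };
        struct chan c1 = { 0xcccc0001, &c2 };

        printf("0xcccc0002: %s\n", find_chan(&c1, 0xcccc0002) ? "found" : "ENOENT");
        printf("0xcccc0007: %s\n", find_chan(&c1, 0xcccc0007) ? "found" : "ENOENT");
        return 0;
}
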
index 5d94030..2ffad21 100644 (file)
@@ -239,6 +239,9 @@ nouveau_backlight_init(struct drm_device *dev)
                case NV_40:
                        return nv40_backlight_init(connector);
                case NV_50:
+               case NV_C0:
+               case NV_D0:
+               case NV_E0:
                        return nv50_backlight_init(connector);
                default:
                        break;
index 50a6dd0..6aa2137 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
 
 #include <linux/io-mapping.h>
index 7ccd28f..0067586 100644 (file)
@@ -24,8 +24,6 @@
 #ifndef __NOUVEAU_DISPBIOS_H__
 #define __NOUVEAU_DISPBIOS_H__
 
-#include "nvreg.h"
-
 #define DCB_MAX_NUM_ENTRIES 16
 #define DCB_MAX_NUM_I2C_ENTRIES 16
 #define DCB_MAX_NUM_GPIO_ENTRIES 32
index 4dd7ae2..4da776f 100644 (file)
@@ -32,7 +32,7 @@
 
 #include "nouveau_reg.h"
 #include "nouveau_drm.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_acpi.h"
 
 #include "nouveau_display.h"
index 4610c3a..7bf22d4 100644 (file)
@@ -28,7 +28,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "nouveau_fbcon.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
 #include "nouveau_gem.h"
index d109936..46c152f 100644 (file)
 #include <core/gpuobj.h>
 #include <core/class.h>
 
-#include <subdev/device.h>
-#include <subdev/vm.h>
-
+#include <engine/device.h>
 #include <engine/disp.h>
+#include <engine/fifo.h>
+
+#include <subdev/vm.h>
 
 #include "nouveau_drm.h"
-#include "nouveau_irq.h"
 #include "nouveau_dma.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
@@ -71,12 +71,26 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
 
 static struct drm_driver driver;
 
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+       struct nouveau_drm *drm =
+               container_of(event, struct nouveau_drm, vblank[head]);
+       drm_handle_vblank(drm->dev, head);
+       return NVKM_EVENT_KEEP;
+}
+
 static int
 nouveau_drm_vblank_enable(struct drm_device *dev, int head)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-       nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+
+       if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
+               return -EIO;
+       WARN_ON_ONCE(drm->vblank[head].func);
+       drm->vblank[head].func = nouveau_drm_vblank_handler;
+       nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
        return 0;
 }
 
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-       nouveau_event_put(pdisp->vblank, head, &drm->vblank);
-}
-
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
-       struct nouveau_drm *drm =
-               container_of(event, struct nouveau_drm, vblank);
-       drm_handle_vblank(drm->dev, head);
-       return NVKM_EVENT_KEEP;
+       if (drm->vblank[head].func)
+               nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
+       else
+               WARN_ON_ONCE(1);
+       drm->vblank[head].func = NULL;
 }
 
 static u64
@@ -156,7 +165,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
        u32 arg0, arg1;
        int ret;
 
-       if (nouveau_noaccel)
+       if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
                return;
 
        /* initialise synchronisation routines */
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
        dev->dev_private = drm;
        drm->dev = dev;
-       drm->vblank.func = nouveau_drm_vblank_handler;
 
        INIT_LIST_HEAD(&drm->clients);
        spin_lock_init(&drm->tile.lock);
@@ -357,10 +365,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto fail_bios;
 
-       ret = nouveau_irq_init(dev);
-       if (ret)
-               goto fail_irq;
-
        ret = nouveau_display_create(dev);
        if (ret)
                goto fail_dispctor;
@@ -380,8 +384,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 fail_dispinit:
        nouveau_display_destroy(dev);
 fail_dispctor:
-       nouveau_irq_fini(dev);
-fail_irq:
        nouveau_bios_takedown(dev);
 fail_bios:
        nouveau_ttm_fini(drm);
@@ -407,7 +409,6 @@ nouveau_drm_unload(struct drm_device *dev)
                nouveau_display_fini(dev);
        nouveau_display_destroy(dev);
 
-       nouveau_irq_fini(dev);
        nouveau_bios_takedown(dev);
 
        nouveau_ttm_fini(drm);
@@ -525,7 +526,6 @@ nouveau_do_resume(struct drm_device *dev)
                nouveau_fence(drm)->resume(drm);
 
        nouveau_run_vbios_init(dev);
-       nouveau_irq_postinstall(dev);
        nouveau_pm_resume(dev);
 
        if (dev->mode_config.num_crtc) {
@@ -661,8 +661,7 @@ static struct drm_driver
 driver = {
        .driver_features =
                DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
-               DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-               DRIVER_MODESET | DRIVER_PRIME,
+               DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 
        .load = nouveau_drm_load,
        .unload = nouveau_drm_unload,
@@ -676,11 +675,6 @@ driver = {
        .debugfs_cleanup = nouveau_debugfs_takedown,
 #endif
 
-       .irq_preinstall = nouveau_irq_preinstall,
-       .irq_postinstall = nouveau_irq_postinstall,
-       .irq_uninstall = nouveau_irq_uninstall,
-       .irq_handler = nouveau_irq_handler,
-
        .get_vblank_counter = drm_vblank_count,
        .enable_vblank = nouveau_drm_vblank_enable,
        .disable_vblank = nouveau_drm_vblank_disable,
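
The vblank bookkeeping also changes from a single nouveau_eventh to one per head, so enabling and disabling vblanks on several CRTCs at once can be paired up correctly; the handler still recovers the drm pointer from whichever array slot fired via container_of(). The userspace sketch below shows that arithmetic with a simplified struct layout (it steps back to vblank[0] explicitly, where the kernel version indexes the member designator directly):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct eventh {
        int (*func)(struct eventh *, int);
};

struct drm {
        const char *name;
        struct eventh vblank[4];        /* one handler slot per head */
};

static int vblank_handler(struct eventh *event, int head)
{
        /* event is &drm->vblank[head]; back up to vblank[0], then to drm */
        struct drm *drm = container_of(event - head, struct drm, vblank[0]);

        printf("%s: vblank on head %d\n", drm->name, head);
        return 0;
}

int main(void)
{
        struct drm drm = { .name = "card0" };
        int head;

        for (head = 0; head < 4; head++)
                drm.vblank[head].func = vblank_handler;

        /* simulate head 2 firing: the event core calls back with the
         * registered eventh pointer plus the head index */
        drm.vblank[2].func(&drm.vblank[2], 2);
        return 0;
}
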
index b25df37..f2b30f8 100644 (file)
 
 #define DRIVER_MAJOR           1
 #define DRIVER_MINOR           1
-#define DRIVER_PATCHLEVEL      0
+#define DRIVER_PATCHLEVEL      1
+
+/*
+ * 1.1.1:
+ *      - added support for tiled system memory buffer objects.
+ *      - added support for NOUVEAU_GETPARAM_GRAPH_UNITS on [nvc0,nve0].
+ *      - added support for compressed memory storage types on [nvc0,nve0].
+ *      - added support for software methods 0x600,0x644,0x6ac on nvc0
+ *        to control registers on the MPs to enable performance counters,
+ *        and to control the warp error enable mask (OpenGL requires out of
+ *        bounds access to local memory to be silently ignored / return 0).
+ */
 
 #include <core/client.h>
 #include <core/event.h>
@@ -113,7 +124,7 @@ struct nouveau_drm {
        struct nvbios vbios;
        struct nouveau_display *display;
        struct backlight_device *backlight;
-       struct nouveau_eventh vblank;
+       struct nouveau_eventh vblank[4];
 
        /* power management */
        struct nouveau_pm *pm;
index e243412..24660c0 100644 (file)
@@ -30,7 +30,7 @@
 #include <subdev/bios/dcb.h>
 
 #include <drm/drm_encoder_slave.h>
-#include "nv04_display.h"
+#include "dispnv04/disp.h"
 
 #define NV_DPMS_CLEARED 0x80
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
deleted file mode 100644 (file)
index 1303680..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/mc.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_irq.h"
-#include "nv50_display.h"
-
-void
-nouveau_irq_preinstall(struct drm_device *dev)
-{
-       nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
-}
-
-int
-nouveau_irq_postinstall(struct drm_device *dev)
-{
-       nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
-       return 0;
-}
-
-void
-nouveau_irq_uninstall(struct drm_device *dev)
-{
-       nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
-}
-
-irqreturn_t
-nouveau_irq_handler(DRM_IRQ_ARGS)
-{
-       struct drm_device *dev = arg;
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_mc *pmc = nouveau_mc(device);
-       u32 stat;
-
-       stat = nv_rd32(device, 0x000100);
-       if (stat == 0 || stat == ~0)
-               return IRQ_NONE;
-
-       nv_subdev(pmc)->intr(nv_subdev(pmc));
-       return IRQ_HANDLED;
-}
-
-int
-nouveau_irq_init(struct drm_device *dev)
-{
-       return drm_irq_install(dev);
-}
-
-void
-nouveau_irq_fini(struct drm_device *dev)
-{
-       drm_irq_uninstall(dev);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
deleted file mode 100644 (file)
index 06714ad..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NOUVEAU_IRQ_H__
-#define __NOUVEAU_IRQ_H__
-
-extern int         nouveau_irq_init(struct drm_device *);
-extern void        nouveau_irq_fini(struct drm_device *);
-extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
-extern void        nouveau_irq_preinstall(struct drm_device *);
-extern int         nouveau_irq_postinstall(struct drm_device *);
-extern void        nouveau_irq_uninstall(struct drm_device *);
-
-#endif
index 9be9cb5..f19a15a 100644 (file)
 static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
-       /* nothing to do */
+       struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+       struct nouveau_fb *pfb = nouveau_fb(drm->device);
+       man->priv = pfb;
        return 0;
 }
 
 static int
 nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
 {
-       /* nothing to do */
+       man->priv = NULL;
        return 0;
 }
 
@@ -104,7 +106,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 static void
 nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 {
-       struct nouveau_mm *mm = man->priv;
+       struct nouveau_fb *pfb = man->priv;
+       struct nouveau_mm *mm = &pfb->vram;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;
 
@@ -161,6 +164,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
 {
+       struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+       struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
 
        if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
@@ -171,6 +176,20 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                return -ENOMEM;
        node->page_shift = 12;
 
+       switch (nv_device(drm->device)->card_type) {
+       case NV_50:
+               if (nv_device(drm->device)->chipset != 0x50)
+                       node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
+               break;
+       case NV_C0:
+       case NV_D0:
+       case NV_E0:
+               node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
+               break;
+       default:
+               break;
+       }
+
        mem->mm_node = node;
        mem->start   = 0;
        return 0;
index 2a0cc9d..27afc0e 100644 (file)
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_pm.h"
 
 #include <subdev/bios/pll.h>
index 3382064..3af5bcd 100644 (file)
@@ -26,7 +26,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
index 7f0e6c3..3258873 100644 (file)
@@ -2174,6 +2174,7 @@ int
 nv50_display_create(struct drm_device *dev)
 {
        static const u16 oclass[] = {
+               NVF0_DISP_CLASS,
                NVE0_DISP_CLASS,
                NVD0_DISP_CLASS,
                NVA3_DISP_CLASS,
index 8bd5d27..69620e3 100644 (file)
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "nouveau_drm.h"
 #include "nouveau_bios.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_pm.h"
 #include "nouveau_hwsq.h"
 
index c451c41..912759d 100644 (file)
@@ -110,6 +110,11 @@ static enum drm_connector_status omap_connector_detect(
                        ret = connector_status_connected;
                else
                        ret = connector_status_disconnected;
+       } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
+                       dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
+                       dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
+                       dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
+               ret = connector_status_connected;
        } else {
                ret = connector_status_unknown;
        }
@@ -189,12 +194,30 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
        struct omap_video_timings timings = {0};
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *new_mode;
-       int ret = MODE_BAD;
+       int r, ret = MODE_BAD;
 
        copy_timings_drm_to_omap(&timings, mode);
        mode->vrefresh = drm_mode_vrefresh(mode);
 
-       if (!dssdrv->check_timings(dssdev, &timings)) {
+       /*
+        * if the panel driver doesn't have a check_timings, it's most likely
+        * a fixed-resolution panel; check whether the requested timings match
+        * the panel's timings
+        */
+       if (dssdrv->check_timings) {
+               r = dssdrv->check_timings(dssdev, &timings);
+       } else {
+               struct omap_video_timings t = {0};
+
+               dssdrv->get_timings(dssdev, &t);
+
+               if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+                       r = -EINVAL;
+               else
+                       r = 0;
+       }
+
+       if (!r) {
                /* check if vrefresh is still valid */
                new_mode = drm_mode_duplicate(dev, mode);
                new_mode->clock = timings.pixel_clock;
index bec66a4..79b200a 100644 (file)
@@ -74,6 +74,13 @@ struct omap_crtc {
        struct work_struct page_flip_work;
 };
 
+uint32_t pipe2vbl(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       return dispc_mgr_get_vsync_irq(omap_crtc->channel);
+}
+
 /*
  * Manager-ops, callbacks from output when they need to configure
  * the upstream part of the video pipe.
@@ -613,7 +620,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
        omap_crtc->apply.pre_apply  = omap_crtc_pre_apply;
        omap_crtc->apply.post_apply = omap_crtc_post_apply;
 
-       omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+       omap_crtc->channel = channel;
+       omap_crtc->plane = plane;
+       omap_crtc->plane->crtc = crtc;
+       omap_crtc->name = channel_names[channel];
+       omap_crtc->pipe = id;
+
+       omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
        omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
 
        omap_crtc->error_irq.irqmask =
@@ -621,12 +634,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
        omap_crtc->error_irq.irq = omap_crtc_error_irq;
        omap_irq_register(dev, &omap_crtc->error_irq);
 
-       omap_crtc->channel = channel;
-       omap_crtc->plane = plane;
-       omap_crtc->plane->crtc = crtc;
-       omap_crtc->name = channel_names[channel];
-       omap_crtc->pipe = id;
-
        /* temporary: */
        omap_crtc->mgr.id = channel;
 
index 079c54c..9c53c25 100644 (file)
@@ -74,54 +74,53 @@ static int get_connector_type(struct omap_dss_device *dssdev)
        }
 }
 
+static bool channel_used(struct drm_device *dev, enum omap_channel channel)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < priv->num_crtcs; i++) {
+               struct drm_crtc *crtc = priv->crtcs[i];
+
+               if (omap_crtc_channel(crtc) == channel)
+                       return true;
+       }
+
+       return false;
+}
+
 static int omap_modeset_init(struct drm_device *dev)
 {
        struct omap_drm_private *priv = dev->dev_private;
        struct omap_dss_device *dssdev = NULL;
        int num_ovls = dss_feat_get_num_ovls();
-       int id;
+       int num_mgrs = dss_feat_get_num_mgrs();
+       int num_crtcs;
+       int i, id = 0;
 
        drm_mode_config_init(dev);
 
        omap_drm_irq_install(dev);
 
        /*
-        * Create private planes and CRTCs for the last NUM_CRTCs overlay
-        * plus manager:
+        * We usually don't want to create a CRTC for each manager, at least
+        * not until we have a way to expose private planes to userspace.
+        * Otherwise there would not be enough video pipes left for drm planes.
+        * We use the num_crtc argument to limit the number of crtcs we create.
         */
-       for (id = 0; id < min(num_crtc, num_ovls); id++) {
-               struct drm_plane *plane;
-               struct drm_crtc *crtc;
-
-               plane = omap_plane_init(dev, id, true);
-               crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
+       num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
 
-               BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-               priv->crtcs[id] = crtc;
-               priv->num_crtcs++;
-
-               priv->planes[id] = plane;
-               priv->num_planes++;
-       }
-
-       /*
-        * Create normal planes for the remaining overlays:
-        */
-       for (; id < num_ovls; id++) {
-               struct drm_plane *plane = omap_plane_init(dev, id, false);
-
-               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-               priv->planes[priv->num_planes++] = plane;
-       }
+       dssdev = NULL;
 
        for_each_dss_dev(dssdev) {
                struct drm_connector *connector;
                struct drm_encoder *encoder;
+               enum omap_channel channel;
 
                if (!dssdev->driver) {
                        dev_warn(dev->dev, "%s has no driver.. skipping it\n",
                                        dssdev->name);
-                       return 0;
+                       continue;
                }
 
                if (!(dssdev->driver->get_timings ||
@@ -129,7 +128,7 @@ static int omap_modeset_init(struct drm_device *dev)
                        dev_warn(dev->dev, "%s driver does not support "
                                "get_timings or read_edid.. skipping it!\n",
                                dssdev->name);
-                       return 0;
+                       continue;
                }
 
                encoder = omap_encoder_init(dev, dssdev);
@@ -157,16 +156,118 @@ static int omap_modeset_init(struct drm_device *dev)
 
                drm_mode_connector_attach_encoder(connector, encoder);
 
+               /*
+                * if we have reached the limit of the crtcs we are allowed to
+                * create, don't look for a crtc for this panel/encoder and the
+                * ones after it; the possible_crtcs field for all the encoders
+                * will, of course, still be populated later with the final
+                * set of crtcs we create
+                */
+               if (id == num_crtcs)
+                       continue;
+
+               /*
+                * get the recommended DISPC channel for this encoder. For now,
+                * we only try to create a crtc for the recommended channel;
+                * the other possible channels the encoder can connect to are
+                * not considered.
+                */
+               channel = dssdev->output->dispc_channel;
+
+               /*
+                * if this channel hasn't already been taken by a previously
+                * allocated crtc, we create a new crtc for it
+                */
+               if (!channel_used(dev, channel)) {
+                       struct drm_plane *plane;
+                       struct drm_crtc *crtc;
+
+                       plane = omap_plane_init(dev, id, true);
+                       crtc = omap_crtc_init(dev, plane, channel, id);
+
+                       BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+                       priv->crtcs[id] = crtc;
+                       priv->num_crtcs++;
+
+                       priv->planes[id] = plane;
+                       priv->num_planes++;
+
+                       id++;
+               }
+       }
+
+       /*
+        * we have allocated crtcs according to the needs of the
+        * panels/encoders; add more crtcs here if needed
+        */
+       for (; id < num_crtcs; id++) {
+
+               /* find a free manager for this crtc */
+               for (i = 0; i < num_mgrs; i++) {
+                       if (!channel_used(dev, i)) {
+                               struct drm_plane *plane;
+                               struct drm_crtc *crtc;
+
+                               plane = omap_plane_init(dev, id, true);
+                               crtc = omap_crtc_init(dev, plane, i, id);
+
+                               BUG_ON(priv->num_crtcs >=
+                                       ARRAY_SIZE(priv->crtcs));
+
+                               priv->crtcs[id] = crtc;
+                               priv->num_crtcs++;
+
+                               priv->planes[id] = plane;
+                               priv->num_planes++;
+
+                               break;
+                       } else {
+                               continue;
+                       }
+               }
+
+               if (i == num_mgrs) {
+                       /* this shouldn't really happen */
+                       dev_err(dev->dev, "no managers left for crtc\n");
+                       return -ENOMEM;
+               }
+       }
+
+       /*
+        * Create normal planes for the remaining overlays:
+        */
+       for (; id < num_ovls; id++) {
+               struct drm_plane *plane = omap_plane_init(dev, id, false);
+
+               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+               priv->planes[priv->num_planes++] = plane;
+       }
+
+       for (i = 0; i < priv->num_encoders; i++) {
+               struct drm_encoder *encoder = priv->encoders[i];
+               struct omap_dss_device *dssdev =
+                                       omap_encoder_get_dssdev(encoder);
+
                /* figure out which crtc's we can connect the encoder to: */
                encoder->possible_crtcs = 0;
                for (id = 0; id < priv->num_crtcs; id++) {
-                       enum omap_dss_output_id supported_outputs =
-                                       dss_feat_get_supported_outputs(pipe2chan(id));
+                       struct drm_crtc *crtc = priv->crtcs[id];
+                       enum omap_channel crtc_channel;
+                       enum omap_dss_output_id supported_outputs;
+
+                       crtc_channel = omap_crtc_channel(crtc);
+                       supported_outputs =
+                               dss_feat_get_supported_outputs(crtc_channel);
+
                        if (supported_outputs & dssdev->output->id)
                                encoder->possible_crtcs |= (1 << id);
                }
        }
 
+       DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
+               priv->num_planes, priv->num_crtcs, priv->num_encoders,
+               priv->num_connectors);
+
        dev->mode_config.min_width = 32;
        dev->mode_config.min_height = 32;
 
@@ -303,7 +404,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
        DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -567,7 +668,7 @@ static const struct dev_pm_ops omapdrm_pm_ops = {
 };
 #endif
 
-struct platform_driver pdev = {
+static struct platform_driver pdev = {
                .driver = {
                        .name = DRIVER_NAME,
                        .owner = THIS_MODULE,
index d4f997b..215a20d 100644 (file)
@@ -139,8 +139,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
 int omap_gem_resume(struct device *dev);
 #endif
 
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
 irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
 void omap_irq_preinstall(struct drm_device *dev);
 int omap_irq_postinstall(struct drm_device *dev);
@@ -271,39 +271,9 @@ static inline int align_pitch(int pitch, int width, int bpp)
        return ALIGN(pitch, 8 * bytespp);
 }
 
-static inline enum omap_channel pipe2chan(int pipe)
-{
-       int num_mgrs = dss_feat_get_num_mgrs();
-
-       /*
-        * We usually don't want to create a CRTC for each manager,
-        * at least not until we have a way to expose private planes
-        * to userspace.  Otherwise there would not be enough video
-        * pipes left for drm planes.  The higher #'d managers tend
-        * to have more features so start in reverse order.
-        */
-       return num_mgrs - pipe - 1;
-}
-
 /* map crtc to vblank mask */
-static inline uint32_t pipe2vbl(int crtc)
-{
-       enum omap_channel channel = pipe2chan(crtc);
-       return dispc_mgr_get_vsync_irq(channel);
-}
-
-static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
-               if (priv->crtcs[i] == crtc)
-                       return i;
-
-       BUG();  /* bogus CRTC ptr */
-       return -1;
-}
+uint32_t pipe2vbl(struct drm_crtc *crtc);
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
 
 /* should these be made into common util helpers?
  */
index 21d126d..c29451b 100644 (file)
@@ -41,6 +41,13 @@ struct omap_encoder {
        struct omap_dss_device *dssdev;
 };
 
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+       return omap_encoder->dssdev;
+}
+
 static void omap_encoder_destroy(struct drm_encoder *encoder)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -128,13 +135,26 @@ int omap_encoder_update(struct drm_encoder *encoder,
 
        dssdev->output->manager = mgr;
 
-       ret = dssdrv->check_timings(dssdev, timings);
+       if (dssdrv->check_timings) {
+               ret = dssdrv->check_timings(dssdev, timings);
+       } else {
+               struct omap_video_timings t = {0};
+
+               dssdrv->get_timings(dssdev, &t);
+
+               if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+                       ret = -EINVAL;
+               else
+                       ret = 0;
+       }
+
        if (ret) {
                dev_err(dev->dev, "could not set timings: %d\n", ret);
                return ret;
        }
 
-       dssdrv->set_timings(dssdev, timings);
+       if (dssdrv->set_timings)
+               dssdrv->set_timings(dssdev, timings);
 
        return 0;
 }
index ac74d1b..be7cd97 100644 (file)
@@ -178,7 +178,7 @@ out_unlock:
        return omap_gem_mmap_obj(obj, vma);
 }
 
-struct dma_buf_ops omap_dmabuf_ops = {
+static struct dma_buf_ops omap_dmabuf_ops = {
                .map_dma_buf = omap_gem_map_dma_buf,
                .unmap_dma_buf = omap_gem_unmap_dma_buf,
                .release = omap_gem_dmabuf_release,
@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
                        drm_gem_object_reference(obj);
-                       dma_buf_put(buffer);
                        return obj;
                }
        }
index e01303e..9263db1 100644 (file)
@@ -130,12 +130,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
  * Zero on success, appropriate errno if the given @crtc's vblank
  * interrupt cannot be enabled.
  */
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
 {
        struct omap_drm_private *priv = dev->dev_private;
+       struct drm_crtc *crtc = priv->crtcs[crtc_id];
        unsigned long flags;
 
-       DBG("dev=%p, crtc=%d", dev, crtc);
+       DBG("dev=%p, crtc=%d", dev, crtc_id);
 
        dispc_runtime_get();
        spin_lock_irqsave(&list_lock, flags);
@@ -156,12 +157,13 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
  * a hardware vblank counter, this routine should be a no-op, since
  * interrupts will have to stay on to keep the count accurate.
  */
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
 {
        struct omap_drm_private *priv = dev->dev_private;
+       struct drm_crtc *crtc = priv->crtcs[crtc_id];
        unsigned long flags;
 
-       DBG("dev=%p, crtc=%d", dev, crtc);
+       DBG("dev=%p, crtc=%d", dev, crtc_id);
 
        dispc_runtime_get();
        spin_lock_irqsave(&list_lock, flags);
@@ -186,9 +188,12 @@ irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
 
        VERB("irqs: %08x", irqstatus);
 
-       for (id = 0; id < priv->num_crtcs; id++)
-               if (irqstatus & pipe2vbl(id))
+       for (id = 0; id < priv->num_crtcs; id++) {
+               struct drm_crtc *crtc = priv->crtcs[id];
+
+               if (irqstatus & pipe2vbl(crtc))
                        drm_handle_vblank(dev, id);
+       }
 
        spin_lock_irqsave(&list_lock, flags);
        list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
index 2882cda..8d225d7 100644 (file)
@@ -247,6 +247,12 @@ static int omap_plane_update(struct drm_plane *plane,
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
        omap_plane->enabled = true;
+
+       if (plane->fb)
+               drm_framebuffer_unreference(plane->fb);
+
+       drm_framebuffer_reference(fb);
+
        return omap_plane_mode_set(plane, crtc, fb,
                        crtc_x, crtc_y, crtc_w, crtc_h,
                        src_x, src_y, src_w, src_h,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
new file mode 100644 (file)
index 0000000..2f1a57e
--- /dev/null
@@ -0,0 +1,10 @@
+config DRM_QXL
+       tristate "QXL virtual GPU"
+       depends on DRM && PCI
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select DRM_KMS_HELPER
+       select DRM_TTM
+       help
+               QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
new file mode 100644 (file)
index 0000000..ea046ba
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
+
+obj-$(CONFIG_DRM_QXL) += qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644 (file)
index 0000000..08b0823
--- /dev/null
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+/* QXL cmd/ring handling */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
+
+struct ring {
+       struct qxl_ring_header      header;
+       uint8_t                     elements[0];
+};
+
+struct qxl_ring {
+       struct ring            *ring;
+       int                     element_size;
+       int                     n_elements;
+       int                     prod_notify;
+       wait_queue_head_t      *push_event;
+       spinlock_t             lock;
+};
+
+void qxl_ring_free(struct qxl_ring *ring)
+{
+       kfree(ring);
+}
+
+struct qxl_ring *
+qxl_ring_create(struct qxl_ring_header *header,
+               int element_size,
+               int n_elements,
+               int prod_notify,
+               bool set_prod_notify,
+               wait_queue_head_t *push_event)
+{
+       struct qxl_ring *ring;
+
+       ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       ring->ring = (struct ring *)header;
+       ring->element_size = element_size;
+       ring->n_elements = n_elements;
+       ring->prod_notify = prod_notify;
+       ring->push_event = push_event;
+       if (set_prod_notify)
+               header->notify_on_prod = ring->n_elements;
+       spin_lock_init(&ring->lock);
+       return ring;
+}
+
+static int qxl_check_header(struct qxl_ring *ring)
+{
+       int ret;
+       struct qxl_ring_header *header = &(ring->ring->header);
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       ret = header->prod - header->cons < header->num_items;
+       if (ret == 0)
+               header->notify_on_cons = header->cons + 1;
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return ret;
+}
+
+static int qxl_check_idle(struct qxl_ring *ring)
+{
+       int ret;
+       struct qxl_ring_header *header = &(ring->ring->header);
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       ret = header->prod == header->cons;
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return ret;
+}
+
+int qxl_ring_push(struct qxl_ring *ring,
+                 const void *new_elt, bool interruptible)
+{
+       struct qxl_ring_header *header = &(ring->ring->header);
+       uint8_t *elt;
+       int idx, ret;
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       if (header->prod - header->cons == header->num_items) {
+               header->notify_on_cons = header->cons + 1;
+               mb();
+               spin_unlock_irqrestore(&ring->lock, flags);
+               if (!drm_can_sleep()) {
+                       while (!qxl_check_header(ring))
+                               udelay(1);
+               } else {
+                       if (interruptible) {
+                               ret = wait_event_interruptible(*ring->push_event,
+                                                              qxl_check_header(ring));
+                               if (ret)
+                                       return ret;
+                       } else {
+                               wait_event(*ring->push_event,
+                                          qxl_check_header(ring));
+                       }
+
+               }
+               spin_lock_irqsave(&ring->lock, flags);
+       }
+
+       idx = header->prod & (ring->n_elements - 1);
+       elt = ring->ring->elements + idx * ring->element_size;
+
+       memcpy((void *)elt, new_elt, ring->element_size);
+
+       header->prod++;
+
+       mb();
+
+       if (header->prod == header->notify_on_prod)
+               outb(0, ring->prod_notify);
+
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return 0;
+}
+
+static bool qxl_ring_pop(struct qxl_ring *ring,
+                        void *element)
+{
+       volatile struct qxl_ring_header *header = &(ring->ring->header);
+       volatile uint8_t *ring_elt;
+       int idx;
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       if (header->cons == header->prod) {
+               header->notify_on_prod = header->cons + 1;
+               spin_unlock_irqrestore(&ring->lock, flags);
+               return false;
+       }
+
+       idx = header->cons & (ring->n_elements - 1);
+       ring_elt = ring->ring->elements + idx * ring->element_size;
+
+       memcpy(element, (void *)ring_elt, ring->element_size);
+
+       header->cons++;
+
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return true;
+}
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                             uint32_t type, bool interruptible)
+{
+       struct qxl_command cmd;
+
+       cmd.type = type;
+       cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+       return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
+}
+
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                            uint32_t type, bool interruptible)
+{
+       struct qxl_command cmd;
+
+       cmd.type = type;
+       cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+       return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
+}
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
+{
+       if (!qxl_check_idle(qdev->release_ring)) {
+               queue_work(qdev->gc_queue, &qdev->gc_work);
+               if (flush)
+                       flush_work(&qdev->gc_work);
+               return true;
+       }
+       return false;
+}
+
+int qxl_garbage_collect(struct qxl_device *qdev)
+{
+       struct qxl_release *release;
+       uint64_t id, next_id;
+       int i = 0;
+       int ret;
+       union qxl_release_info *info;
+
+       while (qxl_ring_pop(qdev->release_ring, &id)) {
+               QXL_INFO(qdev, "popped %lld\n", id);
+               while (id) {
+                       release = qxl_release_from_id_locked(qdev, id);
+                       if (release == NULL)
+                               break;
+
+                       ret = qxl_release_reserve(qdev, release, false);
+                       if (ret) {
+                               qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
+                               DRM_ERROR("failed to reserve release %lld\n", id);
+                       }
+
+                       info = qxl_release_map(qdev, release);
+                       next_id = info->next;
+                       qxl_release_unmap(qdev, release, info);
+
+                       qxl_release_unreserve(qdev, release);
+                       QXL_INFO(qdev, "popped %lld, next %lld\n", id,
+                               next_id);
+
+                       switch (release->type) {
+                       case QXL_RELEASE_DRAWABLE:
+                       case QXL_RELEASE_SURFACE_CMD:
+                       case QXL_RELEASE_CURSOR_CMD:
+                               break;
+                       default:
+                               DRM_ERROR("unexpected release type\n");
+                               break;
+                       }
+                       id = next_id;
+
+                       qxl_release_free(qdev, release);
+                       ++i;
+               }
+       }
+
+       QXL_INFO(qdev, "%s: %d\n", __func__, i);
+
+       return i;
+}
+
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+                         struct qxl_bo **_bo)
+{
+       struct qxl_bo *bo;
+       int ret;
+
+       ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+                           QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+       if (ret) {
+               DRM_ERROR("failed to allocate VRAM BO\n");
+               return ret;
+       }
+       ret = qxl_bo_reserve(bo, false);
+       if (unlikely(ret != 0))
+               goto out_unref;
+
+       *_bo = bo;
+       return 0;
+out_unref:
+       qxl_bo_unref(&bo);
+       return 0;
+}
+
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+{
+       int irq_num;
+       long addr = qdev->io_base + port;
+       int ret;
+
+       mutex_lock(&qdev->async_io_mutex);
+       irq_num = atomic_read(&qdev->irq_received_io_cmd);
+
+
+       if (qdev->last_sent_io_cmd > irq_num) {
+               ret = wait_event_interruptible(qdev->io_cmd_event,
+                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+               if (ret)
+                       goto out;
+               irq_num = atomic_read(&qdev->irq_received_io_cmd);
+       }
+       outb(val, addr);
+       qdev->last_sent_io_cmd = irq_num + 1;
+       ret = wait_event_interruptible(qdev->io_cmd_event,
+                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+out:
+       mutex_unlock(&qdev->async_io_mutex);
+       return ret;
+}
+
+static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
+{
+       int ret;
+
+restart:
+       ret = wait_for_io_cmd_user(qdev, val, port);
+       if (ret == -ERESTARTSYS)
+               goto restart;
+}
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+                       const struct qxl_rect *area)
+{
+       int surface_id;
+       uint32_t surface_width, surface_height;
+       int ret;
+
+       if (!surf->hw_surf_alloc)
+               DRM_ERROR("got io update area with no hw surface\n");
+
+       if (surf->is_primary)
+               surface_id = 0;
+       else
+               surface_id = surf->surface_id;
+       surface_width = surf->surf.width;
+       surface_height = surf->surf.height;
+
+       if (area->left < 0 || area->top < 0 ||
+           area->right > surface_width || area->bottom > surface_height) {
+               qxl_io_log(qdev, "%s: not doing area update for "
+                          "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
+                          area->top, area->right, area->bottom, surface_width, surface_height);
+               return -EINVAL;
+       }
+       mutex_lock(&qdev->update_area_mutex);
+       qdev->ram_header->update_area = *area;
+       qdev->ram_header->update_surface = surface_id;
+       ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+       mutex_unlock(&qdev->update_area_mutex);
+       return ret;
+}
+
+void qxl_io_notify_oom(struct qxl_device *qdev)
+{
+       outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
+}
+
+void qxl_io_flush_release(struct qxl_device *qdev)
+{
+       outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
+}
+
+void qxl_io_flush_surfaces(struct qxl_device *qdev)
+{
+       wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
+}
+
+
+void qxl_io_destroy_primary(struct qxl_device *qdev)
+{
+       wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+}
+
+void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
+                          unsigned height, unsigned offset, struct qxl_bo *bo)
+{
+       struct qxl_surface_create *create;
+
+       QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
+                qdev->ram_header);
+       create = &qdev->ram_header->create_surface;
+       create->format = bo->surf.format;
+       create->width = width;
+       create->height = height;
+       create->stride = bo->surf.stride;
+       create->mem = qxl_bo_physical_address(qdev, bo, offset);
+
+       QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
+                bo->kptr);
+
+       create->flags = QXL_SURF_FLAG_KEEP_DATA;
+       create->type = QXL_SURF_TYPE_PRIMARY;
+
+       wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+}
+
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
+{
+       QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+       wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
+}
+
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+       vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
+       va_end(args);
+       /*
+        * Do not do a DRM output here - this will call printk, which will
+        * call back into qxl for rendering (qxl_fb)
+        */
+       outb(0, qdev->io_base + QXL_IO_LOG);
+}
+
+void qxl_io_reset(struct qxl_device *qdev)
+{
+       outb(0, qdev->io_base + QXL_IO_RESET);
+}
+
+void qxl_io_monitors_config(struct qxl_device *qdev)
+{
+       qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
+                  qdev->monitors_config ?
+                  qdev->monitors_config->count : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].width : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].height : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].x : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].y : -1
+                  );
+
+       wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
+}
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+                     struct qxl_bo *surf)
+{
+       uint32_t handle;
+       int idr_ret;
+       int count = 0;
+again:
+       idr_preload(GFP_ATOMIC);
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
+       spin_unlock(&qdev->surf_id_idr_lock);
+       idr_preload_end();
+       if (idr_ret < 0)
+               return idr_ret;
+       handle = idr_ret;
+
+       if (handle >= qdev->rom->n_surfaces) {
+               count++;
+               spin_lock(&qdev->surf_id_idr_lock);
+               idr_remove(&qdev->surf_id_idr, handle);
+               spin_unlock(&qdev->surf_id_idr_lock);
+               qxl_reap_surface_id(qdev, 2);
+               goto again;
+       }
+       surf->surface_id = handle;
+
+       spin_lock(&qdev->surf_id_idr_lock);
+       qdev->last_alloced_surf_id = handle;
+       spin_unlock(&qdev->surf_id_idr_lock);
+       return 0;
+}
+
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+                           uint32_t surface_id)
+{
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_remove(&qdev->surf_id_idr, surface_id);
+       spin_unlock(&qdev->surf_id_idr_lock);
+}
+
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+                        struct qxl_bo *surf,
+                        struct ttm_mem_reg *new_mem)
+{
+       struct qxl_surface_cmd *cmd;
+       struct qxl_release *release;
+       int ret;
+
+       if (surf->hw_surf_alloc)
+               return 0;
+
+       ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
+                                                NULL,
+                                                &release);
+       if (ret)
+               return ret;
+
+       cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_SURFACE_CMD_CREATE;
+       cmd->u.surface_create.format = surf->surf.format;
+       cmd->u.surface_create.width = surf->surf.width;
+       cmd->u.surface_create.height = surf->surf.height;
+       cmd->u.surface_create.stride = surf->surf.stride;
+       if (new_mem) {
+               int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+               struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+               /* TODO - need to hold one of the locks to read tbo.offset */
+               cmd->u.surface_create.data = slot->high_bits;
+
+               cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
+       } else
+               cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+       cmd->surface_id = surf->surface_id;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       surf->surf_create = release;
+
+       /* no need to add a release to the fence for this bo,
+          since it is only released when we ask to destroy the surface
+          and it would never signal otherwise */
+       qxl_fence_releaseable(qdev, release);
+
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+       qxl_release_unreserve(qdev, release);
+
+       surf->hw_surf_alloc = true;
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
+       spin_unlock(&qdev->surf_id_idr_lock);
+       return 0;
+}
+
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+                          struct qxl_bo *surf)
+{
+       struct qxl_surface_cmd *cmd;
+       struct qxl_release *release;
+       int ret;
+       int id;
+
+       if (!surf->hw_surf_alloc)
+               return 0;
+
+       ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
+                                                surf->surf_create,
+                                                &release);
+       if (ret)
+               return ret;
+
+       surf->surf_create = NULL;
+       /* remove the surface from the idr, but not the surface id yet */
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
+       spin_unlock(&qdev->surf_id_idr_lock);
+       surf->hw_surf_alloc = false;
+
+       id = surf->surface_id;
+       surf->surface_id = 0;
+
+       release->surface_release_id = id;
+       cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_SURFACE_CMD_DESTROY;
+       cmd->surface_id = id;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+       qxl_release_unreserve(qdev, release);
+
+
+       return 0;
+}
+
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+{
+       struct qxl_rect rect;
+       int ret;
+
+       /* if we are evicting, we need to make sure the surface is up
+          to date */
+       rect.left = 0;
+       rect.right = surf->surf.width;
+       rect.top = 0;
+       rect.bottom = surf->surf.height;
+retry:
+       ret = qxl_io_update_area(qdev, surf, &rect);
+       if (ret == -ERESTARTSYS)
+               goto retry;
+       return ret;
+}
+
+static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+       /* no need to update area if we are just freeing the surface normally */
+       if (do_update_area)
+               qxl_update_surface(qdev, surf);
+
+       /* nuke the surface id at the hw */
+       qxl_hw_surface_dealloc(qdev, surf);
+}
+
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+       mutex_lock(&qdev->surf_evict_mutex);
+       qxl_surface_evict_locked(qdev, surf, do_update_area);
+       mutex_unlock(&qdev->surf_evict_mutex);
+}
+
+static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
+{
+       int ret;
+
+       ret = qxl_bo_reserve(surf, false);
+       if (ret == -EBUSY)
+               return -EBUSY;
+
+       if (surf->fence.num_active_releases > 0 && stall == false) {
+               qxl_bo_unreserve(surf);
+               return -EBUSY;
+       }
+
+       if (stall)
+               mutex_unlock(&qdev->surf_evict_mutex);
+
+       spin_lock(&surf->tbo.bdev->fence_lock);
+       ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+       spin_unlock(&surf->tbo.bdev->fence_lock);
+
+       if (stall)
+               mutex_lock(&qdev->surf_evict_mutex);
+       if (ret == -EBUSY) {
+               qxl_bo_unreserve(surf);
+               return -EBUSY;
+       }
+
+       qxl_surface_evict_locked(qdev, surf, true);
+       qxl_bo_unreserve(surf);
+       return 0;
+}
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
+{
+       int num_reaped = 0;
+       int i, ret;
+       bool stall = false;
+       int start = 0;
+
+       mutex_lock(&qdev->surf_evict_mutex);
+again:
+
+       spin_lock(&qdev->surf_id_idr_lock);
+       start = qdev->last_alloced_surf_id + 1;
+       spin_unlock(&qdev->surf_id_idr_lock);
+
+       for (i = start; i < start + qdev->rom->n_surfaces; i++) {
+               void *objptr;
+               int surfid = i % qdev->rom->n_surfaces;
+
+               /* this avoids the case where the object is in the
+                  idr but has been evicted half way - it makes
+                  the idr lookup atomic with the eviction */
+               spin_lock(&qdev->surf_id_idr_lock);
+               objptr = idr_find(&qdev->surf_id_idr, surfid);
+               spin_unlock(&qdev->surf_id_idr_lock);
+
+               if (!objptr)
+                       continue;
+
+               ret = qxl_reap_surf(qdev, objptr, stall);
+               if (ret == 0)
+                       num_reaped++;
+               if (num_reaped >= max_to_reap)
+                       break;
+       }
+       if (num_reaped == 0 && stall == false) {
+               stall = true;
+               goto again;
+       }
+
+       mutex_unlock(&qdev->surf_evict_mutex);
+       if (num_reaped) {
+               usleep_range(500, 1000);
+               qxl_queue_garbage_collect(qdev, true);
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644 (file)
index 0000000..c3c2bbd
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+
+#if defined(CONFIG_DEBUG_FS)
+static int
+qxl_debugfs_irq_received(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct qxl_device *qdev = node->minor->dev->dev_private;
+
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+       seq_printf(m, "%d\n", qdev->irq_received_error);
+       return 0;
+}
+
+static int
+qxl_debugfs_buffers_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct qxl_device *qdev = node->minor->dev->dev_private;
+       struct qxl_bo *bo;
+
+       list_for_each_entry(bo, &qdev->gem.objects, list) {
+               seq_printf(m, "size %lu, pc %d, sync obj %p, num releases %d\n",
+                          (unsigned long)bo->gem_base.size, bo->pin_count,
+                          bo->tbo.sync_obj, bo->fence.num_active_releases);
+       }
+       return 0;
+}
+
+static struct drm_info_list qxl_debugfs_list[] = {
+       { "irq_received", qxl_debugfs_irq_received, 0, NULL },
+       { "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
+};
+#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
+#endif
+
+int
+qxl_debugfs_init(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
+#endif
+       return 0;
+}
+
+void
+qxl_debugfs_takedown(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+                                minor);
+#endif
+}
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+                         struct drm_info_list *files,
+                         unsigned nfiles)
+{
+       unsigned i;
+
+       for (i = 0; i < qdev->debugfs_count; i++) {
+               if (qdev->debugfs[i].files == files) {
+                       /* Already registered */
+                       return 0;
+               }
+       }
+
+       i = qdev->debugfs_count + 1;
+       if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
+               DRM_ERROR("Reached maximum number of debugfs components.\n");
+               DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
+               return -EINVAL;
+       }
+       qdev->debugfs[qdev->debugfs_count].files = files;
+       qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
+       qdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_create_files(files, nfiles,
+                                qdev->ddev->control->debugfs_root,
+                                qdev->ddev->control);
+       drm_debugfs_create_files(files, nfiles,
+                                qdev->ddev->primary->debugfs_root,
+                                qdev->ddev->primary);
+#endif
+       return 0;
+}
+
+void qxl_debugfs_remove_files(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned i;
+
+       for (i = 0; i < qdev->debugfs_count; i++) {
+               drm_debugfs_remove_files(qdev->debugfs[i].files,
+                                        qdev->debugfs[i].num_files,
+                                        qdev->ddev->control);
+               drm_debugfs_remove_files(qdev->debugfs[i].files,
+                                        qdev->debugfs[i].num_files,
+                                        qdev->ddev->primary);
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644 (file)
index 0000000..94c5aec
--- /dev/null
@@ -0,0 +1,879 @@
+/*
+   Copyright (C) 2009 Red Hat, Inc.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in
+        the documentation and/or other materials provided with the
+        distribution.
+       * Neither the name of the copyright holder nor the names of its
+        contributors may be used to endorse or promote products derived
+        from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
+   IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+   PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+#ifndef H_QXL_DEV
+#define H_QXL_DEV
+
+#include <linux/types.h>
+
+/*
+ * from spice-protocol
+ * Release 0.10.0
+ */
+
+/* enums.h */
+
+enum SpiceImageType {
+       SPICE_IMAGE_TYPE_BITMAP,
+       SPICE_IMAGE_TYPE_QUIC,
+       SPICE_IMAGE_TYPE_RESERVED,
+       SPICE_IMAGE_TYPE_LZ_PLT = 100,
+       SPICE_IMAGE_TYPE_LZ_RGB,
+       SPICE_IMAGE_TYPE_GLZ_RGB,
+       SPICE_IMAGE_TYPE_FROM_CACHE,
+       SPICE_IMAGE_TYPE_SURFACE,
+       SPICE_IMAGE_TYPE_JPEG,
+       SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
+       SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
+       SPICE_IMAGE_TYPE_JPEG_ALPHA,
+
+       SPICE_IMAGE_TYPE_ENUM_END
+};
+
+enum SpiceBitmapFmt {
+       SPICE_BITMAP_FMT_INVALID,
+       SPICE_BITMAP_FMT_1BIT_LE,
+       SPICE_BITMAP_FMT_1BIT_BE,
+       SPICE_BITMAP_FMT_4BIT_LE,
+       SPICE_BITMAP_FMT_4BIT_BE,
+       SPICE_BITMAP_FMT_8BIT,
+       SPICE_BITMAP_FMT_16BIT,
+       SPICE_BITMAP_FMT_24BIT,
+       SPICE_BITMAP_FMT_32BIT,
+       SPICE_BITMAP_FMT_RGBA,
+
+       SPICE_BITMAP_FMT_ENUM_END
+};
+
+enum SpiceSurfaceFmt {
+       SPICE_SURFACE_FMT_INVALID,
+       SPICE_SURFACE_FMT_1_A,
+       SPICE_SURFACE_FMT_8_A = 8,
+       SPICE_SURFACE_FMT_16_555 = 16,
+       SPICE_SURFACE_FMT_32_xRGB = 32,
+       SPICE_SURFACE_FMT_16_565 = 80,
+       SPICE_SURFACE_FMT_32_ARGB = 96,
+
+       SPICE_SURFACE_FMT_ENUM_END
+};
+
+enum SpiceClipType {
+       SPICE_CLIP_TYPE_NONE,
+       SPICE_CLIP_TYPE_RECTS,
+
+       SPICE_CLIP_TYPE_ENUM_END
+};
+
+enum SpiceRopd {
+       SPICE_ROPD_INVERS_SRC = (1 << 0),
+       SPICE_ROPD_INVERS_BRUSH = (1 << 1),
+       SPICE_ROPD_INVERS_DEST = (1 << 2),
+       SPICE_ROPD_OP_PUT = (1 << 3),
+       SPICE_ROPD_OP_OR = (1 << 4),
+       SPICE_ROPD_OP_AND = (1 << 5),
+       SPICE_ROPD_OP_XOR = (1 << 6),
+       SPICE_ROPD_OP_BLACKNESS = (1 << 7),
+       SPICE_ROPD_OP_WHITENESS = (1 << 8),
+       SPICE_ROPD_OP_INVERS = (1 << 9),
+       SPICE_ROPD_INVERS_RES = (1 << 10),
+
+       SPICE_ROPD_MASK = 0x7ff
+};
+
+enum SpiceBrushType {
+       SPICE_BRUSH_TYPE_NONE,
+       SPICE_BRUSH_TYPE_SOLID,
+       SPICE_BRUSH_TYPE_PATTERN,
+
+       SPICE_BRUSH_TYPE_ENUM_END
+};
+
+enum SpiceCursorType {
+       SPICE_CURSOR_TYPE_ALPHA,
+       SPICE_CURSOR_TYPE_MONO,
+       SPICE_CURSOR_TYPE_COLOR4,
+       SPICE_CURSOR_TYPE_COLOR8,
+       SPICE_CURSOR_TYPE_COLOR16,
+       SPICE_CURSOR_TYPE_COLOR24,
+       SPICE_CURSOR_TYPE_COLOR32,
+
+       SPICE_CURSOR_TYPE_ENUM_END
+};
+
+/* qxl_dev.h */
+
+#pragma pack(push, 1)
+
+#define REDHAT_PCI_VENDOR_ID 0x1b36
+
+/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
+#define QXL_DEVICE_ID_STABLE 0x0100
+
+enum {
+       QXL_REVISION_STABLE_V04 = 0x01,
+       QXL_REVISION_STABLE_V06 = 0x02,
+       QXL_REVISION_STABLE_V10 = 0x03,
+       QXL_REVISION_STABLE_V12 = 0x04,
+};
+
+#define QXL_DEVICE_ID_DEVEL 0x01ff
+#define QXL_REVISION_DEVEL 0x01
+
+#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
+#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
+
+enum {
+       QXL_RAM_RANGE_INDEX,
+       QXL_VRAM_RANGE_INDEX,
+       QXL_ROM_RANGE_INDEX,
+       QXL_IO_RANGE_INDEX,
+
+       QXL_PCI_RANGES
+};
+
+/* qxl-1 compat: append only */
+enum {
+       QXL_IO_NOTIFY_CMD,
+       QXL_IO_NOTIFY_CURSOR,
+       QXL_IO_UPDATE_AREA,
+       QXL_IO_UPDATE_IRQ,
+       QXL_IO_NOTIFY_OOM,
+       QXL_IO_RESET,
+       QXL_IO_SET_MODE,                  /* qxl-1 */
+       QXL_IO_LOG,
+       /* appended for qxl-2 */
+       QXL_IO_MEMSLOT_ADD,
+       QXL_IO_MEMSLOT_DEL,
+       QXL_IO_DETACH_PRIMARY,
+       QXL_IO_ATTACH_PRIMARY,
+       QXL_IO_CREATE_PRIMARY,
+       QXL_IO_DESTROY_PRIMARY,
+       QXL_IO_DESTROY_SURFACE_WAIT,
+       QXL_IO_DESTROY_ALL_SURFACES,
+       /* appended for qxl-3 */
+       QXL_IO_UPDATE_AREA_ASYNC,
+       QXL_IO_MEMSLOT_ADD_ASYNC,
+       QXL_IO_CREATE_PRIMARY_ASYNC,
+       QXL_IO_DESTROY_PRIMARY_ASYNC,
+       QXL_IO_DESTROY_SURFACE_ASYNC,
+       QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
+       QXL_IO_FLUSH_SURFACES_ASYNC,
+       QXL_IO_FLUSH_RELEASE,
+       /* appended for qxl-4 */
+       QXL_IO_MONITORS_CONFIG_ASYNC,
+
+       QXL_IO_RANGE_SIZE
+};
+
+typedef uint64_t QXLPHYSICAL;
+typedef int32_t QXLFIXED; /* fixed 28.4 */
+
+struct qxl_point_fix {
+       QXLFIXED x;
+       QXLFIXED y;
+};
+
+struct qxl_point {
+       int32_t x;
+       int32_t y;
+};
+
+struct qxl_point_1_6 {
+       int16_t x;
+       int16_t y;
+};
+
+struct qxl_rect {
+       int32_t top;
+       int32_t left;
+       int32_t bottom;
+       int32_t right;
+};
+
+struct qxl_urect {
+       uint32_t top;
+       uint32_t left;
+       uint32_t bottom;
+       uint32_t right;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_rom {
+       uint32_t magic;
+       uint32_t id;
+       uint32_t update_id;
+       uint32_t compression_level;
+       uint32_t log_level;
+       uint32_t mode;                    /* qxl-1 */
+       uint32_t modes_offset;
+       uint32_t num_io_pages;
+       uint32_t pages_offset;            /* qxl-1 */
+       uint32_t draw_area_offset;        /* qxl-1 */
+       uint32_t surface0_area_size;      /* qxl-1 name: draw_area_size */
+       uint32_t ram_header_offset;
+       uint32_t mm_clock;
+       /* appended for qxl-2 */
+       uint32_t n_surfaces;
+       uint64_t flags;
+       uint8_t slots_start;
+       uint8_t slots_end;
+       uint8_t slot_gen_bits;
+       uint8_t slot_id_bits;
+       uint8_t slot_generation;
+       /* appended for qxl-4 */
+       uint8_t client_present;
+       uint8_t client_capabilities[58];
+       uint32_t client_monitors_config_crc;
+       struct {
+               uint16_t count;
+               uint16_t padding;
+               struct qxl_urect heads[64];
+       } client_monitors_config;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_mode {
+       uint32_t id;
+       uint32_t x_res;
+       uint32_t y_res;
+       uint32_t bits;
+       uint32_t stride;
+       uint32_t x_mili;
+       uint32_t y_mili;
+       uint32_t orientation;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_modes {
+       uint32_t n_modes;
+       struct qxl_mode modes[0];
+};
+
+/* qxl-1 compat: append only */
+enum qxl_cmd_type {
+       QXL_CMD_NOP,
+       QXL_CMD_DRAW,
+       QXL_CMD_UPDATE,
+       QXL_CMD_CURSOR,
+       QXL_CMD_MESSAGE,
+       QXL_CMD_SURFACE,
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_command {
+       QXLPHYSICAL data;
+       uint32_t type;
+       uint32_t padding;
+};
+
+#define QXL_COMMAND_FLAG_COMPAT                (1<<0)
+#define QXL_COMMAND_FLAG_COMPAT_16BPP  (2<<0)
+
+struct qxl_command_ext {
+       struct qxl_command cmd;
+       uint32_t group_id;
+       uint32_t flags;
+};
+
+struct qxl_mem_slot {
+       uint64_t mem_start;
+       uint64_t mem_end;
+};
+
+#define QXL_SURF_TYPE_PRIMARY     0
+
+#define QXL_SURF_FLAG_KEEP_DATA           (1 << 0)
+
+struct qxl_surface_create {
+       uint32_t width;
+       uint32_t height;
+       int32_t stride;
+       uint32_t format;
+       uint32_t position;
+       uint32_t mouse_mode;
+       uint32_t flags;
+       uint32_t type;
+       QXLPHYSICAL mem;
+};
+
+#define QXL_COMMAND_RING_SIZE 32
+#define QXL_CURSOR_RING_SIZE 32
+#define QXL_RELEASE_RING_SIZE 8
+
+#define QXL_LOG_BUF_SIZE 4096
+
+#define QXL_INTERRUPT_DISPLAY (1 << 0)
+#define QXL_INTERRUPT_CURSOR (1 << 1)
+#define QXL_INTERRUPT_IO_CMD (1 << 2)
+#define QXL_INTERRUPT_ERROR  (1 << 3)
+#define QXL_INTERRUPT_CLIENT (1 << 4)
+#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG  (1 << 5)
+
+struct qxl_ring_header {
+       uint32_t num_items;
+       uint32_t prod;
+       uint32_t notify_on_prod;
+       uint32_t cons;
+       uint32_t notify_on_cons;
+};
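+
+/*
+ * Illustrative note (not part of the original header): the rings below are
+ * plain producer/consumer rings indexed modulo num_items; a guest-side push
+ * might look roughly like
+ *
+ *     idx = hdr->prod & (hdr->num_items - 1);
+ *     ring[idx] = *cmd;
+ *     hdr->prod++;
+ *     if (hdr->prod == hdr->notify_on_prod)
+ *             notify the device via the corresponding QXL_IO_NOTIFY_* port;
+ *
+ * This is only a sketch of the idea; the driver's real ring helpers also
+ * handle memory barriers and the full/empty checks.
+ */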
+
+/* qxl-1 compat: append only */
+struct qxl_ram_header {
+       uint32_t magic;
+       uint32_t int_pending;
+       uint32_t int_mask;
+       uint8_t log_buf[QXL_LOG_BUF_SIZE];
+       struct qxl_ring_header  cmd_ring_hdr;
+       struct qxl_command      cmd_ring[QXL_COMMAND_RING_SIZE];
+       struct qxl_ring_header  cursor_ring_hdr;
+       struct qxl_command      cursor_ring[QXL_CURSOR_RING_SIZE];
+       struct qxl_ring_header  release_ring_hdr;
+       uint64_t                release_ring[QXL_RELEASE_RING_SIZE];
+       struct qxl_rect update_area;
+       /* appended for qxl-2 */
+       uint32_t update_surface;
+       struct qxl_mem_slot mem_slot;
+       struct qxl_surface_create create_surface;
+       uint64_t flags;
+
+       /* appended for qxl-4 */
+
+       /* used by QXL_IO_MONITORS_CONFIG_ASYNC */
+       QXLPHYSICAL monitors_config;
+       uint8_t guest_capabilities[64];
+};
+
+union qxl_release_info {
+       uint64_t id;      /* in  */
+       uint64_t next;    /* out */
+};
+
+struct qxl_release_info_ext {
+       union qxl_release_info *info;
+       uint32_t group_id;
+};
+
+struct qxl_data_chunk {
+       uint32_t data_size;
+       QXLPHYSICAL prev_chunk;
+       QXLPHYSICAL next_chunk;
+       uint8_t data[0];
+};
+
+struct qxl_message {
+       union qxl_release_info release_info;
+       uint8_t data[0];
+};
+
+struct qxl_compat_update_cmd {
+       union qxl_release_info release_info;
+       struct qxl_rect area;
+       uint32_t update_id;
+};
+
+struct qxl_update_cmd {
+       union qxl_release_info release_info;
+       struct qxl_rect area;
+       uint32_t update_id;
+       uint32_t surface_id;
+};
+
+struct qxl_cursor_header {
+       uint64_t unique;
+       uint16_t type;
+       uint16_t width;
+       uint16_t height;
+       uint16_t hot_spot_x;
+       uint16_t hot_spot_y;
+};
+
+struct qxl_cursor {
+       struct qxl_cursor_header header;
+       uint32_t data_size;
+       struct qxl_data_chunk chunk;
+};
+
+enum {
+       QXL_CURSOR_SET,
+       QXL_CURSOR_MOVE,
+       QXL_CURSOR_HIDE,
+       QXL_CURSOR_TRAIL,
+};
+
+#define QXL_CURSOR_DEVICE_DATA_SIZE 128
+
+struct qxl_cursor_cmd {
+       union qxl_release_info release_info;
+       uint8_t type;
+       union {
+               struct {
+                       struct qxl_point_1_6 position;
+                       uint8_t visible;
+                       QXLPHYSICAL shape;
+               } set;
+               struct {
+                       uint16_t length;
+                       uint16_t frequency;
+               } trail;
+               struct qxl_point_1_6 position;
+       } u;
+       /* todo: dynamic size from rom */
+       uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
+};
+
+enum {
+       QXL_DRAW_NOP,
+       QXL_DRAW_FILL,
+       QXL_DRAW_OPAQUE,
+       QXL_DRAW_COPY,
+       QXL_COPY_BITS,
+       QXL_DRAW_BLEND,
+       QXL_DRAW_BLACKNESS,
+       QXL_DRAW_WHITENESS,
+       QXL_DRAW_INVERS,
+       QXL_DRAW_ROP3,
+       QXL_DRAW_STROKE,
+       QXL_DRAW_TEXT,
+       QXL_DRAW_TRANSPARENT,
+       QXL_DRAW_ALPHA_BLEND,
+       QXL_DRAW_COMPOSITE
+};
+
+struct qxl_raster_glyph {
+       struct qxl_point render_pos;
+       struct qxl_point glyph_origin;
+       uint16_t width;
+       uint16_t height;
+       uint8_t data[0];
+};
+
+struct qxl_string {
+       uint32_t data_size;
+       uint16_t length;
+       uint16_t flags;
+       struct qxl_data_chunk chunk;
+};
+
+struct qxl_copy_bits {
+       struct qxl_point src_pos;
+};
+
+enum qxl_effect_type {
+       QXL_EFFECT_BLEND = 0,
+       QXL_EFFECT_OPAQUE = 1,
+       QXL_EFFECT_REVERT_ON_DUP = 2,
+       QXL_EFFECT_BLACKNESS_ON_DUP = 3,
+       QXL_EFFECT_WHITENESS_ON_DUP = 4,
+       QXL_EFFECT_NOP_ON_DUP = 5,
+       QXL_EFFECT_NOP = 6,
+       QXL_EFFECT_OPAQUE_BRUSH = 7
+};
+
+struct qxl_pattern {
+       QXLPHYSICAL pat;
+       struct qxl_point pos;
+};
+
+struct qxl_brush {
+       uint32_t type;
+       union {
+               uint32_t color;
+               struct qxl_pattern pattern;
+       } u;
+};
+
+struct qxl_q_mask {
+       uint8_t flags;
+       struct qxl_point pos;
+       QXLPHYSICAL bitmap;
+};
+
+struct qxl_fill {
+       struct qxl_brush brush;
+       uint16_t rop_descriptor;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_opaque {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       struct qxl_brush brush;
+       uint16_t rop_descriptor;
+       uint8_t scale_mode;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_copy {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       uint16_t rop_descriptor;
+       uint8_t scale_mode;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_transparent {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       uint32_t src_color;
+       uint32_t true_color;
+};
+
+struct qxl_alpha_blend {
+       uint16_t alpha_flags;
+       uint8_t alpha;
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+};
+
+struct qxl_compat_alpha_blend {
+       uint8_t alpha;
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+};
+
+struct qxl_rop_3 {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       struct qxl_brush brush;
+       uint8_t rop3;
+       uint8_t scale_mode;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_line_attr {
+       uint8_t flags;
+       uint8_t join_style;
+       uint8_t end_style;
+       uint8_t style_nseg;
+       QXLFIXED width;
+       QXLFIXED miter_limit;
+       QXLPHYSICAL style;
+};
+
+struct qxl_stroke {
+       QXLPHYSICAL path;
+       struct qxl_line_attr attr;
+       struct qxl_brush brush;
+       uint16_t fore_mode;
+       uint16_t back_mode;
+};
+
+struct qxl_text {
+       QXLPHYSICAL str;
+       struct qxl_rect back_area;
+       struct qxl_brush fore_brush;
+       struct qxl_brush back_brush;
+       uint16_t fore_mode;
+       uint16_t back_mode;
+};
+
+struct qxl_mask {
+       struct qxl_q_mask mask;
+};
+
+struct qxl_clip {
+       uint32_t type;
+       QXLPHYSICAL data;
+};
+
+enum qxl_operator {
+       QXL_OP_CLEAR                     = 0x00,
+       QXL_OP_SOURCE                    = 0x01,
+       QXL_OP_DST                       = 0x02,
+       QXL_OP_OVER                      = 0x03,
+       QXL_OP_OVER_REVERSE              = 0x04,
+       QXL_OP_IN                        = 0x05,
+       QXL_OP_IN_REVERSE                = 0x06,
+       QXL_OP_OUT                       = 0x07,
+       QXL_OP_OUT_REVERSE               = 0x08,
+       QXL_OP_ATOP                      = 0x09,
+       QXL_OP_ATOP_REVERSE              = 0x0a,
+       QXL_OP_XOR                       = 0x0b,
+       QXL_OP_ADD                       = 0x0c,
+       QXL_OP_SATURATE                  = 0x0d,
+       /* Note the jump here from 0x0d to 0x30 */
+       QXL_OP_MULTIPLY                  = 0x30,
+       QXL_OP_SCREEN                    = 0x31,
+       QXL_OP_OVERLAY                   = 0x32,
+       QXL_OP_DARKEN                    = 0x33,
+       QXL_OP_LIGHTEN                   = 0x34,
+       QXL_OP_COLOR_DODGE               = 0x35,
+       QXL_OP_COLOR_BURN                = 0x36,
+       QXL_OP_HARD_LIGHT                = 0x37,
+       QXL_OP_SOFT_LIGHT                = 0x38,
+       QXL_OP_DIFFERENCE                = 0x39,
+       QXL_OP_EXCLUSION                 = 0x3a,
+       QXL_OP_HSL_HUE                   = 0x3b,
+       QXL_OP_HSL_SATURATION            = 0x3c,
+       QXL_OP_HSL_COLOR                 = 0x3d,
+       QXL_OP_HSL_LUMINOSITY            = 0x3e
+};
+
+struct qxl_transform {
+       uint32_t        t00;
+       uint32_t        t01;
+       uint32_t        t02;
+       uint32_t        t10;
+       uint32_t        t11;
+       uint32_t        t12;
+};
+
+/* The flags field has the following bit fields:
+ *
+ *     operator:               [  0 -  7 ]
+ *     src_filter:             [  8 - 10 ]
+ *     mask_filter:            [ 11 - 13 ]
+ *     src_repeat:             [ 14 - 15 ]
+ *     mask_repeat:            [ 16 - 17 ]
+ *     component_alpha:        [ 18 - 18 ]
+ *     reserved:               [ 19 - 31 ]
+ *
+ * The repeat and filter values are those of pixman:
+ *     REPEAT_NONE =           0
+ *     REPEAT_NORMAL =         1
+ *     REPEAT_PAD =            2
+ *     REPEAT_REFLECT =        3
+ *
+ * The filter values are:
+ *     FILTER_NEAREST =        0
+ *     FILTER_BILINEAR =       1
+ */
+struct qxl_composite {
+       uint32_t                flags;
+
+       QXLPHYSICAL                     src;
+       QXLPHYSICAL                     src_transform;  /* May be NULL */
+       QXLPHYSICAL                     mask;           /* May be NULL */
+       QXLPHYSICAL                     mask_transform; /* May be NULL */
+       struct qxl_point_1_6    src_origin;
+       struct qxl_point_1_6    mask_origin;
+};
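+
+/*
+ * Illustrative sketch (not part of the original header): packing the flags
+ * field according to the bit layout documented above could look like
+ *
+ *     flags = ((op              & 0xff) <<  0) |
+ *             ((src_filter      & 0x07) <<  8) |
+ *             ((mask_filter     & 0x07) << 11) |
+ *             ((src_repeat      & 0x03) << 14) |
+ *             ((mask_repeat     & 0x03) << 16) |
+ *             ((component_alpha & 0x01) << 18);
+ *
+ * with op taken from enum qxl_operator and the repeat/filter values taken
+ * from pixman as noted above.
+ */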
+
+struct qxl_compat_drawable {
+       union qxl_release_info release_info;
+       uint8_t effect;
+       uint8_t type;
+       uint16_t bitmap_offset;
+       struct qxl_rect bitmap_area;
+       struct qxl_rect bbox;
+       struct qxl_clip clip;
+       uint32_t mm_time;
+       union {
+               struct qxl_fill fill;
+               struct qxl_opaque opaque;
+               struct qxl_copy copy;
+               struct qxl_transparent transparent;
+               struct qxl_compat_alpha_blend alpha_blend;
+               struct qxl_copy_bits copy_bits;
+               struct qxl_copy blend;
+               struct qxl_rop_3 rop3;
+               struct qxl_stroke stroke;
+               struct qxl_text text;
+               struct qxl_mask blackness;
+               struct qxl_mask invers;
+               struct qxl_mask whiteness;
+       } u;
+};
+
+struct qxl_drawable {
+       union qxl_release_info release_info;
+       uint32_t surface_id;
+       uint8_t effect;
+       uint8_t type;
+       uint8_t self_bitmap;
+       struct qxl_rect self_bitmap_area;
+       struct qxl_rect bbox;
+       struct qxl_clip clip;
+       uint32_t mm_time;
+       int32_t surfaces_dest[3];
+       struct qxl_rect surfaces_rects[3];
+       union {
+               struct qxl_fill fill;
+               struct qxl_opaque opaque;
+               struct qxl_copy copy;
+               struct qxl_transparent transparent;
+               struct qxl_alpha_blend alpha_blend;
+               struct qxl_copy_bits copy_bits;
+               struct qxl_copy blend;
+               struct qxl_rop_3 rop3;
+               struct qxl_stroke stroke;
+               struct qxl_text text;
+               struct qxl_mask blackness;
+               struct qxl_mask invers;
+               struct qxl_mask whiteness;
+               struct qxl_composite composite;
+       } u;
+};
+
+enum qxl_surface_cmd_type {
+       QXL_SURFACE_CMD_CREATE,
+       QXL_SURFACE_CMD_DESTROY,
+};
+
+struct qxl_surface {
+       uint32_t format;
+       uint32_t width;
+       uint32_t height;
+       int32_t stride;
+       QXLPHYSICAL data;
+};
+
+struct qxl_surface_cmd {
+       union qxl_release_info release_info;
+       uint32_t surface_id;
+       uint8_t type;
+       uint32_t flags;
+       union {
+               struct qxl_surface surface_create;
+       } u;
+};
+
+struct qxl_clip_rects {
+       uint32_t num_rects;
+       struct qxl_data_chunk chunk;
+};
+
+enum {
+       QXL_PATH_BEGIN = (1 << 0),
+       QXL_PATH_END = (1 << 1),
+       QXL_PATH_CLOSE = (1 << 3),
+       QXL_PATH_BEZIER = (1 << 4),
+};
+
+struct qxl_path_seg {
+       uint32_t flags;
+       uint32_t count;
+       struct qxl_point_fix points[0];
+};
+
+struct qxl_path {
+       uint32_t data_size;
+       struct qxl_data_chunk chunk;
+};
+
+enum {
+       QXL_IMAGE_GROUP_DRIVER,
+       QXL_IMAGE_GROUP_DEVICE,
+       QXL_IMAGE_GROUP_RED,
+       QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
+};
+
+struct qxl_image_id {
+       uint32_t group;
+       uint32_t unique;
+};
+
+union qxl_image_id_union {
+       struct qxl_image_id id;
+       uint64_t value;
+};
+
+enum qxl_image_flags {
+       QXL_IMAGE_CACHE = (1 << 0),
+       QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
+};
+
+enum qxl_bitmap_flags {
+       QXL_BITMAP_DIRECT = (1 << 0),
+       QXL_BITMAP_UNSTABLE = (1 << 1),
+       QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
+};
+
+#define QXL_SET_IMAGE_ID(image, _group, _unique) do {                  \
+       (image)->descriptor.id = ((uint64_t)(_unique) << 32) | (_group); \
+} while (0)
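+
+/* Typical use (illustrative): QXL_SET_IMAGE_ID(image, QXL_IMAGE_GROUP_DRIVER,
+ * unique), which packs the group into the low and the unique id into the
+ * high 32 bits of descriptor.id, mirroring union qxl_image_id_union on
+ * little-endian hosts. */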
+
+struct qxl_image_descriptor {
+       uint64_t id;
+       uint8_t type;
+       uint8_t flags;
+       uint32_t width;
+       uint32_t height;
+};
+
+struct qxl_palette {
+       uint64_t unique;
+       uint16_t num_ents;
+       uint32_t ents[0];
+};
+
+struct qxl_bitmap {
+       uint8_t format;
+       uint8_t flags;
+       uint32_t x;
+       uint32_t y;
+       uint32_t stride;
+       QXLPHYSICAL palette;
+       QXLPHYSICAL data; /* data[0] ? */
+};
+
+struct qxl_surface_id {
+       uint32_t surface_id;
+};
+
+struct qxl_encoder_data {
+       uint32_t data_size;
+       uint8_t data[0];
+};
+
+struct qxl_image {
+       struct qxl_image_descriptor descriptor;
+       union { /* variable length */
+               struct qxl_bitmap bitmap;
+               struct qxl_encoder_data quic;
+               struct qxl_surface_id surface_image;
+       } u;
+};
+
+/* A QXLHead is a single monitor output backed by a QXLSurface.
+ * The x and y offsets are unsigned because they are relative to the given
+ * surface, not to the x, y coordinates in the guest screen reference
+ * frame. */
+struct qxl_head {
+       uint32_t id;
+       uint32_t surface_id;
+       uint32_t width;
+       uint32_t height;
+       uint32_t x;
+       uint32_t y;
+       uint32_t flags;
+};
+
+struct qxl_monitors_config {
+       uint16_t count;
+       uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
+                                driver */
+       struct qxl_head heads[0];
+};
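+
+/*
+ * Illustrative note (not part of the original header): heads[] is a flexible
+ * array, so a driver-side allocation holding max_allowed heads is sized as
+ *
+ *     sizeof(struct qxl_monitors_config) +
+ *             max_allowed * sizeof(struct qxl_head)
+ *
+ * with count <= max_allowed describing how many heads are currently valid.
+ */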
+
+#pragma pack(pop)
+
+#endif /* _H_QXL_DEV */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644 (file)
index 0000000..fcfd443
--- /dev/null
@@ -0,0 +1,982 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/crc32.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+#include "drm_crtc_helper.h"
+
+static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
+                                struct drm_connector *connector,
+                                struct qxl_head *head)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+       int width = head->width;
+       int height = head->height;
+
+       if (width < 320 || height < 240) {
+               qxl_io_log(qdev, "%s: bad head: %dx%d", width, height);
+               width = 1024;
+               height = 768;
+       }
+       if (width * height * 4 > 16*1024*1024) {
+               width = 1024;
+               height = 768;
+       }
+       /* TODO: go over regular modes and removed preferred? */
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+       mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+       mode->type |= DRM_MODE_TYPE_PREFERRED;
+       mode->status = MODE_OK;
+       drm_mode_probed_add(connector, mode);
+       qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
+}
+
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
+{
+       struct drm_connector *connector;
+       int i;
+       struct drm_device *dev = qdev->ddev;
+
+       i = 0;
+       qxl_io_log(qdev, "%s: %d, %d\n", __func__,
+                  dev->mode_config.num_connector,
+                  qdev->monitors_config->count);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (i >= qdev->monitors_config->count) {
+                       /* crtc will be reported as disabled */
+                       continue;
+               }
+               qxl_crtc_set_to_mode(qdev, connector,
+                                    &qdev->monitors_config->heads[i]);
+               ++i;
+       }
+}
+
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+{
+       if (qdev->client_monitors_config &&
+           count > qdev->client_monitors_config->count) {
+               kfree(qdev->client_monitors_config);
+               qdev->client_monitors_config = NULL;
+       }
+       if (!qdev->client_monitors_config) {
+               qdev->client_monitors_config = kzalloc(
+                               sizeof(struct qxl_monitors_config) +
+                               sizeof(struct qxl_head) * count, GFP_KERNEL);
+               if (!qdev->client_monitors_config) {
+                       qxl_io_log(qdev,
+                                  "%s: allocation failure for %u heads\n",
+                                  __func__, count);
+                       return;
+               }
+       }
+       qdev->client_monitors_config->count = count;
+}
+
+static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
+{
+       int i;
+       int num_monitors;
+       uint32_t crc;
+
+       BUG_ON(!qdev->monitors_config);
+       num_monitors = qdev->rom->client_monitors_config.count;
+       crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
+                 sizeof(qdev->rom->client_monitors_config));
+       if (crc != qdev->rom->client_monitors_config_crc) {
+               qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc,
+                          sizeof(qdev->rom->client_monitors_config),
+                          qdev->rom->client_monitors_config_crc);
+               return 1;
+       }
+       if (num_monitors > qdev->monitors_config->max_allowed) {
+               DRM_INFO("client monitors list will be truncated: %d < %d\n",
+                        qdev->monitors_config->max_allowed, num_monitors);
+               num_monitors = qdev->monitors_config->max_allowed;
+       } else {
+               num_monitors = qdev->rom->client_monitors_config.count;
+       }
+       qxl_alloc_client_monitors_config(qdev, num_monitors);
+       /* we copy max from the client but it isn't used */
+       qdev->client_monitors_config->max_allowed =
+                               qdev->monitors_config->max_allowed;
+       for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
+               struct qxl_urect *c_rect =
+                       &qdev->rom->client_monitors_config.heads[i];
+               struct qxl_head *client_head =
+                       &qdev->client_monitors_config->heads[i];
+               struct qxl_head *head = &qdev->monitors_config->heads[i];
+               client_head->x = head->x = c_rect->left;
+               client_head->y = head->y = c_rect->top;
+               client_head->width = head->width =
+                                               c_rect->right - c_rect->left;
+               client_head->height = head->height =
+                                               c_rect->bottom - c_rect->top;
+               client_head->surface_id = head->surface_id = 0;
+               client_head->id = head->id = i;
+               client_head->flags = head->flags = 0;
+               QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
+                         head->x, head->y);
+       }
+       return 0;
+}
+
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
+{
+
+       while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+               qxl_io_log(qdev, "failed crc check for client_monitors_config,"
+                                " retrying\n");
+       }
+       qxl_crtc_set_from_monitors_config(qdev);
+       /* fire off a uevent and let userspace tell us what to do */
+       qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
+       drm_sysfs_hotplug_event(qdev->ddev);
+}
+
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_output *output = drm_connector_to_qxl_output(connector);
+       int h = output->index;
+       struct drm_display_mode *mode = NULL;
+       struct qxl_head *head;
+
+       if (!qdev->monitors_config)
+               return 0;
+       head = &qdev->monitors_config->heads[h];
+
+       mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
+                           false);
+       mode->type |= DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
+static int qxl_add_common_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode = NULL;
+       int i;
+       struct mode_size {
+               int w;
+               int h;
+       } common_modes[] = {
+               { 640,  480},
+               { 720,  480},
+               { 800,  600},
+               { 848,  480},
+               {1024,  768},
+               {1152,  768},
+               {1280,  720},
+               {1280,  800},
+               {1280,  854},
+               {1280,  960},
+               {1280, 1024},
+               {1440,  900},
+               {1400, 1050},
+               {1680, 1050},
+               {1600, 1200},
+               {1920, 1080},
+               {1920, 1200}
+       };
+
+       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+               if (common_modes[i].w < 320 || common_modes[i].h < 200)
+                       continue;
+
+               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
+                                   60, false, false, false);
+               if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+                       mode->type |= DRM_MODE_TYPE_PREFERRED;
+               drm_mode_probed_add(connector, mode);
+       }
+       return i - 1;
+}
+
+static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                              u16 *blue, uint32_t start, uint32_t size)
+{
+       /* TODO */
+}
+
+static void qxl_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(qxl_crtc);
+}
+
+static void
+qxl_hide_cursor(struct qxl_device *qdev)
+{
+       struct qxl_release *release;
+       struct qxl_cursor_cmd *cmd;
+       int ret;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+                                        &release, NULL);
+       if (ret)
+               return;
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_HIDE;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_unreserve(qdev, release);
+}
+
+static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
+                              struct drm_file *file_priv,
+                              uint32_t handle,
+                              uint32_t width,
+                              uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+       struct drm_gem_object *obj;
+       struct qxl_cursor *cursor;
+       struct qxl_cursor_cmd *cmd;
+       struct qxl_bo *cursor_bo, *user_bo;
+       struct qxl_release *release;
+       void *user_ptr;
+
+       int size = 64*64*4;
+       int ret = 0;
+       if (!handle) {
+               qxl_hide_cursor(qdev);
+               return 0;
+       }
+
+       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("cannot find cursor object\n");
+               return -ENOENT;
+       }
+
+       user_bo = gem_to_qxl_bo(obj);
+
+       ret = qxl_bo_reserve(user_bo, false);
+       if (ret)
+               goto out_unref;
+
+       ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+       if (ret)
+               goto out_unreserve;
+
+       ret = qxl_bo_kmap(user_bo, &user_ptr);
+       if (ret)
+               goto out_unpin;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+                                        QXL_RELEASE_CURSOR_CMD,
+                                        &release, NULL);
+       if (ret)
+               goto out_kunmap;
+       ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
+                                   &cursor_bo);
+       if (ret)
+               goto out_free_release;
+       ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+       if (ret)
+               goto out_free_bo;
+
+       cursor->header.unique = 0;
+       cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
+       cursor->header.width = 64;
+       cursor->header.height = 64;
+       cursor->header.hot_spot_x = 0;
+       cursor->header.hot_spot_y = 0;
+       cursor->data_size = size;
+       cursor->chunk.next_chunk = 0;
+       cursor->chunk.prev_chunk = 0;
+       cursor->chunk.data_size = size;
+
+       memcpy(cursor->chunk.data, user_ptr, size);
+
+       qxl_bo_kunmap(cursor_bo);
+
+       /* finish with the userspace bo */
+       qxl_bo_kunmap(user_bo);
+       qxl_bo_unpin(user_bo);
+       qxl_bo_unreserve(user_bo);
+       drm_gem_object_unreference_unlocked(obj);
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_SET;
+       cmd->u.set.position.x = qcrtc->cur_x;
+       cmd->u.set.position.y = qcrtc->cur_y;
+
+       cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+       qxl_release_add_res(qdev, release, cursor_bo);
+
+       cmd->u.set.visible = 1;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_unreserve(qdev, release);
+
+       qxl_bo_unreserve(cursor_bo);
+       qxl_bo_unref(&cursor_bo);
+
+       return ret;
+out_free_bo:
+       qxl_bo_unref(&cursor_bo);
+out_free_release:
+       qxl_release_unreserve(qdev, release);
+       qxl_release_free(qdev, release);
+out_kunmap:
+       qxl_bo_kunmap(user_bo);
+out_unpin:
+       qxl_bo_unpin(user_bo);
+out_unreserve:
+       qxl_bo_unreserve(user_bo);
+out_unref:
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+}
+
+static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+                               int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+       struct qxl_release *release;
+       struct qxl_cursor_cmd *cmd;
+       int ret;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+                                        &release, NULL);
+       if (ret)
+               return ret;
+
+       qcrtc->cur_x = x;
+       qcrtc->cur_y = y;
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_MOVE;
+       cmd->u.position.x = qcrtc->cur_x;
+       cmd->u.position.y = qcrtc->cur_y;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_unreserve(qdev, release);
+       return 0;
+}
+
+
+static const struct drm_crtc_funcs qxl_crtc_funcs = {
+       .cursor_set = qxl_crtc_cursor_set,
+       .cursor_move = qxl_crtc_cursor_move,
+       .gamma_set = qxl_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = qxl_crtc_destroy,
+};
+
+static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+
+       if (qxl_fb->obj)
+               drm_gem_object_unreference_unlocked(qxl_fb->obj);
+       drm_framebuffer_cleanup(fb);
+       kfree(qxl_fb);
+}
+
+static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+                                        struct drm_file *file_priv,
+                                        unsigned flags, unsigned color,
+                                        struct drm_clip_rect *clips,
+                                        unsigned num_clips)
+{
+       /* TODO: vmwgfx where this was cribbed from had locking. Why? */
+       struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+       struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
+       struct drm_clip_rect norect;
+       struct qxl_bo *qobj;
+       int inc = 1;
+
+       qobj = gem_to_qxl_bo(qxl_fb->obj);
+       if (qxl_fb != qdev->active_user_framebuffer) {
+               DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
+                       __func__, qxl_fb, qdev->active_user_framebuffer);
+       }
+       if (!num_clips) {
+               num_clips = 1;
+               clips = &norect;
+               norect.x1 = norect.y1 = 0;
+               norect.x2 = fb->width;
+               norect.y2 = fb->height;
+       } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+               num_clips /= 2;
+               inc = 2; /* skip source rects */
+       }
+
+       qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
+                         clips, num_clips, inc);
+       return 0;
+}
+
+static const struct drm_framebuffer_funcs qxl_fb_funcs = {
+       .destroy = qxl_user_framebuffer_destroy,
+       .dirty = qxl_framebuffer_surface_dirty,
+/*     TODO?
+ *     .create_handle = qxl_user_framebuffer_create_handle, */
+};
+
+int
+qxl_framebuffer_init(struct drm_device *dev,
+                    struct qxl_framebuffer *qfb,
+                    struct drm_mode_fb_cmd2 *mode_cmd,
+                    struct drm_gem_object *obj)
+{
+       int ret;
+
+       qfb->obj = obj;
+       ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+       if (ret) {
+               qfb->obj = NULL;
+               return ret;
+       }
+       drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
+       return 0;
+}
+
+static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+
+       qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
+                  __func__,
+                  mode->hdisplay, mode->vdisplay,
+                  adjusted_mode->hdisplay,
+                  adjusted_mode->vdisplay);
+       return true;
+}
+
+void
+qxl_send_monitors_config(struct qxl_device *qdev)
+{
+       int i;
+
+       BUG_ON(!qdev->ram_header->monitors_config);
+
+       if (qdev->monitors_config->count == 0) {
+               qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
+               return;
+       }
+       for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
+               struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+               if (head->y > 8192 || head->x > 8192 ||
+                   head->width > 8192 || head->height > 8192) {
+                       DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+                                 i, head->width, head->height,
+                                 head->x, head->y);
+                       return;
+               }
+       }
+       qxl_io_monitors_config(qdev);
+}
+
+static void qxl_monitors_config_set_single(struct qxl_device *qdev,
+                                          unsigned x, unsigned y,
+                                          unsigned width, unsigned height)
+{
+       DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
+       qdev->monitors_config->count = 1;
+       qdev->monitors_config->heads[0].x = x;
+       qdev->monitors_config->heads[0].y = y;
+       qdev->monitors_config->heads[0].width = width;
+       qdev->monitors_config->heads[0].height = height;
+}
+
+static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_mode *m = (void *)mode->private;
+       struct qxl_framebuffer *qfb;
+       struct qxl_bo *bo, *old_bo = NULL;
+       uint32_t width, height, base_offset;
+       bool recreate_primary = false;
+       int ret;
+
+       if (!crtc->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+
+       if (old_fb) {
+               qfb = to_qxl_framebuffer(old_fb);
+               old_bo = gem_to_qxl_bo(qfb->obj);
+       }
+       qfb = to_qxl_framebuffer(crtc->fb);
+       bo = gem_to_qxl_bo(qfb->obj);
+       if (!m)
+               /* and do we care? */
+               DRM_DEBUG("%dx%d: not a native mode\n", x, y);
+       else
+               DRM_DEBUG("%dx%d: qxl id %d\n",
+                         mode->hdisplay, mode->vdisplay, m->id);
+       DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
+                 x, y,
+                 mode->hdisplay, mode->vdisplay,
+                 adjusted_mode->hdisplay,
+                 adjusted_mode->vdisplay);
+
+       recreate_primary = true;
+
+       width = mode->hdisplay;
+       height = mode->vdisplay;
+       base_offset = 0;
+
+       ret = qxl_bo_reserve(bo, false);
+       if (ret != 0)
+               return ret;
+       ret = qxl_bo_pin(bo, bo->type, NULL);
+       if (ret != 0) {
+               qxl_bo_unreserve(bo);
+               return -EINVAL;
+       }
+       qxl_bo_unreserve(bo);
+       if (recreate_primary) {
+               qxl_io_destroy_primary(qdev);
+               qxl_io_log(qdev,
+                          "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+                          width, height, bo->surf.width,
+                          bo->surf.height, bo->surf.stride, bo->surf.format);
+               qxl_io_create_primary(qdev, width, height, base_offset, bo);
+               bo->is_primary = true;
+       }
+
+       if (old_bo && old_bo != bo) {
+               old_bo->is_primary = false;
+               ret = qxl_bo_reserve(old_bo, false);
+               qxl_bo_unpin(old_bo);
+               qxl_bo_unreserve(old_bo);
+       }
+
+       if (qdev->monitors_config->count == 0) {
+               qxl_monitors_config_set_single(qdev, x, y,
+                                              mode->hdisplay,
+                                              mode->vdisplay);
+       }
+       qdev->mode_set = true;
+       return 0;
+}
+
+static void qxl_crtc_prepare(struct drm_crtc *crtc)
+{
+       DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
+                 crtc->mode.hdisplay, crtc->mode.vdisplay,
+                 crtc->x, crtc->y, crtc->enabled);
+}
+
+static void qxl_crtc_commit(struct drm_crtc *crtc)
+{
+       DRM_DEBUG("\n");
+}
+
+static void qxl_crtc_load_lut(struct drm_crtc *crtc)
+{
+       DRM_DEBUG("\n");
+}
+
+static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
+       .dpms = qxl_crtc_dpms,
+       .mode_fixup = qxl_crtc_mode_fixup,
+       .mode_set = qxl_crtc_mode_set,
+       .prepare = qxl_crtc_prepare,
+       .commit = qxl_crtc_commit,
+       .load_lut = qxl_crtc_load_lut,
+};
+
+static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
+{
+       struct qxl_crtc *qxl_crtc;
+
+       qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
+       if (!qxl_crtc)
+               return -ENOMEM;
+
+       drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
+       drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
+       return 0;
+}
+
+static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
+{
+       DRM_DEBUG("\n");
+}
+
+static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
+                              const struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       DRM_DEBUG("\n");
+       return true;
+}
+
+static void qxl_enc_prepare(struct drm_encoder *encoder)
+{
+       DRM_DEBUG("\n");
+}
+
+static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
+               struct drm_encoder *encoder)
+{
+       int i;
+       struct qxl_head *head;
+       struct drm_display_mode *mode;
+
+       BUG_ON(!encoder);
+       /* TODO: ugly, do better */
+       for (i = 0 ; i < 32 && encoder->possible_crtcs != (1 << i); ++i)
+               ;
+       if (encoder->possible_crtcs != (1 << i)) {
+               DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
+                         encoder->possible_crtcs);
+               return;
+       }
+       if (!qdev->monitors_config ||
+           qdev->monitors_config->max_allowed <= i) {
+               DRM_ERROR(
+               "head number too large or missing monitors config: %p, %d",
+               qdev->monitors_config,
+               qdev->monitors_config ?
+                       qdev->monitors_config->max_allowed : -1);
+               return;
+       }
+       if (!encoder->crtc) {
+               DRM_ERROR("missing crtc on encoder %p\n", encoder);
+               return;
+       }
+       if (i != 0)
+               DRM_DEBUG("missing for multiple monitors: no head holes\n");
+       head = &qdev->monitors_config->heads[i];
+       head->id = i;
+       head->surface_id = 0;
+       if (encoder->crtc->enabled) {
+               mode = &encoder->crtc->mode;
+               head->width = mode->hdisplay;
+               head->height = mode->vdisplay;
+               head->x = encoder->crtc->x;
+               head->y = encoder->crtc->y;
+               if (qdev->monitors_config->count < i + 1)
+                       qdev->monitors_config->count = i + 1;
+       } else {
+               head->width = 0;
+               head->height = 0;
+               head->x = 0;
+               head->y = 0;
+       }
+       DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
+                 i, head->x, head->y, head->width, head->height);
+       head->flags = 0;
+       /* TODO - somewhere else to call this for multiple monitors
+        * (config_commit?) */
+       qxl_send_monitors_config(qdev);
+}
+
+static void qxl_enc_commit(struct drm_encoder *encoder)
+{
+       struct qxl_device *qdev = encoder->dev->dev_private;
+
+       qxl_write_monitors_config_for_encoder(qdev, encoder);
+       DRM_DEBUG("\n");
+}
+
+static void qxl_enc_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       DRM_DEBUG("\n");
+}
+
+static int qxl_conn_get_modes(struct drm_connector *connector)
+{
+       int ret = 0;
+       struct qxl_device *qdev = connector->dev->dev_private;
+
+       DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
+       /* TODO: what should we do here? only show the configured modes for the
+        * device, or allow the full list, or both? */
+       if (qdev->monitors_config && qdev->monitors_config->count) {
+               ret = qxl_add_monitors_config_modes(connector);
+               if (ret < 0)
+                       return ret;
+       }
+       ret += qxl_add_common_modes(connector);
+       return ret;
+}
+
+static int qxl_conn_mode_valid(struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       /* TODO: is this called for user defined modes? (xrandr --add-mode)
+        * TODO: check that the mode fits in the framebuffer */
+       DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+                 mode->vdisplay, mode->status);
+       return MODE_OK;
+}
+
+static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+{
+       struct qxl_output *qxl_output =
+               drm_connector_to_qxl_output(connector);
+
+       DRM_DEBUG("\n");
+       return &qxl_output->enc;
+}
+
+
+static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
+       .dpms = qxl_enc_dpms,
+       .mode_fixup = qxl_enc_mode_fixup,
+       .prepare = qxl_enc_prepare,
+       .mode_set = qxl_enc_mode_set,
+       .commit = qxl_enc_commit,
+};
+
+static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
+       .get_modes = qxl_conn_get_modes,
+       .mode_valid = qxl_conn_mode_valid,
+       .best_encoder = qxl_best_encoder,
+};
+
+static void qxl_conn_save(struct drm_connector *connector)
+{
+       DRM_DEBUG("\n");
+}
+
+static void qxl_conn_restore(struct drm_connector *connector)
+{
+       DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status qxl_conn_detect(
+                       struct drm_connector *connector,
+                       bool force)
+{
+       struct qxl_output *output =
+               drm_connector_to_qxl_output(connector);
+       struct drm_device *ddev = connector->dev;
+       struct qxl_device *qdev = ddev->dev_private;
+       int connected;
+
+       /* The first monitor is always connected */
+       connected = (output->index == 0) ||
+                   (qdev->monitors_config &&
+                    qdev->monitors_config->count > output->index);
+
+       DRM_DEBUG("\n");
+       return connected ? connector_status_connected
+                        : connector_status_disconnected;
+}
+
+static int qxl_conn_set_property(struct drm_connector *connector,
+                                  struct drm_property *property,
+                                  uint64_t value)
+{
+       DRM_DEBUG("\n");
+       return 0;
+}
+
+static void qxl_conn_destroy(struct drm_connector *connector)
+{
+       struct qxl_output *qxl_output =
+               drm_connector_to_qxl_output(connector);
+
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(qxl_output);
+}
+
+static const struct drm_connector_funcs qxl_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = qxl_conn_save,
+       .restore = qxl_conn_restore,
+       .detect = qxl_conn_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = qxl_conn_set_property,
+       .destroy = qxl_conn_destroy,
+};
+
+static void qxl_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs qxl_enc_funcs = {
+       .destroy = qxl_enc_destroy,
+};
+
+static int qdev_output_init(struct drm_device *dev, int num_output)
+{
+       struct qxl_output *qxl_output;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
+       if (!qxl_output)
+               return -ENOMEM;
+
+       qxl_output->index = num_output;
+
+       connector = &qxl_output->base;
+       encoder = &qxl_output->enc;
+       drm_connector_init(dev, &qxl_output->base,
+                          &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+
+       drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
+                        DRM_MODE_ENCODER_VIRTUAL);
+
+       encoder->possible_crtcs = 1 << num_output;
+       drm_mode_connector_attach_encoder(&qxl_output->base,
+                                         &qxl_output->enc);
+       drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
+       drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+
+       drm_sysfs_connector_add(connector);
+       return 0;
+}
+
+static struct drm_framebuffer *
+qxl_user_framebuffer_create(struct drm_device *dev,
+                           struct drm_file *file_priv,
+                           struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_gem_object *obj;
+       struct qxl_framebuffer *qxl_fb;
+       struct qxl_device *qdev = dev->dev_private;
+       int ret;
+
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+       if (!obj)
+               return NULL;
+
+       qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
+       if (qxl_fb == NULL)
+               return NULL;
+
+       ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+       if (ret) {
+               kfree(qxl_fb);
+               drm_gem_object_unreference_unlocked(obj);
+               return NULL;
+       }
+
+       if (qdev->active_user_framebuffer) {
+               DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
+                        __func__,
+                        qdev->active_user_framebuffer, qxl_fb);
+       }
+       qdev->active_user_framebuffer = qxl_fb;
+
+       return &qxl_fb->base;
+}
+
+static const struct drm_mode_config_funcs qxl_mode_funcs = {
+       .fb_create = qxl_user_framebuffer_create,
+};
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+       int i;
+       int ret;
+       struct drm_gem_object *gobj;
+       int max_allowed = QXL_NUM_OUTPUTS;
+       int monitors_config_size = sizeof(struct qxl_monitors_config) +
+                                  max_allowed * sizeof(struct qxl_head);
+
+       drm_mode_config_init(qdev->ddev);
+       ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
+                                   QXL_GEM_DOMAIN_VRAM,
+                                   false, false, NULL, &gobj);
+       if (ret) {
+               DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
+               return -ENOMEM;
+       }
+       qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+       qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+       qdev->monitors_config = qdev->monitors_config_bo->kptr;
+       qdev->ram_header->monitors_config =
+               qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
+
+       memset(qdev->monitors_config, 0, monitors_config_size);
+       qdev->monitors_config->max_allowed = max_allowed;
+
+       qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+
+       /* modes will be validated against the framebuffer size */
+       qdev->ddev->mode_config.min_width = 320;
+       qdev->ddev->mode_config.min_height = 200;
+       qdev->ddev->mode_config.max_width = 8192;
+       qdev->ddev->mode_config.max_height = 8192;
+
+       qdev->ddev->mode_config.fb_base = qdev->vram_base;
+       for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
+               qdev_crtc_init(qdev->ddev, i);
+               qdev_output_init(qdev->ddev, i);
+       }
+
+       qdev->mode_info.mode_config_initialized = true;
+
+       /* primary surface must be created by this point, to allow
+        * issuing command queue commands and having them read by the
+        * spice server. */
+       qxl_fbdev_init(qdev);
+       return 0;
+}
+
+void qxl_modeset_fini(struct qxl_device *qdev)
+{
+       qxl_fbdev_fini(qdev);
+       if (qdev->mode_info.mode_config_initialized) {
+               drm_mode_config_cleanup(qdev->ddev);
+               qdev->mode_info.mode_config_initialized = false;
+       }
+}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644 (file)
index 0000000..3c8c3db
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* Returns a pointer to the already allocated qxl_rect array inside
+ * the qxl_clip_rects. This is *not* the memory allocated on the
+ * device; it points at qxl_clip_rects.chunk.data in the kmapped BO. */
+static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
+                                             struct qxl_drawable *drawable,
+                                             unsigned num_clips,
+                                             struct qxl_bo **clips_bo,
+                                             struct qxl_release *release)
+{
+       struct qxl_clip_rects *dev_clips;
+       int ret;
+       int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
+       ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
+       if (ret)
+               return NULL;
+
+       ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+       if (ret) {
+               qxl_bo_unref(clips_bo);
+               return NULL;
+       }
+       dev_clips->num_rects = num_clips;
+       dev_clips->chunk.next_chunk = 0;
+       dev_clips->chunk.prev_chunk = 0;
+       dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
+       return (struct qxl_rect *)dev_clips->chunk.data;
+}
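+
+/*
+ * Illustrative use of the array returned above (a sketch, not code taken
+ * from this file): the caller converts each drm_clip_rect into a qxl_rect,
+ * e.g.
+ *
+ *     rects[i].left   = clip->x1;
+ *     rects[i].right  = clip->x2;
+ *     rects[i].top    = clip->y1;
+ *     rects[i].bottom = clip->y2;
+ *
+ * and then points drawable->clip.data at the chunk held in *clips_bo.
+ */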
+
+static int
+make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
+             const struct qxl_rect *rect,
+             struct qxl_release **release)
+{
+       struct qxl_drawable *drawable;
+       int i, ret;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
+                                        QXL_RELEASE_DRAWABLE, release,
+                                        NULL);
+       if (ret)
+               return ret;
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
+       drawable->type = type;
+
+       drawable->surface_id = surface;         /* Only primary for now */
+       drawable->effect = QXL_EFFECT_OPAQUE;
+       drawable->self_bitmap = 0;
+       drawable->self_bitmap_area.top = 0;
+       drawable->self_bitmap_area.left = 0;
+       drawable->self_bitmap_area.bottom = 0;
+       drawable->self_bitmap_area.right = 0;
+       /* FIXME: add clipping */
+       drawable->clip.type = SPICE_CLIP_TYPE_NONE;
+
+       /*
+        * surfaces_dest[i] should apparently be filled out with the
+        * surfaces that we depend on, and surfaces_rects should be
+        * filled with the rectangles of those surfaces that we
+        * are going to use.
+        */
+       for (i = 0; i < 3; ++i)
+               drawable->surfaces_dest[i] = -1;
+
+       if (rect)
+               drawable->bbox = *rect;
+
+       drawable->mm_time = qdev->rom->mm_clock;
+       qxl_release_unmap(qdev, *release, &drawable->release_info);
+       return 0;
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+                                  const struct qxl_fb_image *qxl_fb_image)
+{
+       struct qxl_device *qdev = qxl_fb_image->qdev;
+       const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+       uint32_t visual = qxl_fb_image->visual;
+       const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
+       struct qxl_palette *pal;
+       int ret;
+       uint32_t fgcolor, bgcolor;
+       static uint64_t unique; /* we make no attempt to actually set this
+                                * correctly globally, since that would require
+                                * tracking all of our palettes. */
+
+       ret = qxl_alloc_bo_reserved(qdev,
+                                   sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+                                   palette_bo);
+       if (ret)
+               return ret;
+
+       ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+       if (ret)
+               return ret;
+       pal->num_ents = 2;
+       pal->unique = unique++;
+       if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+               /* NB: this is the only used branch currently. */
+               fgcolor = pseudo_palette[fb_image->fg_color];
+               bgcolor = pseudo_palette[fb_image->bg_color];
+       } else {
+               fgcolor = fb_image->fg_color;
+               bgcolor = fb_image->bg_color;
+       }
+       pal->ents[0] = bgcolor;
+       pal->ents[1] = fgcolor;
+       qxl_bo_kunmap(*palette_bo);
+       return 0;
+}
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+                       int stride /* filled in if 0 */)
+{
+       struct qxl_device *qdev = qxl_fb_image->qdev;
+       struct qxl_drawable *drawable;
+       struct qxl_rect rect;
+       const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+       int x = fb_image->dx;
+       int y = fb_image->dy;
+       int width = fb_image->width;
+       int height = fb_image->height;
+       const char *src = fb_image->data;
+       int depth = fb_image->depth;
+       struct qxl_release *release;
+       struct qxl_bo *image_bo;
+       struct qxl_image *image;
+       int ret;
+
+       if (stride == 0)
+               stride = depth * width / 8;
+
+       rect.left = x;
+       rect.right = x + width;
+       rect.top = y;
+       rect.bottom = y + height;
+
+       ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
+       if (ret)
+               return;
+
+       ret = qxl_image_create(qdev, release, &image_bo,
+                              (const uint8_t *)src, 0, 0,
+                              width, height, depth, stride);
+       if (ret) {
+               qxl_release_unreserve(qdev, release);
+               qxl_release_free(qdev, release);
+               return;
+       }
+
+       if (depth == 1) {
+               struct qxl_bo *palette_bo;
+               void *ptr;
+               ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
+               qxl_release_add_res(qdev, release, palette_bo);
+
+               ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+               image = ptr;
+               image->u.bitmap.palette =
+                       qxl_bo_physical_address(qdev, palette_bo, 0);
+               qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+               qxl_bo_unreserve(palette_bo);
+               qxl_bo_unref(&palette_bo);
+       }
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+       drawable->u.copy.src_area.top = 0;
+       drawable->u.copy.src_area.bottom = height;
+       drawable->u.copy.src_area.left = 0;
+       drawable->u.copy.src_area.right = width;
+
+       drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+       drawable->u.copy.scale_mode = 0;
+       drawable->u.copy.mask.flags = 0;
+       drawable->u.copy.mask.pos.x = 0;
+       drawable->u.copy.mask.pos.y = 0;
+       drawable->u.copy.mask.bitmap = 0;
+
+       drawable->u.copy.src_bitmap =
+               qxl_bo_physical_address(qdev, image_bo, 0);
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+
+       qxl_release_add_res(qdev, release, image_bo);
+       qxl_bo_unreserve(image_bo);
+       qxl_bo_unref(&image_bo);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+}
+
+/* Push a draw command using the given clipping rectangles as
+ * the sources from the shadow framebuffer.
+ *
+ * Right now this is implemented with a single draw and a clip list.
+ * Clip lists are known to be a performance problem; this can be solved
+ * by treating them differently in the server.
+ */
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+                      struct qxl_framebuffer *qxl_fb,
+                      struct qxl_bo *bo,
+                      unsigned flags, unsigned color,
+                      struct drm_clip_rect *clips,
+                      unsigned num_clips, int inc)
+{
+       /*
+        * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
+        * send a fill command instead, much cheaper.
+        *
+        * See include/drm/drm_mode.h
+        */
+       struct drm_clip_rect *clips_ptr;
+       int i;
+       int left, right, top, bottom;
+       int width, height;
+       struct qxl_drawable *drawable;
+       struct qxl_rect drawable_rect;
+       struct qxl_rect *rects;
+       int stride = qxl_fb->base.pitches[0];
+       /* depth is not actually interesting, we don't mask with it */
+       int depth = qxl_fb->base.bits_per_pixel;
+       uint8_t *surface_base;
+       struct qxl_release *release;
+       struct qxl_bo *image_bo;
+       struct qxl_bo *clips_bo;
+       int ret;
+
+       left = clips->x1;
+       right = clips->x2;
+       top = clips->y1;
+       bottom = clips->y2;
+
+       /* skip the first clip rect */
+       for (i = 1, clips_ptr = clips + inc;
+            i < num_clips; i++, clips_ptr += inc) {
+               left = min_t(int, left, (int)clips_ptr->x1);
+               right = max_t(int, right, (int)clips_ptr->x2);
+               top = min_t(int, top, (int)clips_ptr->y1);
+               bottom = max_t(int, bottom, (int)clips_ptr->y2);
+       }
+
+       width = right - left;
+       height = bottom - top;
+       drawable_rect.left = left;
+       drawable_rect.right = right;
+       drawable_rect.top = top;
+       drawable_rect.bottom = bottom;
+       ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
+                           &release);
+       if (ret)
+               return;
+
+       ret = qxl_bo_kmap(bo, (void **)&surface_base);
+       if (ret)
+               goto out_unref;
+
+       ret = qxl_image_create(qdev, release, &image_bo, surface_base,
+                              left, top, width, height, depth, stride);
+       qxl_bo_kunmap(bo);
+       if (ret)
+               goto out_unref;
+
+       rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
+       if (!rects) {
+               qxl_bo_unref(&image_bo);
+               goto out_unref;
+       }
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+       drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
+       drawable->clip.data = qxl_bo_physical_address(qdev,
+                                                     clips_bo, 0);
+       qxl_release_add_res(qdev, release, clips_bo);
+
+       drawable->u.copy.src_area.top = 0;
+       drawable->u.copy.src_area.bottom = height;
+       drawable->u.copy.src_area.left = 0;
+       drawable->u.copy.src_area.right = width;
+
+       drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+       drawable->u.copy.scale_mode = 0;
+       drawable->u.copy.mask.flags = 0;
+       drawable->u.copy.mask.pos.x = 0;
+       drawable->u.copy.mask.pos.y = 0;
+       drawable->u.copy.mask.bitmap = 0;
+
+       drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+       qxl_release_add_res(qdev, release, image_bo);
+       qxl_bo_unreserve(image_bo);
+       qxl_bo_unref(&image_bo);
+       clips_ptr = clips;
+       for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+               rects[i].left   = clips_ptr->x1;
+               rects[i].right  = clips_ptr->x2;
+               rects[i].top    = clips_ptr->y1;
+               rects[i].bottom = clips_ptr->y2;
+       }
+       qxl_bo_kunmap(clips_bo);
+       qxl_bo_unreserve(clips_bo);
+       qxl_bo_unref(&clips_bo);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+       return;
+
+out_unref:
+       qxl_release_unreserve(qdev, release);
+       qxl_release_free(qdev, release);
+}
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+                      u32 width, u32 height,
+                      u32 sx, u32 sy,
+                      u32 dx, u32 dy)
+{
+       struct qxl_drawable *drawable;
+       struct qxl_rect rect;
+       struct qxl_release *release;
+       int ret;
+
+       rect.left = dx;
+       rect.top = dy;
+       rect.right = dx + width;
+       rect.bottom = dy + height;
+       ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
+       if (ret)
+               return;
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+       drawable->u.copy_bits.src_pos.x = sx;
+       drawable->u.copy_bits.src_pos.y = sy;
+
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+}
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
+{
+       struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
+       struct qxl_rect rect = qxl_draw_fill_rec->rect;
+       uint32_t color = qxl_draw_fill_rec->color;
+       uint16_t rop = qxl_draw_fill_rec->rop;
+       struct qxl_drawable *drawable;
+       struct qxl_release *release;
+       int ret;
+
+       ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+       if (ret)
+               return;
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+       drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
+       drawable->u.fill.brush.u.color = color;
+       drawable->u.fill.rop_descriptor = rop;
+       drawable->u.fill.mask.flags = 0;
+       drawable->u.fill.mask.pos.x = 0;
+       drawable->u.fill.mask.pos.y = 0;
+       drawable->u.fill.mask.bitmap = 0;
+
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+}
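
The `inc` argument to qxl_draw_dirty_fb() above is the stride through the clip
array: a packed array uses inc == 1, while callers that pass interleaved
rectangle pairs (typically src/dst pairs from an annotated-copy dirty call)
use inc == 2. A minimal stand-alone sketch of the same bounding-box pass, not
part of the patch, using a local stand-in for struct drm_clip_rect:

#include <stdio.h>

struct clip { int x1, y1, x2, y2; };

/* Same semantics as the first loop in qxl_draw_dirty_fb(): seed the bounds
 * with the first rect, then widen them while stepping by "inc". */
static struct clip bounding_box(const struct clip *clips, unsigned num, int inc)
{
        struct clip bb = clips[0];
        const struct clip *p = clips + inc;
        unsigned i;

        for (i = 1; i < num; i++, p += inc) {
                if (p->x1 < bb.x1) bb.x1 = p->x1;
                if (p->x2 > bb.x2) bb.x2 = p->x2;
                if (p->y1 < bb.y1) bb.y1 = p->y1;
                if (p->y2 > bb.y2) bb.y2 = p->y2;
        }
        return bb;
}

int main(void)
{
        struct clip rects[] = { { 10, 10, 20, 20 }, { 50, 5, 60, 15 } };
        struct clip bb = bounding_box(rects, 2, 1);

        printf("bbox: %d,%d - %d,%d\n", bb.x1, bb.y1, bb.x2, bb.y2);
        return 0;
}
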
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644 (file)
index 0000000..aa291d8
--- /dev/null
@@ -0,0 +1,145 @@
+/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
+/* qxl_drv.c -- QXL driver -*- linux-c -*-
+ *
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie <airlie@redhat.com>
+ *    Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "qxl_drv.h"
+
+extern int qxl_max_ioctls;
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+       { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+         0xffff00, 0 },
+       { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
+         0xffff00, 0 },
+       { 0, 0, 0 },
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int qxl_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, qxl_modeset, int, 0400);
+
+static struct drm_driver qxl_driver;
+static struct pci_driver qxl_pci_driver;
+
+static int
+qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       if (pdev->revision < 4) {
+               DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
+                         " use xf86-video-qxl in user mode");
+               return -EINVAL; /* TODO: ENODEV ? */
+       }
+       return drm_get_pci_dev(pdev, ent, &qxl_driver);
+}
+
+static void
+qxl_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       drm_put_dev(dev);
+}
+
+static struct pci_driver qxl_pci_driver = {
+        .name = DRIVER_NAME,
+        .id_table = pciidlist,
+        .probe = qxl_pci_probe,
+        .remove = qxl_pci_remove,
+};
+
+static const struct file_operations qxl_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .mmap = qxl_mmap,
+};
+
+static struct drm_driver qxl_driver = {
+       .driver_features = DRIVER_GEM | DRIVER_MODESET |
+                          DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+       .dev_priv_size = 0,
+       .load = qxl_driver_load,
+       .unload = qxl_driver_unload,
+
+       .dumb_create = qxl_mode_dumb_create,
+       .dumb_map_offset = qxl_mode_dumb_mmap,
+       .dumb_destroy = qxl_mode_dumb_destroy,
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = qxl_debugfs_init,
+       .debugfs_cleanup = qxl_debugfs_takedown,
+#endif
+       .gem_init_object = qxl_gem_object_init,
+       .gem_free_object = qxl_gem_object_free,
+       .gem_open_object = qxl_gem_object_open,
+       .gem_close_object = qxl_gem_object_close,
+       .fops = &qxl_fops,
+       .ioctls = qxl_ioctls,
+       .irq_handler = qxl_irq_handler,
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = 0,
+       .minor = 1,
+       .patchlevel = 0,
+};
+
+static int __init qxl_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force() && qxl_modeset == -1)
+               return -EINVAL;
+#endif
+
+       if (qxl_modeset == 0)
+               return -EINVAL;
+       qxl_driver.num_ioctls = qxl_max_ioctls;
+       return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+}
+
+static void __exit qxl_exit(void)
+{
+       drm_pci_exit(&qxl_driver, &qxl_pci_driver);
+}
+
+module_init(qxl_init);
+module_exit(qxl_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644 (file)
index 0000000..52b582c
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#ifndef QXL_DRV_H
+#define QXL_DRV_H
+
+/*
+ * Definitions taken from spice-protocol, plus kernel driver specific bits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#include <drm/qxl_drm.h>
+#include "qxl_dev.h"
+
+#define DRIVER_AUTHOR          "Dave Airlie"
+
+#define DRIVER_NAME            "qxl"
+#define DRIVER_DESC            "RH QXL"
+#define DRIVER_DATE            "20120117"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define QXL_NUM_OUTPUTS 1
+
+#define QXL_DEBUGFS_MAX_COMPONENTS             32
+
+extern int qxl_log_level;
+
+enum {
+       QXL_INFO_LEVEL = 1,
+       QXL_DEBUG_LEVEL = 2,
+};
+
+#define QXL_INFO(qdev, fmt, ...) do { \
+               if (qxl_log_level >= QXL_INFO_LEVEL) {  \
+                       qxl_io_log(qdev, fmt, __VA_ARGS__); \
+               }       \
+       } while (0)
+#define QXL_DEBUG(qdev, fmt, ...) do { \
+               if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
+                       qxl_io_log(qdev, fmt, __VA_ARGS__); \
+               }       \
+       } while (0)
+#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
+               static int done;                \
+               if (!done) {                    \
+                       done = 1;                       \
+                       QXL_INFO(qdev, fmt, __VA_ARGS__);       \
+               }                                               \
+       } while (0)
+
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+#define QXL_INTERRUPT_MASK (\
+       QXL_INTERRUPT_DISPLAY |\
+       QXL_INTERRUPT_CURSOR |\
+       QXL_INTERRUPT_IO_CMD |\
+       QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+
+struct qxl_fence {
+       struct qxl_device *qdev;
+       uint32_t num_active_releases;
+       uint32_t *release_ids;
+       struct radix_tree_root tree;
+};
+
+struct qxl_bo {
+       /* Protected by gem.mutex */
+       struct list_head                list;
+       /* Protected by tbo.reserved */
+       u32                             placements[3];
+       struct ttm_placement            placement;
+       struct ttm_buffer_object        tbo;
+       struct ttm_bo_kmap_obj          kmap;
+       unsigned                        pin_count;
+       void                            *kptr;
+       int                             type;
+       /* Constant after initialization */
+       struct drm_gem_object           gem_base;
+       bool is_primary; /* is this now a primary surface */
+       bool hw_surf_alloc;
+       struct qxl_surface surf;
+       uint32_t surface_id;
+       struct qxl_fence fence; /* per bo fence  - list of releases */
+       struct qxl_release *surf_create;
+       atomic_t reserve_count;
+};
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+
+struct qxl_gem {
+       struct mutex            mutex;
+       struct list_head        objects;
+};
+
+struct qxl_bo_list {
+       struct list_head lhead;
+       struct qxl_bo *bo;
+};
+
+struct qxl_reloc_list {
+       struct list_head bos;
+};
+
+struct qxl_crtc {
+       struct drm_crtc base;
+       int cur_x;
+       int cur_y;
+};
+
+struct qxl_output {
+       int index;
+       struct drm_connector base;
+       struct drm_encoder enc;
+};
+
+struct qxl_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_object *obj;
+};
+
+#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
+#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
+
+struct qxl_mman {
+       struct ttm_bo_global_ref        bo_global_ref;
+       struct drm_global_reference     mem_global_ref;
+       bool                            mem_global_referenced;
+       struct ttm_bo_device            bdev;
+};
+
+struct qxl_mode_info {
+       int num_modes;
+       struct qxl_mode *modes;
+       bool mode_config_initialized;
+
+       /* pointer to fbdev info structure */
+       struct qxl_fbdev *qfbdev;
+};
+
+
+struct qxl_memslot {
+       uint8_t         generation;
+       uint64_t        start_phys_addr;
+       uint64_t        end_phys_addr;
+       uint64_t        high_bits;
+};
+
+enum {
+       QXL_RELEASE_DRAWABLE,
+       QXL_RELEASE_SURFACE_CMD,
+       QXL_RELEASE_CURSOR_CMD,
+};
+
+/* drm_ prefix to differentiate from qxl_release_info in
+ * spice-protocol/qxl_dev.h */
+#define QXL_MAX_RES 96
+struct qxl_release {
+       int id;
+       int type;
+       int bo_count;
+       uint32_t release_offset;
+       uint32_t surface_release_id;
+       struct qxl_bo *bos[QXL_MAX_RES];
+};
+
+struct qxl_fb_image {
+       struct qxl_device *qdev;
+       uint32_t pseudo_palette[16];
+       struct fb_image fb_image;
+       uint32_t visual;
+};
+
+struct qxl_draw_fill {
+       struct qxl_device *qdev;
+       struct qxl_rect rect;
+       uint32_t color;
+       uint16_t rop;
+};
+
+/*
+ * Debugfs
+ */
+struct qxl_debugfs {
+       struct drm_info_list    *files;
+       unsigned                num_files;
+};
+
+int qxl_debugfs_add_files(struct qxl_device *rdev,
+                            struct drm_info_list *files,
+                            unsigned nfiles);
+int qxl_debugfs_fence_init(struct qxl_device *rdev);
+void qxl_debugfs_remove_files(struct qxl_device *qdev);
+
+struct qxl_device;
+
+struct qxl_device {
+       struct device                   *dev;
+       struct drm_device               *ddev;
+       struct pci_dev                  *pdev;
+       unsigned long flags;
+
+       resource_size_t vram_base, vram_size;
+       resource_size_t surfaceram_base, surfaceram_size;
+       resource_size_t rom_base, rom_size;
+       struct qxl_rom *rom;
+
+       struct qxl_mode *modes;
+       struct qxl_bo *monitors_config_bo;
+       struct qxl_monitors_config *monitors_config;
+
+       /* last received client_monitors_config */
+       struct qxl_monitors_config *client_monitors_config;
+
+       int io_base;
+       void *ram;
+       struct qxl_mman         mman;
+       struct qxl_gem          gem;
+       struct qxl_mode_info mode_info;
+
+       /*
+        * last created framebuffer with fb_create
+        * only used by debugfs dumbppm
+        */
+       struct qxl_framebuffer *active_user_framebuffer;
+
+       struct fb_info                  *fbdev_info;
+       struct qxl_framebuffer  *fbdev_qfb;
+       void *ram_physical;
+
+       struct qxl_ring *release_ring;
+       struct qxl_ring *command_ring;
+       struct qxl_ring *cursor_ring;
+
+       struct qxl_ram_header *ram_header;
+       bool mode_set;
+
+       bool primary_created;
+
+       struct qxl_memslot      *mem_slots;
+       uint8_t         n_mem_slots;
+
+       uint8_t         main_mem_slot;
+       uint8_t         surfaces_mem_slot;
+       uint8_t         slot_id_bits;
+       uint8_t         slot_gen_bits;
+       uint64_t        va_slot_mask;
+
+       struct idr      release_idr;
+       spinlock_t release_idr_lock;
+       struct mutex    async_io_mutex;
+       unsigned int last_sent_io_cmd;
+
+       /* interrupt handling */
+       atomic_t irq_received;
+       atomic_t irq_received_display;
+       atomic_t irq_received_cursor;
+       atomic_t irq_received_io_cmd;
+       unsigned irq_received_error;
+       wait_queue_head_t display_event;
+       wait_queue_head_t cursor_event;
+       wait_queue_head_t io_cmd_event;
+       struct work_struct client_monitors_config_work;
+
+       /* debugfs */
+       struct qxl_debugfs      debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
+       unsigned                debugfs_count;
+
+       struct mutex            update_area_mutex;
+
+       struct idr      surf_id_idr;
+       spinlock_t surf_id_idr_lock;
+       int last_alloced_surf_id;
+
+       struct mutex surf_evict_mutex;
+       struct io_mapping *vram_mapping;
+       struct io_mapping *surface_mapping;
+
+       /* */
+       struct mutex release_mutex;
+       struct qxl_bo *current_release_bo[3];
+       int current_release_bo_offset[3];
+
+       struct workqueue_struct *gc_queue;
+       struct work_struct gc_work;
+
+};
+
+/* forward declaration for the QXL_INFO/QXL_DEBUG macros */
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
+
+extern struct drm_ioctl_desc qxl_ioctls[];
+extern int qxl_max_ioctl;
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags);
+int qxl_driver_unload(struct drm_device *dev);
+
+int qxl_modeset_init(struct qxl_device *qdev);
+void qxl_modeset_fini(struct qxl_device *qdev);
+
+int qxl_bo_init(struct qxl_device *qdev);
+void qxl_bo_fini(struct qxl_device *qdev);
+
+struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
+                                int element_size,
+                                int n_elements,
+                                int prod_notify,
+                                bool set_prod_notify,
+                                wait_queue_head_t *push_event);
+void qxl_ring_free(struct qxl_ring *ring);
+
+static inline void *
+qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
+{
+       QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+       return 0;
+}
+
+static inline uint64_t
+qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
+                       unsigned long offset)
+{
+       int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+       struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+       /* TODO - need to hold one of the locks to read tbo.offset */
+       return slot->high_bits | (bo->tbo.offset + offset);
+}
+
+/* qxl_fb.c */
+#define QXLFB_CONN_LIMIT 1
+
+int qxl_fbdev_init(struct qxl_device *qdev);
+void qxl_fbdev_fini(struct qxl_device *qdev);
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+                                 struct drm_file *file_priv,
+                                 uint32_t *handle);
+
+/* qxl_display.c */
+int
+qxl_framebuffer_init(struct drm_device *dev,
+                    struct qxl_framebuffer *rfb,
+                    struct drm_mode_fb_cmd2 *mode_cmd,
+                    struct drm_gem_object *obj);
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
+void qxl_send_monitors_config(struct qxl_device *qdev);
+
+/* used by qxl_debugfs only */
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
+
+/* qxl_gem.c */
+int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_fini(struct qxl_device *qdev);
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+                         int alignment, int initial_domain,
+                         bool discardable, bool kernel,
+                         struct qxl_surface *surf,
+                         struct drm_gem_object **obj);
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+                         uint64_t *gpu_addr);
+void qxl_gem_object_unpin(struct drm_gem_object *obj);
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+                                     struct drm_file *file_priv,
+                                     u32 domain,
+                                     size_t size,
+                                     struct qxl_surface *surf,
+                                     struct qxl_bo **qobj,
+                                     uint32_t *handle);
+int qxl_gem_object_init(struct drm_gem_object *obj);
+void qxl_gem_object_free(struct drm_gem_object *gobj);
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void qxl_gem_object_close(struct drm_gem_object *obj,
+                         struct drm_file *file_priv);
+void qxl_bo_force_delete(struct qxl_device *qdev);
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+
+/* qxl_dumb.c */
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+                        struct drm_device *dev,
+                        struct drm_mode_create_dumb *args);
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+                         struct drm_device *dev,
+                         uint32_t handle);
+int qxl_mode_dumb_mmap(struct drm_file *filp,
+                      struct drm_device *dev,
+                      uint32_t handle, uint64_t *offset_p);
+
+
+/* qxl ttm */
+int qxl_ttm_init(struct qxl_device *qdev);
+void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* qxl image */
+
+int qxl_image_create(struct qxl_device *qdev,
+                    struct qxl_release *release,
+                    struct qxl_bo **image_bo,
+                    const uint8_t *data,
+                    int x, int y, int width, int height,
+                    int depth, int stride);
+void qxl_update_screen(struct qxl_device *qxl);
+
+/* qxl io operations (qxl_cmd.c) */
+
+void qxl_io_create_primary(struct qxl_device *qdev,
+                          unsigned width, unsigned height, unsigned offset,
+                          struct qxl_bo *bo);
+void qxl_io_destroy_primary(struct qxl_device *qdev);
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
+void qxl_io_notify_oom(struct qxl_device *qdev);
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+                      const struct qxl_rect *area);
+
+void qxl_io_reset(struct qxl_device *qdev);
+void qxl_io_monitors_config(struct qxl_device *qdev);
+int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
+void qxl_io_flush_release(struct qxl_device *qdev);
+void qxl_io_flush_surfaces(struct qxl_device *qdev);
+
+int qxl_release_reserve(struct qxl_device *qdev,
+                       struct qxl_release *release, bool no_wait);
+void qxl_release_unreserve(struct qxl_device *qdev,
+                          struct qxl_release *release);
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+                                       struct qxl_release *release);
+void qxl_release_unmap(struct qxl_device *qdev,
+                      struct qxl_release *release,
+                      union qxl_release_info *info);
+/*
+ * qxl_bo_add_resource.
+ *
+ */
+void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+                                      enum qxl_surface_cmd_type surface_cmd_type,
+                                      struct qxl_release *create_rel,
+                                      struct qxl_release **release);
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+                              int type, struct qxl_release **release,
+                              struct qxl_bo **rbo);
+int qxl_fence_releaseable(struct qxl_device *qdev,
+                         struct qxl_release *release);
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                             uint32_t type, bool interruptible);
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                            uint32_t type, bool interruptible);
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+                         struct qxl_bo **_bo);
+/* qxl drawing commands */
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+                       int stride /* filled in if 0 */);
+
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+                      struct qxl_framebuffer *qxl_fb,
+                      struct qxl_bo *bo,
+                      unsigned flags, unsigned color,
+                      struct drm_clip_rect *clips,
+                      unsigned num_clips, int inc);
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+                      u32 width, u32 height,
+                      u32 sx, u32 sy,
+                      u32 dx, u32 dy);
+
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+                 struct qxl_release **ret);
+
+void qxl_release_free(struct qxl_device *qdev,
+                     struct qxl_release *release);
+void qxl_release_add_res(struct qxl_device *qdev,
+                        struct qxl_release *release,
+                        struct qxl_bo *bo);
+/* used by qxl_debugfs_release */
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+                                                  uint64_t id);
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
+int qxl_garbage_collect(struct qxl_device *qdev);
+
+/* debugfs */
+
+int qxl_debugfs_init(struct drm_minor *minor);
+void qxl_debugfs_takedown(struct drm_minor *minor);
+
+/* qxl_irq.c */
+int qxl_irq_init(struct qxl_device *qdev);
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+
+/* qxl_fb.c */
+int qxl_fb_init(struct qxl_device *qdev);
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+                         struct drm_info_list *files,
+                         unsigned nfiles);
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+                        struct qxl_bo *surf);
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+                           uint32_t surface_id);
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+                        struct qxl_bo *surf,
+                        struct ttm_mem_reg *mem);
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+                          struct qxl_bo *surf);
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
+
+struct qxl_drv_surface *
+qxl_surface_lookup(struct drm_device *dev, int surface_id);
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
+
+/* qxl_fence.c */
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
+void qxl_fence_fini(struct qxl_fence *qfence);
+
+#endif
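
qxl_bo_physical_address() above tags a bo offset with the owning memslot's
high_bits so the device can tell which slot (and generation) a 64-bit address
refers to. The real bit widths come from the device ROM (qdev->slot_id_bits /
slot_gen_bits), so the constants in this stand-alone toy encoder are purely
illustrative; it only shows the shape of the encoding:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the driver reads these widths from the device ROM. */
#define SLOT_ID_BITS   8
#define SLOT_GEN_BITS  8

static uint64_t slot_high_bits(uint8_t slot_id, uint8_t generation)
{
        return (((uint64_t)slot_id << SLOT_GEN_BITS) | generation)
                << (64 - SLOT_ID_BITS - SLOT_GEN_BITS);
}

int main(void)
{
        uint64_t high = slot_high_bits(1, 3);   /* e.g. main memslot, gen 3 */
        uint64_t addr = high | 0x1000;          /* bo offset 0x1000         */

        printf("device address: 0x%016llx\n", (unsigned long long)addr);
        return 0;
}
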
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644 (file)
index 0000000..847c4ee
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* dumb ioctls implementation */
+
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+                           struct drm_device *dev,
+                           struct drm_mode_create_dumb *args)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_bo *qobj;
+       uint32_t handle;
+       int r;
+       struct qxl_surface surf;
+       uint32_t pitch, format;
+       pitch = args->width * ((args->bpp + 1) / 8);
+       args->size = pitch * args->height;
+       args->size = ALIGN(args->size, PAGE_SIZE);
+
+       switch (args->bpp) {
+       case 16:
+               format = SPICE_SURFACE_FMT_16_565;
+               break;
+       case 32:
+               format = SPICE_SURFACE_FMT_32_xRGB;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       surf.width = args->width;
+       surf.height = args->height;
+       surf.stride = pitch;
+       surf.format = format;
+       r = qxl_gem_object_create_with_handle(qdev, file_priv,
+                                             QXL_GEM_DOMAIN_VRAM,
+                                             args->size, &surf, &qobj,
+                                             &handle);
+       if (r)
+               return r;
+       args->pitch = pitch;
+       args->handle = handle;
+       return 0;
+}
+
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+                            struct drm_device *dev,
+                            uint32_t handle)
+{
+       return drm_gem_handle_delete(file_priv, handle);
+}
+
+int qxl_mode_dumb_mmap(struct drm_file *file_priv,
+                      struct drm_device *dev,
+                      uint32_t handle, uint64_t *offset_p)
+{
+       struct drm_gem_object *gobj;
+       struct qxl_bo *qobj;
+
+       BUG_ON(!offset_p);
+       gobj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (gobj == NULL)
+               return -ENOENT;
+       qobj = gem_to_qxl_bo(gobj);
+       *offset_p = qxl_bo_mmap_offset(qobj);
+       drm_gem_object_unreference_unlocked(gobj);
+       return 0;
+}
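
For context, qxl_mode_dumb_create()/qxl_mode_dumb_mmap() back the generic
dumb-buffer uapi, so a client can get a CPU-mappable framebuffer without any
driver-specific ioctls. A rough user-space sketch (not part of the patch;
error handling is trimmed and the device node and mode size are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);        /* assumed node */
        struct drm_mode_create_dumb create = {
                .width = 1024, .height = 768, .bpp = 32,
        };
        struct drm_mode_map_dumb map = { 0 };
        void *fb;

        if (fd < 0 || ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return 1;
        map.handle = create.handle;
        if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                return 1;
        /* map.offset is the fake offset produced by qxl_bo_mmap_offset() */
        fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                  fd, map.offset);
        if (fb == MAP_FAILED)
                return 1;
        memset(fb, 0, create.size);             /* clear the buffer */
        printf("pitch=%u size=%llu\n", create.pitch,
               (unsigned long long)create.size);
        munmap(fb, create.size);
        close(fd);
        return 0;
}
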
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644 (file)
index 0000000..b3c5127
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * Copyright © 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "drm/drm_crtc.h"
+#include "drm/drm_crtc_helper.h"
+#include "qxl_drv.h"
+
+#include "qxl_object.h"
+#include "drm_fb_helper.h"
+
+#define QXL_DIRTY_DELAY (HZ / 30)
+
+struct qxl_fbdev {
+       struct drm_fb_helper helper;
+       struct qxl_framebuffer  qfb;
+       struct list_head        fbdev_list;
+       struct qxl_device       *qdev;
+
+       void *shadow;
+       int size;
+
+       /* dirty memory logging */
+       struct {
+               spinlock_t lock;
+               bool active;
+               unsigned x1;
+               unsigned y1;
+               unsigned x2;
+               unsigned y2;
+       } dirty;
+};
+
+static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
+                             struct qxl_device *qdev, struct fb_info *info,
+                             const struct fb_image *image)
+{
+       qxl_fb_image->qdev = qdev;
+       if (info) {
+               qxl_fb_image->visual = info->fix.visual;
+               if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
+                   qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
+                       memcpy(&qxl_fb_image->pseudo_palette,
+                              info->pseudo_palette,
+                              sizeof(qxl_fb_image->pseudo_palette));
+       } else {
+                /* fallback */
+               if (image->depth == 1)
+                       qxl_fb_image->visual = FB_VISUAL_MONO10;
+               else
+                       qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
+       }
+       if (image) {
+               memcpy(&qxl_fb_image->fb_image, image,
+                      sizeof(qxl_fb_image->fb_image));
+       }
+}
+
+static void qxl_fb_dirty_flush(struct fb_info *info)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct qxl_fb_image qxl_fb_image;
+       struct fb_image *image = &qxl_fb_image.fb_image;
+       u32 x1, x2, y1, y2;
+
+       /* TODO: hard coding 32 bpp */
+       int stride = qfbdev->qfb.base.pitches[0] * 4;
+
+       x1 = qfbdev->dirty.x1;
+       x2 = qfbdev->dirty.x2;
+       y1 = qfbdev->dirty.y1;
+       y2 = qfbdev->dirty.y2;
+       /*
+        * we are using a shadow draw buffer, at qdev->surface0_shadow
+        */
+       qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
+       image->dx = x1;
+       image->dy = y1;
+       image->width = x2 - x1;
+       image->height = y2 - y1;
+       image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+                                        warnings */
+       image->bg_color = 0;
+       image->depth = 32;           /* TODO: take from somewhere? */
+       image->cmap.start = 0;
+       image->cmap.len = 0;
+       image->cmap.red = NULL;
+       image->cmap.green = NULL;
+       image->cmap.blue = NULL;
+       image->cmap.transp = NULL;
+       image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
+
+       qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+       qxl_draw_opaque_fb(&qxl_fb_image, stride);
+       qfbdev->dirty.x1 = 0;
+       qfbdev->dirty.x2 = 0;
+       qfbdev->dirty.y1 = 0;
+       qfbdev->dirty.y2 = 0;
+}
+
+static void qxl_deferred_io(struct fb_info *info,
+                           struct list_head *pagelist)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       unsigned long start, end, min, max;
+       struct page *page;
+       int y1, y2;
+
+       min = ULONG_MAX;
+       max = 0;
+       list_for_each_entry(page, pagelist, lru) {
+               start = page->index << PAGE_SHIFT;
+               end = start + PAGE_SIZE - 1;
+               min = min(min, start);
+               max = max(max, end);
+       }
+
+       if (min < max) {
+               y1 = min / info->fix.line_length;
+               y2 = (max / info->fix.line_length) + 1;
+
+               /* TODO: add spin lock? */
+               /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
+               qfbdev->dirty.x1 = 0;
+               qfbdev->dirty.y1 = y1;
+               qfbdev->dirty.x2 = info->var.xres;
+               qfbdev->dirty.y2 = y2;
+               /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
+       }
+
+       qxl_fb_dirty_flush(info);
+}
+
+
+static struct fb_deferred_io qxl_defio = {
+       .delay          = QXL_DIRTY_DELAY,
+       .deferred_io    = qxl_deferred_io,
+};
+
+static void qxl_fb_fillrect(struct fb_info *info,
+                           const struct fb_fillrect *fb_rect)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct qxl_rect rect;
+       uint32_t color;
+       int x = fb_rect->dx;
+       int y = fb_rect->dy;
+       int width = fb_rect->width;
+       int height = fb_rect->height;
+       uint16_t rop;
+       struct qxl_draw_fill qxl_draw_fill_rec;
+
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+           info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+               color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
+       else
+               color = fb_rect->color;
+       rect.left = x;
+       rect.right = x + width;
+       rect.top = y;
+       rect.bottom = y + height;
+       switch (fb_rect->rop) {
+       case ROP_XOR:
+               rop = SPICE_ROPD_OP_XOR;
+               break;
+       case ROP_COPY:
+               rop = SPICE_ROPD_OP_PUT;
+               break;
+       default:
+               pr_err("qxl_fb_fillrect(): unknown rop, "
+                      "defaulting to SPICE_ROPD_OP_PUT\n");
+               rop = SPICE_ROPD_OP_PUT;
+       }
+       qxl_draw_fill_rec.qdev = qdev;
+       qxl_draw_fill_rec.rect = rect;
+       qxl_draw_fill_rec.color = color;
+       qxl_draw_fill_rec.rop = rop;
+       if (!drm_can_sleep()) {
+               qxl_io_log(qdev,
+                       "%s: TODO use RCU, mysterious locks with spin_lock\n",
+                       __func__);
+               return;
+       }
+       qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_copyarea(struct fb_info *info,
+                           const struct fb_copyarea *region)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+
+       qxl_draw_copyarea(qfbdev->qdev,
+                         region->width, region->height,
+                         region->sx, region->sy,
+                         region->dx, region->dy);
+}
+
+static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
+{
+       qxl_draw_opaque_fb(qxl_fb_image, 0);
+}
+
+static void qxl_fb_imageblit(struct fb_info *info,
+                            const struct fb_image *image)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct qxl_fb_image qxl_fb_image;
+
+       if (!drm_can_sleep()) {
+               /* we cannot do any ttm_bo allocation since that will fail on
+                * ioremap_wc..__get_vm_area_node, so queue the work item
+                * instead.  This can happen from printk inside an interrupt
+                * context, e.g. smp_apic_timer_interrupt..check_cpu_stall */
+               qxl_io_log(qdev,
+                       "%s: TODO use RCU, mysterious locks with spin_lock\n",
+                          __func__);
+               return;
+       }
+
+       /* ensure proper order of rendering operations - TODO: must do this
+        * for everything. */
+       qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+       qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
+int qxl_fb_init(struct qxl_device *qdev)
+{
+       return 0;
+}
+
+static struct fb_ops qxlfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+       .fb_fillrect = qxl_fb_fillrect,
+       .fb_copyarea = qxl_fb_copyarea,
+       .fb_imageblit = qxl_fb_imageblit,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+       struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
+       int ret;
+
+       ret = qxl_bo_reserve(qbo, false);
+       if (likely(ret == 0)) {
+               qxl_bo_kunmap(qbo);
+               qxl_bo_unpin(qbo);
+               qxl_bo_unreserve(qbo);
+       }
+       drm_gem_object_unreference_unlocked(gobj);
+}
+
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+                                 struct drm_file *file_priv,
+                                 uint32_t *handle)
+{
+       int r;
+       struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
+
+       BUG_ON(!gobj);
+       /* drm_gem_handle_create adds a reference - good */
+       r = drm_gem_handle_create(file_priv, gobj, handle);
+       if (r)
+               return r;
+       return 0;
+}
+
+static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
+                                     struct drm_mode_fb_cmd2 *mode_cmd,
+                                     struct drm_gem_object **gobj_p)
+{
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct drm_gem_object *gobj = NULL;
+       struct qxl_bo *qbo = NULL;
+       int ret;
+       int aligned_size, size;
+       int height = mode_cmd->height;
+       int bpp;
+       int depth;
+
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
+
+       size = mode_cmd->pitches[0] * height;
+       aligned_size = ALIGN(size, PAGE_SIZE);
+       /* TODO: unallocate and reallocate surface0 for real. Hack to just
+        * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
+       ret = qxl_gem_object_create(qdev, aligned_size, 0,
+                                   QXL_GEM_DOMAIN_SURFACE,
+                                   false, /* is discardable */
+                                   false, /* is kernel (false means device) */
+                                   NULL,
+                                   &gobj);
+       if (ret) {
+               pr_err("failed to allocate framebuffer (%d)\n",
+                      aligned_size);
+               return -ENOMEM;
+       }
+       qbo = gem_to_qxl_bo(gobj);
+
+       qbo->surf.width = mode_cmd->width;
+       qbo->surf.height = mode_cmd->height;
+       qbo->surf.stride = mode_cmd->pitches[0];
+       qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
+       ret = qxl_bo_reserve(qbo, false);
+       if (unlikely(ret != 0))
+               goto out_unref;
+       ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+       if (ret) {
+               qxl_bo_unreserve(qbo);
+               goto out_unref;
+       }
+       ret = qxl_bo_kmap(qbo, NULL);
+       qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
+       if (ret)
+               goto out_unref;
+
+       *gobj_p = gobj;
+       return 0;
+out_unref:
+       qxlfb_destroy_pinned_object(gobj);
+       *gobj_p = NULL;
+       return ret;
+}
+
+static int qxlfb_create(struct qxl_fbdev *qfbdev,
+                       struct drm_fb_helper_surface_size *sizes)
+{
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct fb_info *info;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_mode_fb_cmd2 mode_cmd;
+       struct drm_gem_object *gobj = NULL;
+       struct qxl_bo *qbo = NULL;
+       struct device *device = &qdev->pdev->dev;
+       int ret;
+       int size;
+       int bpp = sizes->surface_bpp;
+       int depth = sizes->surface_depth;
+       void *shadow;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+
+       mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+       ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+       if (ret)
+               return ret;
+       qbo = gem_to_qxl_bo(gobj);
+       QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
+                mode_cmd.height, mode_cmd.pitches[0]);
+
+       shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+       /* TODO: what's the usual response to memory allocation errors? */
+       BUG_ON(!shadow);
+       QXL_INFO(qdev,
+       "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+                qxl_bo_gpu_offset(qbo),
+                qxl_bo_mmap_offset(qbo),
+                qbo->kptr,
+                shadow);
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+
+       info = framebuffer_alloc(0, device);
+       if (info == NULL) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       info->par = qfbdev;
+
+       qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+
+       fb = &qfbdev->qfb.base;
+
+       /* setup helper with fb data */
+       qfbdev->helper.fb = fb;
+       qfbdev->helper.fbdev = info;
+       qfbdev->shadow = shadow;
+       strcpy(info->fix.id, "qxldrmfb");
+
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+       info->fbops = &qxlfb_ops;
+
+       /*
+        * TODO: using gobj->size in various places in this function. Not sure
+        * what the difference between the different sizes is.
+        */
+       info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
+       info->fix.smem_len = gobj->size;
+       info->screen_base = qfbdev->shadow;
+       info->screen_size = gobj->size;
+
+       drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
+                              sizes->fb_height);
+
+       /* setup aperture base/size for vesafb takeover */
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+       info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+       info->apertures->ranges[0].size = qdev->vram_size;
+
+       info->fix.mmio_start = 0;
+       info->fix.mmio_len = 0;
+
+       if (info->screen_base == NULL) {
+               ret = -ENOSPC;
+               goto out_unref;
+       }
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       info->fbdefio = &qxl_defio;
+       fb_deferred_io_init(info);
+
+       qdev->fbdev_info = info;
+       qdev->fbdev_qfb = &qfbdev->qfb;
+       DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
+       DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+       return 0;
+
+out_unref:
+       if (qbo) {
+               ret = qxl_bo_reserve(qbo, false);
+               if (likely(ret == 0)) {
+                       qxl_bo_kunmap(qbo);
+                       qxl_bo_unpin(qbo);
+                       qxl_bo_unreserve(qbo);
+               }
+       }
+       if (fb && ret) {
+               drm_gem_object_unreference(gobj);
+               drm_framebuffer_cleanup(fb);
+               kfree(fb);
+       }
+       drm_gem_object_unreference(gobj);
+       return ret;
+}
+
+static int qxl_fb_find_or_create_single(
+               struct drm_fb_helper *helper,
+               struct drm_fb_helper_surface_size *sizes)
+{
+       struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper;
+       int new_fb = 0;
+       int ret;
+
+       if (!helper->fb) {
+               ret = qxlfb_create(qfbdev, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
+}
+
+static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
+{
+       struct fb_info *info;
+       struct qxl_framebuffer *qfb = &qfbdev->qfb;
+
+       if (qfbdev->helper.fbdev) {
+               info = qfbdev->helper.fbdev;
+
+               unregister_framebuffer(info);
+               framebuffer_release(info);
+       }
+       if (qfb->obj) {
+               qxlfb_destroy_pinned_object(qfb->obj);
+               qfb->obj = NULL;
+       }
+       drm_fb_helper_fini(&qfbdev->helper);
+       vfree(qfbdev->shadow);
+       drm_framebuffer_cleanup(&qfb->base);
+
+       return 0;
+}
+
+static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+       /* TODO
+       .gamma_set = qxl_crtc_fb_gamma_set,
+       .gamma_get = qxl_crtc_fb_gamma_get,
+       */
+       .fb_probe = qxl_fb_find_or_create_single,
+};
+
+int qxl_fbdev_init(struct qxl_device *qdev)
+{
+       struct qxl_fbdev *qfbdev;
+       int bpp_sel = 32; /* TODO: parameter from somewhere? */
+       int ret;
+
+       qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
+       if (!qfbdev)
+               return -ENOMEM;
+
+       qfbdev->qdev = qdev;
+       qdev->mode_info.qfbdev = qfbdev;
+       qfbdev->helper.funcs = &qxl_fb_helper_funcs;
+
+       ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
+                                1 /* num_crtc - QXL supports just 1 */,
+                                QXLFB_CONN_LIMIT);
+       if (ret) {
+               kfree(qfbdev);
+               return ret;
+       }
+
+       drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+       drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+       return 0;
+}
+
+void qxl_fbdev_fini(struct qxl_device *qdev)
+{
+       if (!qdev->mode_info.qfbdev)
+               return;
+
+       qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+       kfree(qdev->mode_info.qfbdev);
+       qdev->mode_info.qfbdev = NULL;
+}
+
+
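
A quick sanity check of the page-to-scanline conversion in qxl_deferred_io()
above: with an 800x600 xRGB shadow, line_length is 3200 bytes, so a dirty page
at index 5 spans bytes 20480..24575 and therefore scanlines 6..7 (y2 ends up
one past the last line touched). The arithmetic, stand-alone; the resolution
is only an assumption for the example:

#include <stdio.h>

#define PAGE_SZ      4096UL
#define LINE_LENGTH  3200UL     /* 800 pixels * 4 bytes, assumed */

int main(void)
{
        unsigned long idx = 5;
        unsigned long start = idx * PAGE_SZ;
        unsigned long end = start + PAGE_SZ - 1;
        unsigned long y1 = start / LINE_LENGTH;
        unsigned long y2 = end / LINE_LENGTH + 1;

        printf("page %lu dirties scanlines [%lu, %lu)\n", idx, y1, y2);
        return 0;
}
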
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644 (file)
index 0000000..63c6715
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include "qxl_drv.h"
+
+/* QXL fencing:
+
+   When we submit operations to the GPU we pass a release reference to the GPU
+   with them; the release reference is then added to the release ring when
+   the GPU is finished with that particular operation and has removed it from
+   its tree.
+
+   So we can have multiple outstanding non-linear fences per object.
+
+   From a TTM POV we only care if the object has any outstanding releases on
+   it.
+
+   We wait until all outstanding releases are processed.
+
+   The sync object is just a list of release ids that represent that fence on
+   that buffer.
+
+   We just add new releases onto the sync object attached to the object.
+
+   This currently uses a radix tree to store the list of release ids.
+
+   For some reason, every so often the qxl hw fails to release and things go
+   wrong.
+*/
+
+
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+       struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+       spin_lock(&bo->tbo.bdev->fence_lock);
+       radix_tree_insert(&qfence->tree, rel_id, qfence);
+       qfence->num_active_releases++;
+       spin_unlock(&bo->tbo.bdev->fence_lock);
+       return 0;
+}
+
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+       void *ret;
+       int retval = 0;
+       struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+       spin_lock(&bo->tbo.bdev->fence_lock);
+
+       ret = radix_tree_delete(&qfence->tree, rel_id);
+       if (ret == qfence)
+               qfence->num_active_releases--;
+       else {
+               DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
+               retval = -ENOENT;
+       }
+       spin_unlock(&bo->tbo.bdev->fence_lock);
+       return retval;
+}
+
+
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
+{
+       qfence->qdev = qdev;
+       qfence->num_active_releases = 0;
+       INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
+       return 0;
+}
+
+void qxl_fence_fini(struct qxl_fence *qfence)
+{
+       kfree(qfence->release_ids);
+       qfence->num_active_releases = 0;
+}
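
A minimal sketch of the fence lifecycle described in the comment above, stringing the new helpers together; the bo and rel_id values are hypothetical, and only the functions added in this file (plus the bo->fence member they operate on) are used.

    /* Illustrative sketch only: "bo" and "rel_id" are hypothetical values. */
    #include "qxl_drv.h"

    static void example_fence_lifecycle(struct qxl_device *qdev,
                                        struct qxl_bo *bo, uint32_t rel_id)
    {
            qxl_fence_init(qdev, &bo->fence);         /* once, at bo creation */

            /* a command referencing the bo is pushed to the GPU */
            qxl_fence_add_release(&bo->fence, rel_id);

            /* ... later, the id shows up on the release ring ... */
            qxl_fence_remove_release(&bo->fence, rel_id);

            qxl_fence_fini(&bo->fence);               /* at bo teardown */
    }
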
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644 (file)
index 0000000..a235693
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+int qxl_gem_object_init(struct drm_gem_object *obj)
+{
+       /* we do nothing here */
+       return 0;
+}
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+       struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+
+       if (qobj)
+               qxl_bo_unref(&qobj);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+                         int alignment, int initial_domain,
+                         bool discardable, bool kernel,
+                         struct qxl_surface *surf,
+                         struct drm_gem_object **obj)
+{
+       struct qxl_bo *qbo;
+       int r;
+
+       *obj = NULL;
+       /* At least align on page size */
+       if (alignment < PAGE_SIZE)
+               alignment = PAGE_SIZE;
+       r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+       if (r) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR(
+                       "Failed to allocate GEM object (%d, %d, %u, %d)\n",
+                                 size, initial_domain, alignment, r);
+               return r;
+       }
+       *obj = &qbo->gem_base;
+
+       mutex_lock(&qdev->gem.mutex);
+       list_add_tail(&qbo->list, &qdev->gem.objects);
+       mutex_unlock(&qdev->gem.mutex);
+
+       return 0;
+}
+
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+                                     struct drm_file *file_priv,
+                                     u32 domain,
+                                     size_t size,
+                                     struct qxl_surface *surf,
+                                     struct qxl_bo **qobj,
+                                     uint32_t *handle)
+{
+       struct drm_gem_object *gobj;
+       int r;
+
+       BUG_ON(!qobj);
+       BUG_ON(!handle);
+
+       r = qxl_gem_object_create(qdev, size, 0,
+                                 domain,
+                                 false, false, surf,
+                                 &gobj);
+       if (r)
+               return -ENOMEM;
+       r = drm_gem_handle_create(file_priv, gobj, handle);
+       if (r)
+               return r;
+       /* drop reference from allocate - handle holds it now */
+       *qobj = gem_to_qxl_bo(gobj);
+       drm_gem_object_unreference_unlocked(gobj);
+       return 0;
+}
+
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+                         uint64_t *gpu_addr)
+{
+       struct qxl_bo *qobj = obj->driver_private;
+       int r;
+
+       r = qxl_bo_reserve(qobj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
+       qxl_bo_unreserve(qobj);
+       return r;
+}
+
+void qxl_gem_object_unpin(struct drm_gem_object *obj)
+{
+       struct qxl_bo *qobj = obj->driver_private;
+       int r;
+
+       r = qxl_bo_reserve(qobj, false);
+       if (likely(r == 0)) {
+               qxl_bo_unpin(qobj);
+               qxl_bo_unreserve(qobj);
+       }
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+       return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+                         struct drm_file *file_priv)
+{
+}
+
+int qxl_gem_init(struct qxl_device *qdev)
+{
+       INIT_LIST_HEAD(&qdev->gem.objects);
+       return 0;
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+       qxl_bo_force_delete(qdev);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644 (file)
index 0000000..cf85620
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int
+qxl_image_create_helper(struct qxl_device *qdev,
+                       struct qxl_release *release,
+                       struct qxl_bo **image_bo,
+                       const uint8_t *data,
+                       int width, int height,
+                       int depth, unsigned int hash,
+                       int stride)
+{
+       struct qxl_image *image;
+       struct qxl_data_chunk *chunk;
+       int i;
+       int chunk_stride;
+       int linesize = width * depth / 8;
+       struct qxl_bo *chunk_bo;
+       int ret;
+       void *ptr;
+       /* Chunk */
+       /* FIXME: Check integer overflow */
+       /* TODO: variable number of chunks */
+       chunk_stride = stride; /* TODO: should use linesize, but it renders
+                                 wrong (check the bitmaps are sent correctly
+                                 first) */
+       ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
+                                   &chunk_bo);
+       if (ret)
+               return ret;
+
+       ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
+       chunk = ptr;
+       chunk->data_size = height * chunk_stride;
+       chunk->prev_chunk = 0;
+       chunk->next_chunk = 0;
+       qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+
+       {
+               void *k_data, *i_data;
+               int remain;
+               int page;
+               int size;
+               if (stride == linesize && chunk_stride == stride) {
+                       remain = linesize * height;
+                       page = 0;
+                       i_data = (void *)data;
+
+                       while (remain > 0) {
+                               ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
+
+                               if (page == 0) {
+                                       chunk = ptr;
+                                       k_data = chunk->data;
+                                       size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
+                               } else {
+                                       k_data = ptr;
+                                       size = PAGE_SIZE;
+                               }
+                               size = min(size, remain);
+
+                               memcpy(k_data, i_data, size);
+
+                               qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+                               i_data += size;
+                               remain -= size;
+                               page++;
+                       }
+               } else {
+                       unsigned page_base, page_offset, out_offset;
+                       for (i = 0 ; i < height ; ++i) {
+                               i_data = (void *)data + i * stride;
+                               remain = linesize;
+                               out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
+
+                               while (remain > 0) {
+                                       page_base = out_offset & PAGE_MASK;
+                                       page_offset = offset_in_page(out_offset);
+
+                                       size = min((int)(PAGE_SIZE - page_offset), remain);
+
+                                       ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
+                                       k_data = ptr + page_offset;
+                                       memcpy(k_data, i_data, size);
+                                       qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+                                       remain -= size;
+                                       i_data += size;
+                                       out_offset += size;
+                               }
+                       }
+               }
+       }
+
+
+       qxl_bo_kunmap(chunk_bo);
+
+       /* Image */
+       ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
+
+       ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+       image = ptr;
+
+       image->descriptor.id = 0;
+       image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
+
+       image->descriptor.flags = 0;
+       image->descriptor.width = width;
+       image->descriptor.height = height;
+
+       switch (depth) {
+       case 1:
+               /* TODO: BE? check by arch? */
+               image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
+               break;
+       case 24:
+               image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
+               break;
+       case 32:
+               image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
+               break;
+       default:
+               DRM_ERROR("unsupported image bit depth\n");
+               return -EINVAL; /* TODO: cleanup */
+       }
+       image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+       image->u.bitmap.x = width;
+       image->u.bitmap.y = height;
+       image->u.bitmap.stride = chunk_stride;
+       image->u.bitmap.palette = 0;
+       image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+       qxl_release_add_res(qdev, release, chunk_bo);
+       qxl_bo_unreserve(chunk_bo);
+       qxl_bo_unref(&chunk_bo);
+
+       qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+
+       return 0;
+}
+
+int qxl_image_create(struct qxl_device *qdev,
+                    struct qxl_release *release,
+                    struct qxl_bo **image_bo,
+                    const uint8_t *data,
+                    int x, int y, int width, int height,
+                    int depth, int stride)
+{
+       data += y * stride + x * (depth / 8);
+       return qxl_image_create_helper(qdev, release, image_bo, data,
+                                      width, height, depth, 0, stride);
+}
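
For orientation, a hedged sketch of how qxl_image_create() would be driven from a draw path; the release is assumed to have been set up elsewhere in this patch (qxl_alloc_release_reserved(), as called from qxl_ioctl.c), and the 64x64 32bpp geometry is a made-up example.

    /* Hedged sketch: release setup happens elsewhere; geometry is made up. */
    #include "qxl_drv.h"

    static int example_upload_bitmap(struct qxl_device *qdev,
                                     struct qxl_release *release,
                                     const uint8_t *pixels)
    {
            struct qxl_bo *image_bo;
            int width = 64, height = 64, depth = 32;
            int stride = width * (depth / 8);

            /* copies the CPU bitmap into a chunk bo and builds a
             * SPICE_IMAGE_TYPE_BITMAP descriptor pointing at it */
            return qxl_image_create(qdev, release, &image_bo, pixels,
                                    0, 0, width, height, depth, stride);
    }
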
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644 (file)
index 0000000..04b64f9
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * TODO: we allocate a new gem (in a qxl_bo) for each request.
+ * This is wasteful since bos are page aligned.
+ */
+static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_alloc *qxl_alloc = data;
+       int ret;
+       struct qxl_bo *qobj;
+       uint32_t handle;
+       u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+       if (qxl_alloc->size == 0) {
+               DRM_ERROR("invalid size %d\n", qxl_alloc->size);
+               return -EINVAL;
+       }
+       ret = qxl_gem_object_create_with_handle(qdev, file_priv,
+                                               domain,
+                                               qxl_alloc->size,
+                                               NULL,
+                                               &qobj, &handle);
+       if (ret) {
+               DRM_ERROR("%s: failed to create gem ret=%d\n",
+                         __func__, ret);
+               return -ENOMEM;
+       }
+       qxl_alloc->handle = handle;
+       return 0;
+}
+
+static int qxl_map_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_map *qxl_map = data;
+
+       return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+                                 &qxl_map->offset);
+}
+
+/*
+ * dst must be validated, i.e. the whole bo must be on vram/surface ram (right
+ * now all bos are on vram).
+ * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
+ */
+static void
+apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+           struct qxl_bo *src, uint64_t src_off)
+{
+       void *reloc_page;
+
+       reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+       *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+                                                                    src, src_off);
+       qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+static void
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+                struct qxl_bo *src)
+{
+       uint32_t id = 0;
+       void *reloc_page;
+
+       if (src && !src->is_primary)
+               id = src->surface_id;
+
+       reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+       *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
+       qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+/* return holding the reference to this object */
+static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+                                        struct drm_file *file_priv, uint64_t handle,
+                                        struct qxl_reloc_list *reloc_list)
+{
+       struct drm_gem_object *gobj;
+       struct qxl_bo *qobj;
+       int ret;
+
+       gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+       if (!gobj) {
+               DRM_ERROR("bad bo handle %lld\n", handle);
+               return NULL;
+       }
+       qobj = gem_to_qxl_bo(gobj);
+
+       ret = qxl_bo_list_add(reloc_list, qobj);
+       if (ret)
+               return NULL;
+
+       return qobj;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full QXLDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * QXLReleaseInfo struct (first XXX bytes)
+ */
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_execbuffer *execbuffer = data;
+       struct drm_qxl_command user_cmd;
+       int cmd_num;
+       struct qxl_bo *reloc_src_bo;
+       struct qxl_bo *reloc_dst_bo;
+       struct drm_qxl_reloc reloc;
+       void *fb_cmd;
+       int i, ret;
+       struct qxl_reloc_list reloc_list;
+       int unwritten;
+       uint32_t reloc_dst_offset;
+       INIT_LIST_HEAD(&reloc_list.bos);
+
+       for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+               struct qxl_release *release;
+               struct qxl_bo *cmd_bo;
+               int release_type;
+               struct drm_qxl_command *commands =
+                       (struct drm_qxl_command *)execbuffer->commands;
+
+               if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+                                      sizeof(user_cmd)))
+                       return -EFAULT;
+               switch (user_cmd.type) {
+               case QXL_CMD_DRAW:
+                       release_type = QXL_RELEASE_DRAWABLE;
+                       break;
+               case QXL_CMD_SURFACE:
+               case QXL_CMD_CURSOR:
+               default:
+                       DRM_DEBUG("Only draw commands in execbuffers\n");
+                       return -EINVAL;
+                       break;
+               }
+
+               if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+                       return -EINVAL;
+
+               ret = qxl_alloc_release_reserved(qdev,
+                                                sizeof(union qxl_release_info) +
+                                                user_cmd.command_size,
+                                                release_type,
+                                                &release,
+                                                &cmd_bo);
+               if (ret)
+                       return ret;
+
+               /* TODO copy slow path code from i915 */
+               fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+               unwritten = __copy_from_user_inatomic_nocache(
+                       fb_cmd + sizeof(union qxl_release_info) +
+                       (release->release_offset & ~PAGE_SIZE),
+                       (void *)(unsigned long)user_cmd.command,
+                       user_cmd.command_size);
+               qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+               if (unwritten) {
+                       DRM_ERROR("got unwritten %d\n", unwritten);
+                       qxl_release_unreserve(qdev, release);
+                       qxl_release_free(qdev, release);
+                       return -EFAULT;
+               }
+
+               for (i = 0 ; i < user_cmd.relocs_num; ++i) {
+                       if (DRM_COPY_FROM_USER(&reloc,
+                                              &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+                                              sizeof(reloc))) {
+                               qxl_bo_list_unreserve(&reloc_list, true);
+                               qxl_release_unreserve(qdev, release);
+                               qxl_release_free(qdev, release);
+                               return -EFAULT;
+                       }
+
+                       /* add the bos to the list of bos to validate -
+                          need to validate first then process relocs? */
+                       if (reloc.dst_handle) {
+                               reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+                                                                 reloc.dst_handle, &reloc_list);
+                               if (!reloc_dst_bo) {
+                                       qxl_bo_list_unreserve(&reloc_list, true);
+                                       qxl_release_unreserve(qdev, release);
+                                       qxl_release_free(qdev, release);
+                                       return -EINVAL;
+                               }
+                               reloc_dst_offset = 0;
+                       } else {
+                               reloc_dst_bo = cmd_bo;
+                               reloc_dst_offset = release->release_offset;
+                       }
+
+                       /* reserve and validate the reloc dst bo */
+                       if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+                               reloc_src_bo =
+                                       qxlhw_handle_to_bo(qdev, file_priv,
+                                                          reloc.src_handle, &reloc_list);
+                               if (!reloc_src_bo) {
+                                       if (reloc_dst_bo != cmd_bo)
+                                               drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+                                       qxl_bo_list_unreserve(&reloc_list, true);
+                                       qxl_release_unreserve(qdev, release);
+                                       qxl_release_free(qdev, release);
+                                       return -EINVAL;
+                               }
+                       } else
+                               reloc_src_bo = NULL;
+                       if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
+                               apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
+                                           reloc_src_bo, reloc.src_offset);
+                       } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
+                               apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
+                       } else {
+                               DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
+                               return -EINVAL;
+                       }
+
+                       if (reloc_src_bo && reloc_src_bo != cmd_bo) {
+                               qxl_release_add_res(qdev, release, reloc_src_bo);
+                               drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
+                       }
+
+                       if (reloc_dst_bo != cmd_bo)
+                               drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+               }
+               qxl_fence_releaseable(qdev, release);
+
+               ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
+               if (ret == -ERESTARTSYS) {
+                       qxl_release_unreserve(qdev, release);
+                       qxl_release_free(qdev, release);
+                       qxl_bo_list_unreserve(&reloc_list, true);
+                       return ret;
+               }
+               qxl_release_unreserve(qdev, release);
+       }
+       qxl_bo_list_unreserve(&reloc_list, false);
+       return 0;
+}
+
+static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_update_area *update_area = data;
+       struct qxl_rect area = {.left = update_area->left,
+                               .top = update_area->top,
+                               .right = update_area->right,
+                               .bottom = update_area->bottom};
+       int ret;
+       struct drm_gem_object *gobj = NULL;
+       struct qxl_bo *qobj = NULL;
+
+       if (update_area->left >= update_area->right ||
+           update_area->top >= update_area->bottom)
+               return -EINVAL;
+
+       gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+       if (gobj == NULL)
+               return -ENOENT;
+
+       qobj = gem_to_qxl_bo(gobj);
+
+       ret = qxl_bo_reserve(qobj, false);
+       if (ret)
+               goto out;
+
+       if (!qobj->pin_count) {
+               ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+                                     true, false);
+               if (unlikely(ret))
+                       goto out;
+       }
+
+       ret = qxl_bo_check_id(qdev, qobj);
+       if (ret)
+               goto out2;
+       if (!qobj->surface_id)
+               DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
+       ret = qxl_io_update_area(qdev, qobj, &area);
+
+out2:
+       qxl_bo_unreserve(qobj);
+
+out:
+       drm_gem_object_unreference_unlocked(gobj);
+       return ret;
+}
+
+static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_getparam *param = data;
+
+       switch (param->param) {
+       case QXL_PARAM_NUM_SURFACES:
+               param->value = qdev->rom->n_surfaces;
+               break;
+       case QXL_PARAM_MAX_RELOCS:
+               param->value = QXL_MAX_RES;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_clientcap *param = data;
+       int byte, idx;
+
+       byte = param->index / 8;
+       idx = param->index % 8;
+
+       if (qdev->pdev->revision < 4)
+               return -ENOSYS;
+
+       if (byte >= 58)
+               return -ENOSYS;
+
+       if (qdev->rom->client_capabilities[byte] & (1 << idx))
+               return 0;
+       return -ENOSYS;
+}
+
+static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_alloc_surf *param = data;
+       struct qxl_bo *qobj;
+       int handle;
+       int ret;
+       int size, actual_stride;
+       struct qxl_surface surf;
+
+       /* work out the size and allocate a bo with a handle */
+       actual_stride = param->stride < 0 ? -param->stride : param->stride;
+       size = actual_stride * param->height + actual_stride;
+
+       surf.format = param->format;
+       surf.width = param->width;
+       surf.height = param->height;
+       surf.stride = param->stride;
+       surf.data = 0;
+
+       ret = qxl_gem_object_create_with_handle(qdev, file,
+                                               QXL_GEM_DOMAIN_SURFACE,
+                                               size,
+                                               &surf,
+                                               &qobj, &handle);
+       if (ret) {
+               DRM_ERROR("%s: failed to create gem ret=%d\n",
+                         __func__, ret);
+               return -ENOMEM;
+       }
+       param->handle = handle;
+       return ret;
+}
+
+struct drm_ioctl_desc qxl_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
+                         DRM_AUTH|DRM_UNLOCKED),
+};
+
+int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
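
From userspace, the alloc path above would be exercised roughly as follows; the DRM_IOCTL_QXL_ALLOC wrapper and struct drm_qxl_alloc are assumed to come from the qxl_drm.h uapi header added elsewhere in this series, and error handling is omitted.

    /* Userspace sketch; header names and the include path are assumptions. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/qxl_drm.h>

    static uint32_t example_alloc_handle(int drm_fd, uint32_t size)
    {
            struct drm_qxl_alloc alloc = { .size = size };

            /* dispatched to qxl_alloc_ioctl() above; fills in a GEM handle */
            ioctl(drm_fd, DRM_IOCTL_QXL_ALLOC, &alloc);
            return alloc.handle;
    }
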
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644 (file)
index 0000000..21393dc
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+       uint32_t pending;
+
+       pending = xchg(&qdev->ram_header->int_pending, 0);
+
+       atomic_inc(&qdev->irq_received);
+
+       if (pending & QXL_INTERRUPT_DISPLAY) {
+               atomic_inc(&qdev->irq_received_display);
+               wake_up_all(&qdev->display_event);
+               qxl_queue_garbage_collect(qdev, false);
+       }
+       if (pending & QXL_INTERRUPT_CURSOR) {
+               atomic_inc(&qdev->irq_received_cursor);
+               wake_up_all(&qdev->cursor_event);
+       }
+       if (pending & QXL_INTERRUPT_IO_CMD) {
+               atomic_inc(&qdev->irq_received_io_cmd);
+               wake_up_all(&qdev->io_cmd_event);
+       }
+       if (pending & QXL_INTERRUPT_ERROR) {
+               /* TODO: log it, reset the device (the only way to exit this
+                * condition); do it a certain number of times, then admit
+                * defeat, to avoid endless loops.
+                */
+               qdev->irq_received_error++;
+               qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
+       }
+       if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
+               qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
+               schedule_work(&qdev->client_monitors_config_work);
+       }
+       qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+       outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
+       return IRQ_HANDLED;
+}
+
+static void qxl_client_monitors_config_work_func(struct work_struct *work)
+{
+       struct qxl_device *qdev = container_of(work, struct qxl_device,
+                                              client_monitors_config_work);
+
+       qxl_display_read_client_monitors_config(qdev);
+}
+
+int qxl_irq_init(struct qxl_device *qdev)
+{
+       int ret;
+
+       init_waitqueue_head(&qdev->display_event);
+       init_waitqueue_head(&qdev->cursor_event);
+       init_waitqueue_head(&qdev->io_cmd_event);
+       INIT_WORK(&qdev->client_monitors_config_work,
+                 qxl_client_monitors_config_work_func);
+       atomic_set(&qdev->irq_received, 0);
+       atomic_set(&qdev->irq_received_display, 0);
+       atomic_set(&qdev->irq_received_cursor, 0);
+       atomic_set(&qdev->irq_received_io_cmd, 0);
+       qdev->irq_received_error = 0;
+       ret = drm_irq_install(qdev->ddev);
+       qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed installing irq: %d\n", ret);
+               return 1;
+       }
+       return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644 (file)
index 0000000..85127ed
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+
+int qxl_log_level;
+
+static void qxl_dump_mode(struct qxl_device *qdev, void *p)
+{
+       struct qxl_mode *m = p;
+       DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
+                     m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
+                     m->y_mili, m->orientation);
+}
+
+static bool qxl_check_device(struct qxl_device *qdev)
+{
+       struct qxl_rom *rom = qdev->rom;
+       int mode_offset;
+       int i;
+
+       if (rom->magic != 0x4f525851) {
+               DRM_ERROR("bad rom signature %x\n", rom->magic);
+               return false;
+       }
+
+       DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
+       DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
+                rom->log_level);
+       DRM_INFO("Currently using mode #%d, list at 0x%x\n",
+                rom->mode, rom->modes_offset);
+       DRM_INFO("%d io pages at offset 0x%x\n",
+                rom->num_io_pages, rom->pages_offset);
+       DRM_INFO("%d byte draw area at offset 0x%x\n",
+                rom->surface0_area_size, rom->draw_area_offset);
+
+       qdev->vram_size = rom->surface0_area_size;
+       DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
+
+       mode_offset = rom->modes_offset / 4;
+       qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
+       DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
+                qdev->mode_info.num_modes);
+       qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
+       for (i = 0; i < qdev->mode_info.num_modes; i++)
+               qxl_dump_mode(qdev, qdev->mode_info.modes + i);
+       return true;
+}
+
+static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
+       unsigned long start_phys_addr, unsigned long end_phys_addr)
+{
+       uint64_t high_bits;
+       struct qxl_memslot *slot;
+       uint8_t slot_index;
+       struct qxl_ram_header *ram_header = qdev->ram_header;
+
+       slot_index = qdev->rom->slots_start + slot_index_offset;
+       slot = &qdev->mem_slots[slot_index];
+       slot->start_phys_addr = start_phys_addr;
+       slot->end_phys_addr = end_phys_addr;
+       ram_header->mem_slot.mem_start = slot->start_phys_addr;
+       ram_header->mem_slot.mem_end = slot->end_phys_addr;
+       qxl_io_memslot_add(qdev, slot_index);
+       slot->generation = qdev->rom->slot_generation;
+       high_bits = slot_index << qdev->slot_gen_bits;
+       high_bits |= slot->generation;
+       high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
+       slot->high_bits = high_bits;
+       return slot_index;
+}
+
+static void qxl_gc_work(struct work_struct *work)
+{
+       struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+       qxl_garbage_collect(qdev);
+}
+
+int qxl_device_init(struct qxl_device *qdev,
+                   struct drm_device *ddev,
+                   struct pci_dev *pdev,
+                   unsigned long flags)
+{
+       int r;
+
+       qdev->dev = &pdev->dev;
+       qdev->ddev = ddev;
+       qdev->pdev = pdev;
+       qdev->flags = flags;
+
+       mutex_init(&qdev->gem.mutex);
+       mutex_init(&qdev->update_area_mutex);
+       mutex_init(&qdev->release_mutex);
+       mutex_init(&qdev->surf_evict_mutex);
+       INIT_LIST_HEAD(&qdev->gem.objects);
+
+       qdev->rom_base = pci_resource_start(pdev, 2);
+       qdev->rom_size = pci_resource_len(pdev, 2);
+       qdev->vram_base = pci_resource_start(pdev, 0);
+       qdev->surfaceram_base = pci_resource_start(pdev, 1);
+       qdev->surfaceram_size = pci_resource_len(pdev, 1);
+       qdev->io_base = pci_resource_start(pdev, 3);
+
+       qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+       qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
+       DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
+                (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+                (int)pci_resource_len(pdev, 0) / 1024 / 1024,
+                (int)pci_resource_len(pdev, 0) / 1024,
+                (void *)qdev->surfaceram_base,
+                (void *)pci_resource_end(pdev, 1),
+                (int)qdev->surfaceram_size / 1024 / 1024,
+                (int)qdev->surfaceram_size / 1024);
+
+       qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
+       if (!qdev->rom) {
+               pr_err("Unable to ioremap ROM\n");
+               return -ENOMEM;
+       }
+
+       qxl_check_device(qdev);
+
+       r = qxl_bo_init(qdev);
+       if (r) {
+               DRM_ERROR("bo init failed %d\n", r);
+               return r;
+       }
+
+       qdev->ram_header = ioremap(qdev->vram_base +
+                                  qdev->rom->ram_header_offset,
+                                  sizeof(*qdev->ram_header));
+
+       qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
+                                            sizeof(struct qxl_command),
+                                            QXL_COMMAND_RING_SIZE,
+                                            qdev->io_base + QXL_IO_NOTIFY_CMD,
+                                            false,
+                                            &qdev->display_event);
+
+       qdev->cursor_ring = qxl_ring_create(
+                               &(qdev->ram_header->cursor_ring_hdr),
+                               sizeof(struct qxl_command),
+                               QXL_CURSOR_RING_SIZE,
+                               qdev->io_base + QXL_IO_NOTIFY_CMD,
+                               false,
+                               &qdev->cursor_event);
+
+       qdev->release_ring = qxl_ring_create(
+                               &(qdev->ram_header->release_ring_hdr),
+                               sizeof(uint64_t),
+                               QXL_RELEASE_RING_SIZE, 0, true,
+                               NULL);
+
+       /* TODO - slot initialization should happen on reset. Where is our
+        * reset handler? */
+       qdev->n_mem_slots = qdev->rom->slots_end;
+       qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
+       qdev->slot_id_bits = qdev->rom->slot_id_bits;
+       qdev->va_slot_mask =
+               (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
+
+       qdev->mem_slots =
+               kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
+                       GFP_KERNEL);
+
+       idr_init(&qdev->release_idr);
+       spin_lock_init(&qdev->release_idr_lock);
+
+       idr_init(&qdev->surf_id_idr);
+       spin_lock_init(&qdev->surf_id_idr_lock);
+
+       mutex_init(&qdev->async_io_mutex);
+
+       /* reset the device into a known state - no memslots, no primary
+        * created, no surfaces. */
+       qxl_io_reset(qdev);
+
+       /* must initialize irq before first async io - slot creation */
+       r = qxl_irq_init(qdev);
+       if (r)
+               return r;
+
+       /*
+        * Note that virtual is surface0. We rely on the single ioremap done
+        * before.
+        */
+       qdev->main_mem_slot = setup_slot(qdev, 0,
+               (unsigned long)qdev->vram_base,
+               (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
+       qdev->surfaces_mem_slot = setup_slot(qdev, 1,
+               (unsigned long)qdev->surfaceram_base,
+               (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
+       DRM_INFO("main mem slot %d [%lx,%x)\n",
+               qdev->main_mem_slot,
+               (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+
+
+       qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
+       INIT_WORK(&qdev->gc_work, qxl_gc_work);
+
+       r = qxl_fb_init(qdev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static void qxl_device_fini(struct qxl_device *qdev)
+{
+       if (qdev->current_release_bo[0])
+               qxl_bo_unref(&qdev->current_release_bo[0]);
+       if (qdev->current_release_bo[1])
+               qxl_bo_unref(&qdev->current_release_bo[1]);
+       flush_workqueue(qdev->gc_queue);
+       destroy_workqueue(qdev->gc_queue);
+       qdev->gc_queue = NULL;
+
+       qxl_ring_free(qdev->command_ring);
+       qxl_ring_free(qdev->cursor_ring);
+       qxl_ring_free(qdev->release_ring);
+       qxl_bo_fini(qdev);
+       io_mapping_free(qdev->surface_mapping);
+       io_mapping_free(qdev->vram_mapping);
+       iounmap(qdev->ram_header);
+       iounmap(qdev->rom);
+       qdev->rom = NULL;
+       qdev->mode_info.modes = NULL;
+       qdev->mode_info.num_modes = 0;
+       qxl_debugfs_remove_files(qdev);
+}
+
+int qxl_driver_unload(struct drm_device *dev)
+{
+       struct qxl_device *qdev = dev->dev_private;
+
+       if (qdev == NULL)
+               return 0;
+       qxl_modeset_fini(qdev);
+       qxl_device_fini(qdev);
+
+       kfree(qdev);
+       dev->dev_private = NULL;
+       return 0;
+}
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+       struct qxl_device *qdev;
+       int r;
+
+       /* require kms */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -ENODEV;
+
+       qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+       if (qdev == NULL)
+               return -ENOMEM;
+
+       dev->dev_private = qdev;
+
+       r = qxl_device_init(qdev, dev, dev->pdev, flags);
+       if (r)
+               goto out;
+
+       r = qxl_modeset_init(qdev);
+       if (r) {
+               qxl_driver_unload(dev);
+               goto out;
+       }
+
+       return 0;
+out:
+       kfree(qdev);
+       return r;
+}
+
+
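To make the setup_slot() bit packing above concrete, here is a worked example with hypothetical bit widths; it simply repeats the arithmetic from setup_slot() on example values.

    /* Worked example of the setup_slot() encoding, with hypothetical widths:
     * slot_gen_bits = 8, slot_id_bits = 8, slot_index = 1, generation = 1. */
    #include <stdint.h>

    static uint64_t example_slot_high_bits(void)
    {
            uint64_t slot_gen_bits = 8, slot_id_bits = 8;
            uint64_t slot_index = 1, generation = 1;
            uint64_t high_bits;

            high_bits = slot_index << slot_gen_bits;            /* 0x100 */
            high_bits |= generation;                            /* 0x101 */
            high_bits <<= 64 - (slot_gen_bits + slot_id_bits);  /* 0x101 << 48 */
            return high_bits;  /* == 0x0101000000000000 */
    }
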
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644 (file)
index 0000000..d9b12e7
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+       struct qxl_bo *bo;
+       struct qxl_device *qdev;
+
+       bo = container_of(tbo, struct qxl_bo, tbo);
+       qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+
+       qxl_surface_evict(qdev, bo, false);
+       qxl_fence_fini(&bo->fence);
+       mutex_lock(&qdev->gem.mutex);
+       list_del_init(&bo->list);
+       mutex_unlock(&qdev->gem.mutex);
+       drm_gem_object_release(&bo->gem_base);
+       kfree(bo);
+}
+
+bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
+{
+       if (bo->destroy == &qxl_ttm_bo_destroy)
+               return true;
+       return false;
+}
+
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+{
+       u32 c = 0;
+
+       qbo->placement.fpfn = 0;
+       qbo->placement.lpfn = 0;
+       qbo->placement.placement = qbo->placements;
+       qbo->placement.busy_placement = qbo->placements;
+       if (domain == QXL_GEM_DOMAIN_VRAM)
+               qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+       if (domain == QXL_GEM_DOMAIN_SURFACE)
+               qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+       if (domain == QXL_GEM_DOMAIN_CPU)
+               qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (!c)
+               qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       qbo->placement.num_placement = c;
+       qbo->placement.num_busy_placement = c;
+}
+
+
+int qxl_bo_create(struct qxl_device *qdev,
+                 unsigned long size, bool kernel, u32 domain,
+                 struct qxl_surface *surf,
+                 struct qxl_bo **bo_ptr)
+{
+       struct qxl_bo *bo;
+       enum ttm_bo_type type;
+       int r;
+
+       if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+               qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+       if (kernel)
+               type = ttm_bo_type_kernel;
+       else
+               type = ttm_bo_type_device;
+       *bo_ptr = NULL;
+       bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
+       if (bo == NULL)
+               return -ENOMEM;
+       size = roundup(size, PAGE_SIZE);
+       r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+       if (unlikely(r)) {
+               kfree(bo);
+               return r;
+       }
+       bo->gem_base.driver_private = NULL;
+       bo->type = domain;
+       bo->pin_count = 0;
+       bo->surface_id = 0;
+       qxl_fence_init(qdev, &bo->fence);
+       INIT_LIST_HEAD(&bo->list);
+       atomic_set(&bo->reserve_count, 0);
+       if (surf)
+               bo->surf = *surf;
+
+       qxl_ttm_placement_from_domain(bo, domain);
+
+       r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
+                       &bo->placement, 0, !kernel, NULL, size,
+                       NULL, &qxl_ttm_bo_destroy);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS)
+                       dev_err(qdev->dev,
+                               "object_init failed for (%lu, 0x%08X)\n",
+                               size, domain);
+               return r;
+       }
+       *bo_ptr = bo;
+       return 0;
+}
+
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+{
+       bool is_iomem;
+       int r;
+
+       if (bo->kptr) {
+               if (ptr)
+                       *ptr = bo->kptr;
+               return 0;
+       }
+       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+       if (r)
+               return r;
+       bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+       if (ptr)
+               *ptr = bo->kptr;
+       return 0;
+}
+
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+                             struct qxl_bo *bo, int page_offset)
+{
+       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+       void *rptr;
+       int ret;
+       struct io_mapping *map;
+
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               map = qdev->vram_mapping;
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+               map = qdev->surface_mapping;
+       else
+               goto fallback;
+
+       (void) ttm_mem_io_lock(man, false);
+       ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
+       ttm_mem_io_unlock(man);
+
+       return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+fallback:
+       if (bo->kptr) {
+               rptr = bo->kptr + (page_offset * PAGE_SIZE);
+               return rptr;
+       }
+
+       ret = qxl_bo_kmap(bo, &rptr);
+       if (ret)
+               return NULL;
+
+       rptr += page_offset * PAGE_SIZE;
+       return rptr;
+}
+
+void qxl_bo_kunmap(struct qxl_bo *bo)
+{
+       if (bo->kptr == NULL)
+               return;
+       bo->kptr = NULL;
+       ttm_bo_kunmap(&bo->kmap);
+}
+
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+                              struct qxl_bo *bo, void *pmap)
+{
+       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+       struct io_mapping *map;
+
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               map = qdev->vram_mapping;
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+               map = qdev->surface_mapping;
+       else
+               goto fallback;
+
+       io_mapping_unmap_atomic(pmap);
+
+       (void) ttm_mem_io_lock(man, false);
+       ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
+       ttm_mem_io_unlock(man);
+       return;
+fallback:
+       qxl_bo_kunmap(bo);
+}
+
+void qxl_bo_unref(struct qxl_bo **bo)
+{
+       struct ttm_buffer_object *tbo;
+
+       if ((*bo) == NULL)
+               return;
+       tbo = &((*bo)->tbo);
+       ttm_bo_unref(&tbo);
+       if (tbo == NULL)
+               *bo = NULL;
+}
+
+struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
+{
+       ttm_bo_reference(&bo->tbo);
+       return bo;
+}
+
+int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+{
+       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+       int r, i;
+
+       if (bo->pin_count) {
+               bo->pin_count++;
+               if (gpu_addr)
+                       *gpu_addr = qxl_bo_gpu_offset(bo);
+               return 0;
+       }
+       qxl_ttm_placement_from_domain(bo, domain);
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (likely(r == 0)) {
+               bo->pin_count = 1;
+               if (gpu_addr != NULL)
+                       *gpu_addr = qxl_bo_gpu_offset(bo);
+       }
+       if (unlikely(r != 0))
+               dev_err(qdev->dev, "%p pin failed\n", bo);
+       return r;
+}
+
+int qxl_bo_unpin(struct qxl_bo *bo)
+{
+       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+       int r, i;
+
+       if (!bo->pin_count) {
+               dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+               return 0;
+       }
+       bo->pin_count--;
+       if (bo->pin_count)
+               return 0;
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (unlikely(r != 0))
+               dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+       return r;
+}
+
+void qxl_bo_force_delete(struct qxl_device *qdev)
+{
+       struct qxl_bo *bo, *n;
+
+       if (list_empty(&qdev->gem.objects))
+               return;
+       dev_err(qdev->dev, "Userspace still has active objects!\n");
+       list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
+               mutex_lock(&qdev->ddev->struct_mutex);
+               dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+                       &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+                       *((unsigned long *)&bo->gem_base.refcount));
+               mutex_lock(&qdev->gem.mutex);
+               list_del_init(&bo->list);
+               mutex_unlock(&qdev->gem.mutex);
+               /* this should unref the ttm bo */
+               drm_gem_object_unreference(&bo->gem_base);
+               mutex_unlock(&qdev->ddev->struct_mutex);
+       }
+}
+
+int qxl_bo_init(struct qxl_device *qdev)
+{
+       return qxl_ttm_init(qdev);
+}
+
+void qxl_bo_fini(struct qxl_device *qdev)
+{
+       qxl_ttm_fini(qdev);
+}
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+       int ret;
+       if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
+               /* allocate a surface id for this surface now */
+               ret = qxl_surface_id_alloc(qdev, bo);
+               if (ret)
+                       return ret;
+
+               ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
+{
+       struct qxl_bo_list *entry, *sf;
+
+       list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
+               qxl_bo_unreserve(entry->bo);
+               list_del(&entry->lhead);
+               kfree(entry);
+       }
+}
+
+int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
+{
+       struct qxl_bo_list *entry;
+       int ret;
+
+       list_for_each_entry(entry, &reloc_list->bos, lhead) {
+               if (entry->bo == bo)
+                       return 0;
+       }
+
+       entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->bo = bo;
+       list_add(&entry->lhead, &reloc_list->bos);
+
+       ret = qxl_bo_reserve(bo, false);
+       if (ret)
+               return ret;
+
+       if (!bo->pin_count) {
+               qxl_ttm_placement_from_domain(bo, bo->type);
+               ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+                                     true, false);
+               if (ret)
+                       return ret;
+       }
+
+       /* allocate a surface for reserved + validated buffers */
+       ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+       if (ret)
+               return ret;
+       return 0;
+}
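
A hedged usage sketch for the two reloc-list helpers above: a submission path adds every BO a command references (each add reserves, validates and, for surfaces, assigns a surface id), then unreserves the whole list once the command has been queued or abandoned. The wrapper below is hypothetical and only shows the calling order; qxl_bo_list_add() and qxl_bo_list_unreserve() are the functions from this file, and struct qxl_reloc_list is assumed to carry just the "bos" list head used above (kernel headers assumed for INIT_LIST_HEAD).

/* Hypothetical caller - illustration only, not part of this patch. */
static int qxl_submit_bos_example(struct qxl_bo **bos, int count)
{
	struct qxl_reloc_list reloc_list;
	int i, ret = 0;

	INIT_LIST_HEAD(&reloc_list.bos);

	/* reserve + validate each referenced BO exactly once */
	for (i = 0; i < count; i++) {
		ret = qxl_bo_list_add(&reloc_list, bos[i]);
		if (ret)
			break;
	}

	/* ... on success the command would be pushed to the ring here ... */

	/* drop all reservations and free the tracking entries */
	qxl_bo_list_unreserve(&reloc_list, ret != 0);
	return ret;
}
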
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644 (file)
index 0000000..b4fd89f
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+#ifndef QXL_OBJECT_H
+#define QXL_OBJECT_H
+
+#include "qxl_drv.h"
+
+static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+{
+       int r;
+
+       r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS) {
+                       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+                       dev_err(qdev->dev, "%p reserve failed\n", bo);
+               }
+               return r;
+       }
+       return 0;
+}
+
+static inline void qxl_bo_unreserve(struct qxl_bo *bo)
+{
+       ttm_bo_unreserve(&bo->tbo);
+}
+
+static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
+{
+       return bo->tbo.offset;
+}
+
+static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
+{
+       return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
+{
+       return !!atomic_read(&bo->tbo.reserved);
+}
+
+static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+{
+       return bo->tbo.addr_space_offset;
+}
+
+static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
+                             bool no_wait)
+{
+       int r;
+
+       r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS) {
+                       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+                       dev_err(qdev->dev, "%p reserve failed for wait\n",
+                               bo);
+               }
+               return r;
+       }
+       spin_lock(&bo->tbo.bdev->fence_lock);
+       if (mem_type)
+               *mem_type = bo->tbo.mem.mem_type;
+       if (bo->tbo.sync_obj)
+               r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+       spin_unlock(&bo->tbo.bdev->fence_lock);
+       ttm_bo_unreserve(&bo->tbo);
+       return r;
+}
+
+extern int qxl_bo_create(struct qxl_device *qdev,
+                        unsigned long size,
+                        bool kernel, u32 domain,
+                        struct qxl_surface *surf,
+                        struct qxl_bo **bo_ptr);
+extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+extern void qxl_bo_kunmap(struct qxl_bo *bo);
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_unpin(struct qxl_bo *bo);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
+
+extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
+extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
+#endif
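
The declarations above cover the whole BO lifecycle. As a rough sketch of the expected calling pattern for a kernel-owned, permanently resident buffer (mirroring how qxl_alloc_release_reserved() later in this series reserves, pins and unreserves its release BOs), something like the following would do; the helper name is made up and error handling is condensed.

/* Hypothetical helper - illustration only, not part of this patch. */
static int qxl_bo_create_pinned(struct qxl_device *qdev, unsigned long size,
				struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	int ret;

	/* kernel-owned buffer in VRAM */
	ret = qxl_bo_create(qdev, size, true, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret)
		return ret;

	/* pin under reservation so TTM cannot move it behind our back */
	ret = qxl_bo_reserve(bo, false);
	if (ret) {
		qxl_bo_unref(&bo);
		return ret;
	}
	ret = qxl_bo_pin(bo, QXL_GEM_DOMAIN_VRAM, NULL);
	qxl_bo_unreserve(bo);
	if (ret) {
		qxl_bo_unref(&bo);
		return ret;
	}

	*bo_ptr = bo;
	return 0;
}
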
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644 (file)
index 0000000..b443d67
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
+ * into 256-byte chunks for now - gives 16 cmds per page.
+ *
+ * use an ida to index into the chunks?
+ */
+/* manage releasables */
+/* stack them 16 high for now - a drawable object is 191 bytes */
+#define RELEASE_SIZE 256
+#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
+#define SURFACE_RELEASE_SIZE 128
+#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+
+static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
+static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
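
/*
 * Illustration of the suballocation arithmetic above (editorial sketch,
 * not part of this patch; the helper below is hypothetical).  With
 * RELEASE_SIZE = 256 a 4096-byte release BO holds RELEASES_PER_BO = 16
 * slots, and a release's byte offset inside its BO is simply its slot
 * index times the per-type slot size - exactly what
 * qxl_alloc_release_reserved() computes below via release_size_per_bo[]
 * and releases_per_bo[].
 */
static inline unsigned int qxl_release_slot_to_offset(unsigned int slot,
						      int cur_idx)
{
	/* e.g. slot 3 of a drawable BO -> byte offset 3 * 256 = 768 */
	return slot * release_size_per_bo[cur_idx];
}
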
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+                 struct qxl_release **ret)
+{
+       struct qxl_release *release;
+       int handle;
+       size_t size = sizeof(*release);
+       int idr_ret;
+
+       release = kmalloc(size, GFP_KERNEL);
+       if (!release) {
+               DRM_ERROR("Out of memory\n");
+               return 0;
+       }
+       release->type = type;
+       release->bo_count = 0;
+       release->release_offset = 0;
+       release->surface_release_id = 0;
+
+       idr_preload(GFP_KERNEL);
+       spin_lock(&qdev->release_idr_lock);
+       idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
+       spin_unlock(&qdev->release_idr_lock);
+       idr_preload_end();
+       handle = idr_ret;
+       if (idr_ret < 0)
+               goto release_fail;
+       *ret = release;
+       QXL_INFO(qdev, "allocated release %lld\n", handle);
+       release->id = handle;
+release_fail:
+
+       return handle;
+}
+
+void
+qxl_release_free(struct qxl_device *qdev,
+                struct qxl_release *release)
+{
+       int i;
+
+       QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
+                release->type, release->bo_count);
+
+       if (release->surface_release_id)
+               qxl_surface_id_dealloc(qdev, release->surface_release_id);
+
+       for (i = 0 ; i < release->bo_count; ++i) {
+               QXL_INFO(qdev, "release %llx\n",
+                       release->bos[i]->tbo.addr_space_offset
+                                               - DRM_FILE_OFFSET);
+               qxl_fence_remove_release(&release->bos[i]->fence, release->id);
+               qxl_bo_unref(&release->bos[i]);
+       }
+       spin_lock(&qdev->release_idr_lock);
+       idr_remove(&qdev->release_idr, release->id);
+       spin_unlock(&qdev->release_idr_lock);
+       kfree(release);
+}
+
+void
+qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
+                   struct qxl_bo *bo)
+{
+       int i;
+       for (i = 0; i < release->bo_count; i++)
+               if (release->bos[i] == bo)
+                       return;
+
+       if (release->bo_count >= QXL_MAX_RES) {
+               DRM_ERROR("exceeded max resource on a qxl_release item\n");
+               return;
+       }
+       release->bos[release->bo_count++] = qxl_bo_ref(bo);
+}
+
+static int qxl_release_bo_alloc(struct qxl_device *qdev,
+                               struct qxl_bo **bo)
+{
+       int ret;
+       ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+                           bo);
+       return ret;
+}
+
+int qxl_release_reserve(struct qxl_device *qdev,
+                       struct qxl_release *release, bool no_wait)
+{
+       int ret;
+       if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
+               ret = qxl_bo_reserve(release->bos[0], no_wait);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+void qxl_release_unreserve(struct qxl_device *qdev,
+                         struct qxl_release *release)
+{
+       if (atomic_dec_and_test(&release->bos[0]->reserve_count))
+               qxl_bo_unreserve(release->bos[0]);
+}
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+                                      enum qxl_surface_cmd_type surface_cmd_type,
+                                      struct qxl_release *create_rel,
+                                      struct qxl_release **release)
+{
+       int ret;
+
+       if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
+               int idr_ret;
+               struct qxl_bo *bo;
+               union qxl_release_info *info;
+
+               /* stash the release after the create command */
+               idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+               bo = qxl_bo_ref(create_rel->bos[0]);
+
+               (*release)->release_offset = create_rel->release_offset + 64;
+
+               qxl_release_add_res(qdev, *release, bo);
+
+               ret = qxl_release_reserve(qdev, *release, false);
+               if (ret) {
+                       DRM_ERROR("release reserve failed\n");
+                       goto out_unref;
+               }
+               info = qxl_release_map(qdev, *release);
+               info->id = idr_ret;
+               qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+               qxl_bo_unref(&bo);
+               return ret;
+       }
+
+       return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
+                                        QXL_RELEASE_SURFACE_CMD, release, NULL);
+}
+
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+                                      int type, struct qxl_release **release,
+                                      struct qxl_bo **rbo)
+{
+       struct qxl_bo *bo;
+       int idr_ret;
+       int ret;
+       union qxl_release_info *info;
+       int cur_idx;
+
+       if (type == QXL_RELEASE_DRAWABLE)
+               cur_idx = 0;
+       else if (type == QXL_RELEASE_SURFACE_CMD)
+               cur_idx = 1;
+       else if (type == QXL_RELEASE_CURSOR_CMD)
+               cur_idx = 2;
+       else {
+               DRM_ERROR("got illegal type: %d\n", type);
+               return -EINVAL;
+       }
+
+       idr_ret = qxl_release_alloc(qdev, type, release);
+
+       mutex_lock(&qdev->release_mutex);
+       if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
+               qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+               qdev->current_release_bo_offset[cur_idx] = 0;
+               qdev->current_release_bo[cur_idx] = NULL;
+       }
+       if (!qdev->current_release_bo[cur_idx]) {
+               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+               if (ret) {
+                       mutex_unlock(&qdev->release_mutex);
+                       return ret;
+               }
+
+               /* pin release BOs - they are too messy to evict */
+               ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
+               qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
+               qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
+       }
+
+       bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+
+       (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
+       qdev->current_release_bo_offset[cur_idx]++;
+
+       if (rbo)
+               *rbo = bo;
+
+       qxl_release_add_res(qdev, *release, bo);
+
+       ret = qxl_release_reserve(qdev, *release, false);
+       mutex_unlock(&qdev->release_mutex);
+       if (ret)
+               goto out_unref;
+
+       info = qxl_release_map(qdev, *release);
+       info->id = idr_ret;
+       qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+       qxl_bo_unref(&bo);
+       return ret;
+}
+
+int qxl_fence_releaseable(struct qxl_device *qdev,
+                         struct qxl_release *release)
+{
+       int i, ret;
+       for (i = 0; i < release->bo_count; i++) {
+               if (!release->bos[i]->tbo.sync_obj)
+                       release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
+               ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+                                                  uint64_t id)
+{
+       struct qxl_release *release;
+
+       spin_lock(&qdev->release_idr_lock);
+       release = idr_find(&qdev->release_idr, id);
+       spin_unlock(&qdev->release_idr_lock);
+       if (!release) {
+               DRM_ERROR("failed to find id in release_idr\n");
+               return NULL;
+       }
+       if (release->bo_count < 1) {
+               DRM_ERROR("read a released resource with 0 bos\n");
+               return NULL;
+       }
+       return release;
+}
+
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+                                       struct qxl_release *release)
+{
+       void *ptr;
+       union qxl_release_info *info;
+       struct qxl_bo *bo = release->bos[0];
+
+       ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+       info = ptr + (release->release_offset & ~PAGE_SIZE);
+       return info;
+}
+
+void qxl_release_unmap(struct qxl_device *qdev,
+                      struct qxl_release *release,
+                      union qxl_release_info *info)
+{
+       struct qxl_bo *bo = release->bos[0];
+       void *ptr;
+
+       ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+       qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644 (file)
index 0000000..489cb8c
--- /dev/null
@@ -0,0 +1,581 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/qxl_drm.h>
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/delay.h>
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+
+static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+{
+       struct qxl_mman *mman;
+       struct qxl_device *qdev;
+
+       mman = container_of(bdev, struct qxl_mman, bdev);
+       qdev = container_of(mman, struct qxl_device, mman);
+       return qdev;
+}
+
+static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+       return ttm_mem_global_init(ref->object);
+}
+
+static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+       ttm_mem_global_release(ref->object);
+}
+
+static int qxl_ttm_global_init(struct qxl_device *qdev)
+{
+       struct drm_global_reference *global_ref;
+       int r;
+
+       qdev->mman.mem_global_referenced = false;
+       global_ref = &qdev->mman.mem_global_ref;
+       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+       global_ref->size = sizeof(struct ttm_mem_global);
+       global_ref->init = &qxl_ttm_mem_global_init;
+       global_ref->release = &qxl_ttm_mem_global_release;
+
+       r = drm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM memory accounting "
+                         "subsystem.\n");
+               return r;
+       }
+
+       qdev->mman.bo_global_ref.mem_glob =
+               qdev->mman.mem_global_ref.object;
+       global_ref = &qdev->mman.bo_global_ref.ref;
+       global_ref->global_type = DRM_GLOBAL_TTM_BO;
+       global_ref->size = sizeof(struct ttm_bo_global);
+       global_ref->init = &ttm_bo_global_init;
+       global_ref->release = &ttm_bo_global_release;
+       r = drm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+               drm_global_item_unref(&qdev->mman.mem_global_ref);
+               return r;
+       }
+
+       qdev->mman.mem_global_referenced = true;
+       return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+       if (qdev->mman.mem_global_referenced) {
+               drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+               drm_global_item_unref(&qdev->mman.mem_global_ref);
+               qdev->mman.mem_global_referenced = false;
+       }
+}
+
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct ttm_buffer_object *bo;
+       struct qxl_device *qdev;
+       int r;
+
+       bo = (struct ttm_buffer_object *)vma->vm_private_data;
+       if (bo == NULL)
+               return VM_FAULT_NOPAGE;
+       qdev = qxl_get_qdev(bo->bdev);
+       r = ttm_vm_ops->fault(vma, vmf);
+       return r;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *file_priv;
+       struct qxl_device *qdev;
+       int r;
+
+       if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+               pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
+                       __func__, vma->vm_pgoff);
+               return drm_mmap(filp, vma);
+       }
+
+       file_priv = filp->private_data;
+       qdev = file_priv->minor->dev->dev_private;
+       if (qdev == NULL) {
+               DRM_ERROR(
+                "filp->private_data->minor->dev->dev_private == NULL\n");
+               return -EINVAL;
+       }
+       QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+                __func__, filp->private_data, vma->vm_pgoff);
+
+       r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+       if (unlikely(r != 0))
+               return r;
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
+               qxl_ttm_vm_ops = *ttm_vm_ops;
+               qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+       }
+       vma->vm_ops = &qxl_ttm_vm_ops;
+       return 0;
+}
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+       return 0;
+}
+
+static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+                            struct ttm_mem_type_manager *man)
+{
+       struct qxl_device *qdev;
+
+       qdev = qxl_get_qdev(bdev);
+
+       switch (type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case TTM_PL_VRAM:
+       case TTM_PL_PRIV0:
+               /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED |
+                            TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement)
+{
+       struct qxl_bo *qbo;
+       static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+       if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+               placement->fpfn = 0;
+               placement->lpfn = 0;
+               placement->placement = &placements;
+               placement->busy_placement = &placements;
+               placement->num_placement = 1;
+               placement->num_busy_placement = 1;
+               return;
+       }
+       qbo = container_of(bo, struct qxl_bo, tbo);
+       qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+       *placement = qbo->placement;
+}
+
+static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+       return 0;
+}
+
+static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                                 struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.is_iomem = true;
+               mem->bus.base = qdev->vram_base;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               break;
+       case TTM_PL_PRIV0:
+               mem->bus.is_iomem = true;
+               mem->bus.base = qdev->surfaceram_base;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct qxl_ttm_tt {
+       struct ttm_dma_tt               ttm;
+       struct qxl_device               *qdev;
+       u64                             offset;
+};
+
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+                               struct ttm_mem_reg *bo_mem)
+{
+       struct qxl_ttm_tt *gtt = (void *)ttm;
+
+       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+       if (!ttm->num_pages) {
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+                    ttm->num_pages, bo_mem, ttm);
+       }
+       /* Not implemented */
+       return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+       /* Not implemented */
+       return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+       struct qxl_ttm_tt *gtt = (void *)ttm;
+
+       ttm_dma_tt_fini(&gtt->ttm);
+       kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+       .bind = &qxl_ttm_backend_bind,
+       .unbind = &qxl_ttm_backend_unbind,
+       .destroy = &qxl_ttm_backend_destroy,
+};
+
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+{
+       int r;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       r = ttm_pool_populate(ttm);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
+                                       unsigned long size, uint32_t page_flags,
+                                       struct page *dummy_read_page)
+{
+       struct qxl_device *qdev;
+       struct qxl_ttm_tt *gtt;
+
+       qdev = qxl_get_qdev(bdev);
+       gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+       if (gtt == NULL)
+               return NULL;
+       gtt->ttm.ttm.func = &qxl_backend_func;
+       gtt->qdev = qdev;
+       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+                           dummy_read_page)) {
+               kfree(gtt);
+               return NULL;
+       }
+       return &gtt->ttm.ttm;
+}
+
+static void qxl_move_null(struct ttm_buffer_object *bo,
+                            struct ttm_mem_reg *new_mem)
+{
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       BUG_ON(old_mem->mm_node != NULL);
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo,
+                      bool evict, bool interruptible,
+                      bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
+{
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+               qxl_move_null(bo, new_mem);
+               return 0;
+       }
+       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
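/*
 * Summary of the wait policy implemented below (editorial comment derived
 * from the code, not part of the original patch): each pass first nudges
 * the hardware - a surface update on the first pass, an out-of-memory
 * notify on later ones - then runs up to ten garbage-collect passes
 * waiting for the fence's releases to drain.  If releases remain, passes
 * 3 and later back off with a short usleep_range(); with drawable
 * releases still outstanding after more than 300 passes the wait gives up
 * with -EBUSY, and without drawable releases it stops retrying after four
 * passes.
 */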
+static int qxl_sync_obj_wait(void *sync_obj,
+                            bool lazy, bool interruptible)
+{
+       struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+       int count = 0, sc = 0;
+       struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+       if (qfence->num_active_releases == 0)
+               return 0;
+
+retry:
+       if (sc == 0) {
+               if (bo->type == QXL_GEM_DOMAIN_SURFACE)
+                       qxl_update_surface(qfence->qdev, bo);
+       } else if (sc >= 1) {
+               qxl_io_notify_oom(qfence->qdev);
+       }
+
+       sc++;
+
+       for (count = 0; count < 10; count++) {
+               bool ret;
+               ret = qxl_queue_garbage_collect(qfence->qdev, true);
+               if (ret == false)
+                       break;
+
+               if (qfence->num_active_releases == 0)
+                       return 0;
+       }
+
+       if (qfence->num_active_releases) {
+               bool have_drawable_releases = false;
+               void **slot;
+               struct radix_tree_iter iter;
+               int release_id;
+
+               radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
+                       struct qxl_release *release;
+
+                       release_id = iter.index;
+                       release = qxl_release_from_id_locked(qfence->qdev, release_id);
+                       if (release == NULL)
+                               continue;
+
+                       if (release->type == QXL_RELEASE_DRAWABLE)
+                               have_drawable_releases = true;
+               }
+
+               qxl_queue_garbage_collect(qfence->qdev, true);
+
+               if (have_drawable_releases || sc < 4) {
+                       if (sc > 2)
+                               /* back off */
+                               usleep_range(500, 1000);
+                       if (have_drawable_releases && sc > 300) {
+                               WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
+                               return -EBUSY;
+                       }
+                       goto retry;
+               }
+       }
+       return 0;
+}
+
+static int qxl_sync_obj_flush(void *sync_obj)
+{
+       return 0;
+}
+
+static void qxl_sync_obj_unref(void **sync_obj)
+{
+}
+
+static void *qxl_sync_obj_ref(void *sync_obj)
+{
+       return sync_obj;
+}
+
+static bool qxl_sync_obj_signaled(void *sync_obj)
+{
+       struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+       return (qfence->num_active_releases == 0);
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+                              struct ttm_mem_reg *new_mem)
+{
+       struct qxl_bo *qbo;
+       struct qxl_device *qdev;
+
+       if (!qxl_ttm_bo_is_qxl_bo(bo))
+               return;
+       qbo = container_of(bo, struct qxl_bo, tbo);
+       qdev = qbo->gem_base.dev->dev_private;
+
+       if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+               qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+       .ttm_tt_create = &qxl_ttm_tt_create,
+       .ttm_tt_populate = &qxl_ttm_tt_populate,
+       .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
+       .invalidate_caches = &qxl_invalidate_caches,
+       .init_mem_type = &qxl_init_mem_type,
+       .evict_flags = &qxl_evict_flags,
+       .move = &qxl_bo_move,
+       .verify_access = &qxl_verify_access,
+       .io_mem_reserve = &qxl_ttm_io_mem_reserve,
+       .io_mem_free = &qxl_ttm_io_mem_free,
+       .sync_obj_signaled = &qxl_sync_obj_signaled,
+       .sync_obj_wait = &qxl_sync_obj_wait,
+       .sync_obj_flush = &qxl_sync_obj_flush,
+       .sync_obj_unref = &qxl_sync_obj_unref,
+       .sync_obj_ref = &qxl_sync_obj_ref,
+       .move_notify = &qxl_bo_move_notify,
+};
+
+
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+       int r;
+       int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+       r = qxl_ttm_global_init(qdev);
+       if (r)
+               return r;
+       /* No other users of the address space, so set it to 0 */
+       r = ttm_bo_device_init(&qdev->mman.bdev,
+                              qdev->mman.bo_global_ref.ref.object,
+                              &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+       if (r) {
+               DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+               return r;
+       }
+       /* NOTE: this includes the framebuffer (aka surface 0) */
+       num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+                          num_io_pages);
+       if (r) {
+               DRM_ERROR("Failed initializing VRAM heap.\n");
+               return r;
+       }
+       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+                          qdev->surfaceram_size / PAGE_SIZE);
+       if (r) {
+               DRM_ERROR("Failed initializing Surfaces heap.\n");
+               return r;
+       }
+       DRM_INFO("qxl: %uM of VRAM memory size\n",
+                (unsigned)qdev->vram_size / (1024 * 1024));
+       DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+                ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+       if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+               qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+       r = qxl_ttm_debugfs_init(qdev);
+       if (r) {
+               DRM_ERROR("Failed to init debugfs\n");
+               return r;
+       }
+       return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+       ttm_bo_device_release(&qdev->mman.bdev);
+       qxl_ttm_global_fini(qdev);
+       DRM_INFO("qxl: ttm finalized\n");
+}
+
+
+#define QXL_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int qxl_mm_dump_table(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+       struct drm_device *dev = node->minor->dev;
+       struct qxl_device *rdev = dev->dev_private;
+       int ret;
+       struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+       spin_lock(&glob->lru_lock);
+       ret = drm_mm_dump_table(m, mm);
+       spin_unlock(&glob->lru_lock);
+       return ret;
+}
+#endif
+
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+       static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+       unsigned i;
+
+       for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+               if (i == 0)
+                       sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+               else
+                       sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+               qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+               qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+               qxl_mem_types_list[i].driver_features = 0;
+               if (i == 0)
+                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+               else
+                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+
+       }
+       return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+#else
+       return 0;
+#endif
+}
index bf17252..86c5e36 100644 (file)
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
        evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
        atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-       si_blit_shaders.o radeon_prime.o
+       si_blit_shaders.o radeon_prime.o radeon_uvd.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
index 46a9c37..fb441a7 100644 (file)
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
                firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
                DRM_DEBUG("atom firmware requested %08x %dkb\n",
-                         firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-                         firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+                         le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+                         le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
 
-               usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+               usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
        }
        ctx->scratch_size_bytes = 0;
        if (usage_bytes == 0)
index 4b04ba3..0ee5737 100644 (file)
@@ -458,6 +458,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter      
@@ -490,6 +491,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter      
index 21a892c..6d6fdb3 100644 (file)
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                /* use frac fb div on APUs */
                if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+               /* use frac fb div on RS780/RS880 */
+               if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+                       radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
        } else {
index 4552d4a..44a7da6 100644 (file)
@@ -2150,13 +2150,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
-               r600_hdmi_enable(encoder);
-               if (ASIC_IS_DCE6(rdev))
-                       ; /* TODO (use pointers instead of if-s?) */
-               else if (ASIC_IS_DCE4(rdev))
-                       evergreen_hdmi_setmode(encoder, adjusted_mode);
-               else
-                       r600_hdmi_setmode(encoder, adjusted_mode);
+               if (rdev->asic->display.hdmi_enable)
+                       radeon_hdmi_enable(rdev, encoder, true);
+               if (rdev->asic->display.hdmi_setmode)
+                       radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
        }
 }
 
@@ -2413,8 +2410,10 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 
 disable_done:
        if (radeon_encoder_is_digital(encoder)) {
-               if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
-                       r600_hdmi_disable(encoder);
+               if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+                       if (rdev->asic->display.hdmi_enable)
+                               radeon_hdmi_enable(rdev, encoder, false);
+               }
                dig = radeon_encoder->enc_priv;
                dig->dig_encoder = -1;
        }
index 305a657..105bafb 100644 (file)
@@ -53,6 +53,864 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
                                     int ring, u32 cp_int_cntl);
 
+static const u32 evergreen_golden_registers[] =
+{
+       0x3f90, 0xffff0000, 0xff000000,
+       0x9148, 0xffff0000, 0xff000000,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x9b7c, 0xffffffff, 0x00000000,
+       0x8a14, 0xffffffff, 0x00000007,
+       0x8b10, 0xffffffff, 0x00000000,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0xffffffff, 0x000000c2,
+       0x88d4, 0xffffffff, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000,
+       0xc78, 0x00000080, 0x00000080,
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5e78, 0xffffffff, 0x001000f0,
+       0x6104, 0x01000300, 0x00000000,
+       0x5bc0, 0x00300000, 0x00000000,
+       0x7030, 0xffffffff, 0x00000011,
+       0x7c30, 0xffffffff, 0x00000011,
+       0x10830, 0xffffffff, 0x00000011,
+       0x11430, 0xffffffff, 0x00000011,
+       0x12030, 0xffffffff, 0x00000011,
+       0x12c30, 0xffffffff, 0x00000011,
+       0xd02c, 0xffffffff, 0x08421000,
+       0x240c, 0xffffffff, 0x00000380,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x28a4c, 0x06000000, 0x06000000,
+       0x10c, 0x00000001, 0x00000001,
+       0x8d00, 0xffffffff, 0x100e4848,
+       0x8d04, 0xffffffff, 0x00164745,
+       0x8c00, 0xffffffff, 0xe4000003,
+       0x8c04, 0xffffffff, 0x40600060,
+       0x8c08, 0xffffffff, 0x001c001c,
+       0x8cf0, 0xffffffff, 0x08e00620,
+       0x8c20, 0xffffffff, 0x00800080,
+       0x8c24, 0xffffffff, 0x00800080,
+       0x8c18, 0xffffffff, 0x20202078,
+       0x8c1c, 0xffffffff, 0x00001010,
+       0x28350, 0xffffffff, 0x00000000,
+       0xa008, 0xffffffff, 0x00010000,
+       0x5cc, 0xffffffff, 0x00000001,
+       0x9508, 0xffffffff, 0x00000002,
+       0x913c, 0x0000000f, 0x0000000a
+};
+
+static const u32 evergreen_golden_registers2[] =
+{
+       0x2f4c, 0xffffffff, 0x00000000,
+       0x54f4, 0xffffffff, 0x00000000,
+       0x54f0, 0xffffffff, 0x00000000,
+       0x5498, 0xffffffff, 0x00000000,
+       0x549c, 0xffffffff, 0x00000000,
+       0x5494, 0xffffffff, 0x00000000,
+       0x53cc, 0xffffffff, 0x00000000,
+       0x53c8, 0xffffffff, 0x00000000,
+       0x53c4, 0xffffffff, 0x00000000,
+       0x53c0, 0xffffffff, 0x00000000,
+       0x53bc, 0xffffffff, 0x00000000,
+       0x53b8, 0xffffffff, 0x00000000,
+       0x53b4, 0xffffffff, 0x00000000,
+       0x53b0, 0xffffffff, 0x00000000
+};
+
+static const u32 cypress_mgcg_init[] =
+{
+       0x802c, 0xffffffff, 0xc0000000,
+       0x5448, 0xffffffff, 0x00000100,
+       0x55e4, 0xffffffff, 0x00000100,
+       0x160c, 0xffffffff, 0x00000100,
+       0x5644, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x8d58, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x9654, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0x9040, 0xffffffff, 0x00000100,
+       0xa200, 0xffffffff, 0x00000100,
+       0xa204, 0xffffffff, 0x00000100,
+       0xa208, 0xffffffff, 0x00000100,
+       0xa20c, 0xffffffff, 0x00000100,
+       0x971c, 0xffffffff, 0x00000100,
+       0x977c, 0xffffffff, 0x00000100,
+       0x3f80, 0xffffffff, 0x00000100,
+       0xa210, 0xffffffff, 0x00000100,
+       0xa214, 0xffffffff, 0x00000100,
+       0x4d8, 0xffffffff, 0x00000100,
+       0x9784, 0xffffffff, 0x00000100,
+       0x9698, 0xffffffff, 0x00000100,
+       0x4d4, 0xffffffff, 0x00000200,
+       0x30cc, 0xffffffff, 0x00000100,
+       0xd0c0, 0xffffffff, 0xff000100,
+       0x802c, 0xffffffff, 0x40000000,
+       0x915c, 0xffffffff, 0x00010000,
+       0x9160, 0xffffffff, 0x00030002,
+       0x9178, 0xffffffff, 0x00070000,
+       0x917c, 0xffffffff, 0x00030002,
+       0x9180, 0xffffffff, 0x00050004,
+       0x918c, 0xffffffff, 0x00010006,
+       0x9190, 0xffffffff, 0x00090008,
+       0x9194, 0xffffffff, 0x00070000,
+       0x9198, 0xffffffff, 0x00030002,
+       0x919c, 0xffffffff, 0x00050004,
+       0x91a8, 0xffffffff, 0x00010006,
+       0x91ac, 0xffffffff, 0x00090008,
+       0x91b0, 0xffffffff, 0x00070000,
+       0x91b4, 0xffffffff, 0x00030002,
+       0x91b8, 0xffffffff, 0x00050004,
+       0x91c4, 0xffffffff, 0x00010006,
+       0x91c8, 0xffffffff, 0x00090008,
+       0x91cc, 0xffffffff, 0x00070000,
+       0x91d0, 0xffffffff, 0x00030002,
+       0x91d4, 0xffffffff, 0x00050004,
+       0x91e0, 0xffffffff, 0x00010006,
+       0x91e4, 0xffffffff, 0x00090008,
+       0x91e8, 0xffffffff, 0x00000000,
+       0x91ec, 0xffffffff, 0x00070000,
+       0x91f0, 0xffffffff, 0x00030002,
+       0x91f4, 0xffffffff, 0x00050004,
+       0x9200, 0xffffffff, 0x00010006,
+       0x9204, 0xffffffff, 0x00090008,
+       0x9208, 0xffffffff, 0x00070000,
+       0x920c, 0xffffffff, 0x00030002,
+       0x9210, 0xffffffff, 0x00050004,
+       0x921c, 0xffffffff, 0x00010006,
+       0x9220, 0xffffffff, 0x00090008,
+       0x9224, 0xffffffff, 0x00070000,
+       0x9228, 0xffffffff, 0x00030002,
+       0x922c, 0xffffffff, 0x00050004,
+       0x9238, 0xffffffff, 0x00010006,
+       0x923c, 0xffffffff, 0x00090008,
+       0x9240, 0xffffffff, 0x00070000,
+       0x9244, 0xffffffff, 0x00030002,
+       0x9248, 0xffffffff, 0x00050004,
+       0x9254, 0xffffffff, 0x00010006,
+       0x9258, 0xffffffff, 0x00090008,
+       0x925c, 0xffffffff, 0x00070000,
+       0x9260, 0xffffffff, 0x00030002,
+       0x9264, 0xffffffff, 0x00050004,
+       0x9270, 0xffffffff, 0x00010006,
+       0x9274, 0xffffffff, 0x00090008,
+       0x9278, 0xffffffff, 0x00070000,
+       0x927c, 0xffffffff, 0x00030002,
+       0x9280, 0xffffffff, 0x00050004,
+       0x928c, 0xffffffff, 0x00010006,
+       0x9290, 0xffffffff, 0x00090008,
+       0x9294, 0xffffffff, 0x00000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x802c, 0xffffffff, 0x40010000,
+       0x915c, 0xffffffff, 0x00010000,
+       0x9160, 0xffffffff, 0x00030002,
+       0x9178, 0xffffffff, 0x00070000,
+       0x917c, 0xffffffff, 0x00030002,
+       0x9180, 0xffffffff, 0x00050004,
+       0x918c, 0xffffffff, 0x00010006,
+       0x9190, 0xffffffff, 0x00090008,
+       0x9194, 0xffffffff, 0x00070000,
+       0x9198, 0xffffffff, 0x00030002,
+       0x919c, 0xffffffff, 0x00050004,
+       0x91a8, 0xffffffff, 0x00010006,
+       0x91ac, 0xffffffff, 0x00090008,
+       0x91b0, 0xffffffff, 0x00070000,
+       0x91b4, 0xffffffff, 0x00030002,
+       0x91b8, 0xffffffff, 0x00050004,
+       0x91c4, 0xffffffff, 0x00010006,
+       0x91c8, 0xffffffff, 0x00090008,
+       0x91cc, 0xffffffff, 0x00070000,
+       0x91d0, 0xffffffff, 0x00030002,
+       0x91d4, 0xffffffff, 0x00050004,
+       0x91e0, 0xffffffff, 0x00010006,
+       0x91e4, 0xffffffff, 0x00090008,
+       0x91e8, 0xffffffff, 0x00000000,
+       0x91ec, 0xffffffff, 0x00070000,
+       0x91f0, 0xffffffff, 0x00030002,
+       0x91f4, 0xffffffff, 0x00050004,
+       0x9200, 0xffffffff, 0x00010006,
+       0x9204, 0xffffffff, 0x00090008,
+       0x9208, 0xffffffff, 0x00070000,
+       0x920c, 0xffffffff, 0x00030002,
+       0x9210, 0xffffffff, 0x00050004,
+       0x921c, 0xffffffff, 0x00010006,
+       0x9220, 0xffffffff, 0x00090008,
+       0x9224, 0xffffffff, 0x00070000,
+       0x9228, 0xffffffff, 0x00030002,
+       0x922c, 0xffffffff, 0x00050004,
+       0x9238, 0xffffffff, 0x00010006,
+       0x923c, 0xffffffff, 0x00090008,
+       0x9240, 0xffffffff, 0x00070000,
+       0x9244, 0xffffffff, 0x00030002,
+       0x9248, 0xffffffff, 0x00050004,
+       0x9254, 0xffffffff, 0x00010006,
+       0x9258, 0xffffffff, 0x00090008,
+       0x925c, 0xffffffff, 0x00070000,
+       0x9260, 0xffffffff, 0x00030002,
+       0x9264, 0xffffffff, 0x00050004,
+       0x9270, 0xffffffff, 0x00010006,
+       0x9274, 0xffffffff, 0x00090008,
+       0x9278, 0xffffffff, 0x00070000,
+       0x927c, 0xffffffff, 0x00030002,
+       0x9280, 0xffffffff, 0x00050004,
+       0x928c, 0xffffffff, 0x00010006,
+       0x9290, 0xffffffff, 0x00090008,
+       0x9294, 0xffffffff, 0x00000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 redwood_mgcg_init[] =
+{
+       0x802c, 0xffffffff, 0xc0000000,
+       0x5448, 0xffffffff, 0x00000100,
+       0x55e4, 0xffffffff, 0x00000100,
+       0x160c, 0xffffffff, 0x00000100,
+       0x5644, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x8d58, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x9654, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0x9040, 0xffffffff, 0x00000100,
+       0xa200, 0xffffffff, 0x00000100,
+       0xa204, 0xffffffff, 0x00000100,
+       0xa208, 0xffffffff, 0x00000100,
+       0xa20c, 0xffffffff, 0x00000100,
+       0x971c, 0xffffffff, 0x00000100,
+       0x977c, 0xffffffff, 0x00000100,
+       0x3f80, 0xffffffff, 0x00000100,
+       0xa210, 0xffffffff, 0x00000100,
+       0xa214, 0xffffffff, 0x00000100,
+       0x4d8, 0xffffffff, 0x00000100,
+       0x9784, 0xffffffff, 0x00000100,
+       0x9698, 0xffffffff, 0x00000100,
+       0x4d4, 0xffffffff, 0x00000200,
+       0x30cc, 0xffffffff, 0x00000100,
+       0xd0c0, 0xffffffff, 0xff000100,
+       0x802c, 0xffffffff, 0x40000000,
+       0x915c, 0xffffffff, 0x00010000,
+       0x9160, 0xffffffff, 0x00030002,
+       0x9178, 0xffffffff, 0x00070000,
+       0x917c, 0xffffffff, 0x00030002,
+       0x9180, 0xffffffff, 0x00050004,
+       0x918c, 0xffffffff, 0x00010006,
+       0x9190, 0xffffffff, 0x00090008,
+       0x9194, 0xffffffff, 0x00070000,
+       0x9198, 0xffffffff, 0x00030002,
+       0x919c, 0xffffffff, 0x00050004,
+       0x91a8, 0xffffffff, 0x00010006,
+       0x91ac, 0xffffffff, 0x00090008,
+       0x91b0, 0xffffffff, 0x00070000,
+       0x91b4, 0xffffffff, 0x00030002,
+       0x91b8, 0xffffffff, 0x00050004,
+       0x91c4, 0xffffffff, 0x00010006,
+       0x91c8, 0xffffffff, 0x00090008,
+       0x91cc, 0xffffffff, 0x00070000,
+       0x91d0, 0xffffffff, 0x00030002,
+       0x91d4, 0xffffffff, 0x00050004,
+       0x91e0, 0xffffffff, 0x00010006,
+       0x91e4, 0xffffffff, 0x00090008,
+       0x91e8, 0xffffffff, 0x00000000,
+       0x91ec, 0xffffffff, 0x00070000,
+       0x91f0, 0xffffffff, 0x00030002,
+       0x91f4, 0xffffffff, 0x00050004,
+       0x9200, 0xffffffff, 0x00010006,
+       0x9204, 0xffffffff, 0x00090008,
+       0x9294, 0xffffffff, 0x00000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 cedar_golden_registers[] =
+{
+       0x3f90, 0xffff0000, 0xff000000,
+       0x9148, 0xffff0000, 0xff000000,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x9b7c, 0xffffffff, 0x00000000,
+       0x8a14, 0xffffffff, 0x00000007,
+       0x8b10, 0xffffffff, 0x00000000,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0xffffffff, 0x000000c2,
+       0x88d4, 0xffffffff, 0x00000000,
+       0x8974, 0xffffffff, 0x00000000,
+       0xc78, 0x00000080, 0x00000080,
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5e78, 0xffffffff, 0x001000f0,
+       0x6104, 0x01000300, 0x00000000,
+       0x5bc0, 0x00300000, 0x00000000,
+       0x7030, 0xffffffff, 0x00000011,
+       0x7c30, 0xffffffff, 0x00000011,
+       0x10830, 0xffffffff, 0x00000011,
+       0x11430, 0xffffffff, 0x00000011,
+       0xd02c, 0xffffffff, 0x08421000,
+       0x240c, 0xffffffff, 0x00000380,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x28a4c, 0x06000000, 0x06000000,
+       0x10c, 0x00000001, 0x00000001,
+       0x8d00, 0xffffffff, 0x100e4848,
+       0x8d04, 0xffffffff, 0x00164745,
+       0x8c00, 0xffffffff, 0xe4000003,
+       0x8c04, 0xffffffff, 0x40600060,
+       0x8c08, 0xffffffff, 0x001c001c,
+       0x8cf0, 0xffffffff, 0x08e00410,
+       0x8c20, 0xffffffff, 0x00800080,
+       0x8c24, 0xffffffff, 0x00800080,
+       0x8c18, 0xffffffff, 0x20202078,
+       0x8c1c, 0xffffffff, 0x00001010,
+       0x28350, 0xffffffff, 0x00000000,
+       0xa008, 0xffffffff, 0x00010000,
+       0x5cc, 0xffffffff, 0x00000001,
+       0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 cedar_mgcg_init[] =
+{
+       0x802c, 0xffffffff, 0xc0000000,
+       0x5448, 0xffffffff, 0x00000100,
+       0x55e4, 0xffffffff, 0x00000100,
+       0x160c, 0xffffffff, 0x00000100,
+       0x5644, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x8d58, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x9654, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0x9040, 0xffffffff, 0x00000100,
+       0xa200, 0xffffffff, 0x00000100,
+       0xa204, 0xffffffff, 0x00000100,
+       0xa208, 0xffffffff, 0x00000100,
+       0xa20c, 0xffffffff, 0x00000100,
+       0x971c, 0xffffffff, 0x00000100,
+       0x977c, 0xffffffff, 0x00000100,
+       0x3f80, 0xffffffff, 0x00000100,
+       0xa210, 0xffffffff, 0x00000100,
+       0xa214, 0xffffffff, 0x00000100,
+       0x4d8, 0xffffffff, 0x00000100,
+       0x9784, 0xffffffff, 0x00000100,
+       0x9698, 0xffffffff, 0x00000100,
+       0x4d4, 0xffffffff, 0x00000200,
+       0x30cc, 0xffffffff, 0x00000100,
+       0xd0c0, 0xffffffff, 0xff000100,
+       0x802c, 0xffffffff, 0x40000000,
+       0x915c, 0xffffffff, 0x00010000,
+       0x9178, 0xffffffff, 0x00050000,
+       0x917c, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00010004,
+       0x9190, 0xffffffff, 0x00070006,
+       0x9194, 0xffffffff, 0x00050000,
+       0x9198, 0xffffffff, 0x00030002,
+       0x91a8, 0xffffffff, 0x00010004,
+       0x91ac, 0xffffffff, 0x00070006,
+       0x91e8, 0xffffffff, 0x00000000,
+       0x9294, 0xffffffff, 0x00000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 juniper_mgcg_init[] =
+{
+       0x802c, 0xffffffff, 0xc0000000,
+       0x5448, 0xffffffff, 0x00000100,
+       0x55e4, 0xffffffff, 0x00000100,
+       0x160c, 0xffffffff, 0x00000100,
+       0x5644, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x8d58, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x9654, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0x9040, 0xffffffff, 0x00000100,
+       0xa200, 0xffffffff, 0x00000100,
+       0xa204, 0xffffffff, 0x00000100,
+       0xa208, 0xffffffff, 0x00000100,
+       0xa20c, 0xffffffff, 0x00000100,
+       0x971c, 0xffffffff, 0x00000100,
+       0xd0c0, 0xffffffff, 0xff000100,
+       0x802c, 0xffffffff, 0x40000000,
+       0x915c, 0xffffffff, 0x00010000,
+       0x9160, 0xffffffff, 0x00030002,
+       0x9178, 0xffffffff, 0x00070000,
+       0x917c, 0xffffffff, 0x00030002,
+       0x9180, 0xffffffff, 0x00050004,
+       0x918c, 0xffffffff, 0x00010006,
+       0x9190, 0xffffffff, 0x00090008,
+       0x9194, 0xffffffff, 0x00070000,
+       0x9198, 0xffffffff, 0x00030002,
+       0x919c, 0xffffffff, 0x00050004,
+       0x91a8, 0xffffffff, 0x00010006,
+       0x91ac, 0xffffffff, 0x00090008,
+       0x91b0, 0xffffffff, 0x00070000,
+       0x91b4, 0xffffffff, 0x00030002,
+       0x91b8, 0xffffffff, 0x00050004,
+       0x91c4, 0xffffffff, 0x00010006,
+       0x91c8, 0xffffffff, 0x00090008,
+       0x91cc, 0xffffffff, 0x00070000,
+       0x91d0, 0xffffffff, 0x00030002,
+       0x91d4, 0xffffffff, 0x00050004,
+       0x91e0, 0xffffffff, 0x00010006,
+       0x91e4, 0xffffffff, 0x00090008,
+       0x91e8, 0xffffffff, 0x00000000,
+       0x91ec, 0xffffffff, 0x00070000,
+       0x91f0, 0xffffffff, 0x00030002,
+       0x91f4, 0xffffffff, 0x00050004,
+       0x9200, 0xffffffff, 0x00010006,
+       0x9204, 0xffffffff, 0x00090008,
+       0x9208, 0xffffffff, 0x00070000,
+       0x920c, 0xffffffff, 0x00030002,
+       0x9210, 0xffffffff, 0x00050004,
+       0x921c, 0xffffffff, 0x00010006,
+       0x9220, 0xffffffff, 0x00090008,
+       0x9224, 0xffffffff, 0x00070000,
+       0x9228, 0xffffffff, 0x00030002,
+       0x922c, 0xffffffff, 0x00050004,
+       0x9238, 0xffffffff, 0x00010006,
+       0x923c, 0xffffffff, 0x00090008,
+       0x9240, 0xffffffff, 0x00070000,
+       0x9244, 0xffffffff, 0x00030002,
+       0x9248, 0xffffffff, 0x00050004,
+       0x9254, 0xffffffff, 0x00010006,
+       0x9258, 0xffffffff, 0x00090008,
+       0x925c, 0xffffffff, 0x00070000,
+       0x9260, 0xffffffff, 0x00030002,
+       0x9264, 0xffffffff, 0x00050004,
+       0x9270, 0xffffffff, 0x00010006,
+       0x9274, 0xffffffff, 0x00090008,
+       0x9278, 0xffffffff, 0x00070000,
+       0x927c, 0xffffffff, 0x00030002,
+       0x9280, 0xffffffff, 0x00050004,
+       0x928c, 0xffffffff, 0x00010006,
+       0x9290, 0xffffffff, 0x00090008,
+       0x9294, 0xffffffff, 0x00000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x802c, 0xffffffff, 0xc0000000,
+       0x977c, 0xffffffff, 0x00000100,
+       0x3f80, 0xffffffff, 0x00000100,
+       0xa210, 0xffffffff, 0x00000100,
+       0xa214, 0xffffffff, 0x00000100,
+       0x4d8, 0xffffffff, 0x00000100,
+       0x9784, 0xffffffff, 0x00000100,
+       0x9698, 0xffffffff, 0x00000100,
+       0x4d4, 0xffffffff, 0x00000200,
+       0x30cc, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 supersumo_golden_registers[] =
+{
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5cc, 0xffffffff, 0x00000001,
+       0x7030, 0xffffffff, 0x00000011,
+       0x7c30, 0xffffffff, 0x00000011,
+       0x6104, 0x01000300, 0x00000000,
+       0x5bc0, 0x00300000, 0x00000000,
+       0x8c04, 0xffffffff, 0x40600060,
+       0x8c08, 0xffffffff, 0x001c001c,
+       0x8c20, 0xffffffff, 0x00800080,
+       0x8c24, 0xffffffff, 0x00800080,
+       0x8c18, 0xffffffff, 0x20202078,
+       0x8c1c, 0xffffffff, 0x00001010,
+       0x918c, 0xffffffff, 0x00010006,
+       0x91a8, 0xffffffff, 0x00010006,
+       0x91c4, 0xffffffff, 0x00010006,
+       0x91e0, 0xffffffff, 0x00010006,
+       0x9200, 0xffffffff, 0x00010006,
+       0x9150, 0xffffffff, 0x6e944040,
+       0x917c, 0xffffffff, 0x00030002,
+       0x9180, 0xffffffff, 0x00050004,
+       0x9198, 0xffffffff, 0x00030002,
+       0x919c, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00030002,
+       0x91b8, 0xffffffff, 0x00050004,
+       0x91d0, 0xffffffff, 0x00030002,
+       0x91d4, 0xffffffff, 0x00050004,
+       0x91f0, 0xffffffff, 0x00030002,
+       0x91f4, 0xffffffff, 0x00050004,
+       0x915c, 0xffffffff, 0x00010000,
+       0x9160, 0xffffffff, 0x00030002,
+       0x3f90, 0xffff0000, 0xff000000,
+       0x9178, 0xffffffff, 0x00070000,
+       0x9194, 0xffffffff, 0x00070000,
+       0x91b0, 0xffffffff, 0x00070000,
+       0x91cc, 0xffffffff, 0x00070000,
+       0x91ec, 0xffffffff, 0x00070000,
+       0x9148, 0xffff0000, 0xff000000,
+       0x9190, 0xffffffff, 0x00090008,
+       0x91ac, 0xffffffff, 0x00090008,
+       0x91c8, 0xffffffff, 0x00090008,
+       0x91e4, 0xffffffff, 0x00090008,
+       0x9204, 0xffffffff, 0x00090008,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x5644, 0xffffffff, 0x00000100,
+       0x9b7c, 0xffffffff, 0x00000000,
+       0x8030, 0xffffffff, 0x0000100a,
+       0x8a14, 0xffffffff, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x8b10, 0xffffffff, 0x00000000,
+       0x28a4c, 0x06000000, 0x06000000,
+       0x4d8, 0xffffffff, 0x00000100,
+       0x913c, 0xffff000f, 0x0100000a,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0xffffffff, 0x000000c2,
+       0x88d4, 0xffffffff, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000,
+       0xc78, 0x00000080, 0x00000080,
+       0x5e78, 0xffffffff, 0x001000f0,
+       0xd02c, 0xffffffff, 0x08421000,
+       0xa008, 0xffffffff, 0x00010000,
+       0x8d00, 0xffffffff, 0x100e4848,
+       0x8d04, 0xffffffff, 0x00164745,
+       0x8c00, 0xffffffff, 0xe4000003,
+       0x8cf0, 0x1fffffff, 0x08e00620,
+       0x28350, 0xffffffff, 0x00000000,
+       0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 sumo_golden_registers[] =
+{
+       0x900c, 0x00ffffff, 0x0017071f,
+       0x8c18, 0xffffffff, 0x10101060,
+       0x8c1c, 0xffffffff, 0x00001010,
+       0x8c30, 0x0000000f, 0x00000005,
+       0x9688, 0x0000000f, 0x00000007
+};
+
+static const u32 wrestler_golden_registers[] =
+{
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5cc, 0xffffffff, 0x00000001,
+       0x7030, 0xffffffff, 0x00000011,
+       0x7c30, 0xffffffff, 0x00000011,
+       0x6104, 0x01000300, 0x00000000,
+       0x5bc0, 0x00300000, 0x00000000,
+       0x918c, 0xffffffff, 0x00010006,
+       0x91a8, 0xffffffff, 0x00010006,
+       0x9150, 0xffffffff, 0x6e944040,
+       0x917c, 0xffffffff, 0x00030002,
+       0x9198, 0xffffffff, 0x00030002,
+       0x915c, 0xffffffff, 0x00010000,
+       0x3f90, 0xffff0000, 0xff000000,
+       0x9178, 0xffffffff, 0x00070000,
+       0x9194, 0xffffffff, 0x00070000,
+       0x9148, 0xffff0000, 0xff000000,
+       0x9190, 0xffffffff, 0x00090008,
+       0x91ac, 0xffffffff, 0x00090008,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x929c, 0xffffffff, 0x00000001,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x9b7c, 0xffffffff, 0x00000000,
+       0x8030, 0xffffffff, 0x0000100a,
+       0x8a14, 0xffffffff, 0x00000001,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x8b10, 0xffffffff, 0x00000000,
+       0x28a4c, 0x06000000, 0x06000000,
+       0x4d8, 0xffffffff, 0x00000100,
+       0x913c, 0xffff000f, 0x0100000a,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0xffffffff, 0x000000c2,
+       0x88d4, 0xffffffff, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000,
+       0xc78, 0x00000080, 0x00000080,
+       0x5e78, 0xffffffff, 0x001000f0,
+       0xd02c, 0xffffffff, 0x08421000,
+       0xa008, 0xffffffff, 0x00010000,
+       0x8d00, 0xffffffff, 0x100e4848,
+       0x8d04, 0xffffffff, 0x00164745,
+       0x8c00, 0xffffffff, 0xe4000003,
+       0x8cf0, 0x1fffffff, 0x08e00410,
+       0x28350, 0xffffffff, 0x00000000,
+       0x9508, 0xffffffff, 0x00000002,
+       0x900c, 0xffffffff, 0x0017071f,
+       0x8c18, 0xffffffff, 0x10101060,
+       0x8c1c, 0xffffffff, 0x00001010
+};
+
+static const u32 barts_golden_registers[] =
+{
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5e78, 0x8f311ff1, 0x001000f0,
+       0x3f90, 0xffff0000, 0xff000000,
+       0x9148, 0xffff0000, 0xff000000,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0xc78, 0x00000080, 0x00000080,
+       0xbd4, 0x70073777, 0x00010001,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd0b8, 0x03773777, 0x02011003,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x98f8, 0x33773777, 0x02011003,
+       0x98fc, 0xffffffff, 0x76543210,
+       0x7030, 0x31000311, 0x00000011,
+       0x2f48, 0x00000007, 0x02011003,
+       0x6b28, 0x00000010, 0x00000012,
+       0x7728, 0x00000010, 0x00000012,
+       0x10328, 0x00000010, 0x00000012,
+       0x10f28, 0x00000010, 0x00000012,
+       0x11b28, 0x00000010, 0x00000012,
+       0x12728, 0x00000010, 0x00000012,
+       0x240c, 0x000007ff, 0x00000380,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x10c, 0x00000001, 0x00010003,
+       0xa02c, 0xffffffff, 0x0000009b,
+       0x913c, 0x0000000f, 0x0100000a,
+       0x8d00, 0xffff7f7f, 0x100e4848,
+       0x8d04, 0x00ffffff, 0x00164745,
+       0x8c00, 0xfffc0003, 0xe4000003,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x8c08, 0x00ff00ff, 0x001c001c,
+       0x8cf0, 0x1fff1fff, 0x08e00620,
+       0x8c20, 0x0fff0fff, 0x00800080,
+       0x8c24, 0x0fff0fff, 0x00800080,
+       0x8c18, 0xffffffff, 0x20202078,
+       0x8c1c, 0x0000ffff, 0x00001010,
+       0x28350, 0x00000f01, 0x00000000,
+       0x9508, 0x3700001f, 0x00000002,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0x001f3ae3, 0x000000c2,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 turks_golden_registers[] =
+{
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5e78, 0x8f311ff1, 0x001000f0,
+       0x8c8, 0x00003000, 0x00001070,
+       0x8cc, 0x000fffff, 0x00040035,
+       0x3f90, 0xffff0000, 0xfff00000,
+       0x9148, 0xffff0000, 0xfff00000,
+       0x3f94, 0xffff0000, 0xfff00000,
+       0x914c, 0xffff0000, 0xfff00000,
+       0xc78, 0x00000080, 0x00000080,
+       0xbd4, 0x00073007, 0x00010002,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd0b8, 0x03773777, 0x02010002,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x98f8, 0x33773777, 0x00010002,
+       0x98fc, 0xffffffff, 0x33221100,
+       0x7030, 0x31000311, 0x00000011,
+       0x2f48, 0x33773777, 0x00010002,
+       0x6b28, 0x00000010, 0x00000012,
+       0x7728, 0x00000010, 0x00000012,
+       0x10328, 0x00000010, 0x00000012,
+       0x10f28, 0x00000010, 0x00000012,
+       0x11b28, 0x00000010, 0x00000012,
+       0x12728, 0x00000010, 0x00000012,
+       0x240c, 0x000007ff, 0x00000380,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x10c, 0x00000001, 0x00010003,
+       0xa02c, 0xffffffff, 0x0000009b,
+       0x913c, 0x0000000f, 0x0100000a,
+       0x8d00, 0xffff7f7f, 0x100e4848,
+       0x8d04, 0x00ffffff, 0x00164745,
+       0x8c00, 0xfffc0003, 0xe4000003,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x8c08, 0x00ff00ff, 0x001c001c,
+       0x8cf0, 0x1fff1fff, 0x08e00410,
+       0x8c20, 0x0fff0fff, 0x00800080,
+       0x8c24, 0x0fff0fff, 0x00800080,
+       0x8c18, 0xffffffff, 0x20202078,
+       0x8c1c, 0x0000ffff, 0x00001010,
+       0x28350, 0x00000f01, 0x00000000,
+       0x9508, 0x3700001f, 0x00000002,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0x001f3ae3, 0x000000c2,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 caicos_golden_registers[] =
+{
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5e78, 0x8f311ff1, 0x001000f0,
+       0x8c8, 0x00003420, 0x00001450,
+       0x8cc, 0x000fffff, 0x00040035,
+       0x3f90, 0xffff0000, 0xfffc0000,
+       0x9148, 0xffff0000, 0xfffc0000,
+       0x3f94, 0xffff0000, 0xfffc0000,
+       0x914c, 0xffff0000, 0xfffc0000,
+       0xc78, 0x00000080, 0x00000080,
+       0xbd4, 0x00073007, 0x00010001,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd0b8, 0x03773777, 0x02010001,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x98f8, 0x33773777, 0x02010001,
+       0x98fc, 0xffffffff, 0x33221100,
+       0x7030, 0x31000311, 0x00000011,
+       0x2f48, 0x33773777, 0x02010001,
+       0x6b28, 0x00000010, 0x00000012,
+       0x7728, 0x00000010, 0x00000012,
+       0x10328, 0x00000010, 0x00000012,
+       0x10f28, 0x00000010, 0x00000012,
+       0x11b28, 0x00000010, 0x00000012,
+       0x12728, 0x00000010, 0x00000012,
+       0x240c, 0x000007ff, 0x00000380,
+       0x8a14, 0xf000001f, 0x00000001,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x10c, 0x00000001, 0x00010003,
+       0xa02c, 0xffffffff, 0x0000009b,
+       0x913c, 0x0000000f, 0x0100000a,
+       0x8d00, 0xffff7f7f, 0x100e4848,
+       0x8d04, 0x00ffffff, 0x00164745,
+       0x8c00, 0xfffc0003, 0xe4000003,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x8c08, 0x00ff00ff, 0x001c001c,
+       0x8cf0, 0x1fff1fff, 0x08e00410,
+       0x8c20, 0x0fff0fff, 0x00800080,
+       0x8c24, 0x0fff0fff, 0x00800080,
+       0x8c18, 0xffffffff, 0x20202078,
+       0x8c1c, 0x0000ffff, 0x00001010,
+       0x28350, 0x00000f01, 0x00000000,
+       0x9508, 0x3700001f, 0x00000002,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0x001f3ae3, 0x000000c2,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000
+};
+
+static void evergreen_init_golden_registers(struct radeon_device *rdev)
+{
+       switch (rdev->family) {
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers2,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+               radeon_program_register_sequence(rdev,
+                                                cypress_mgcg_init,
+                                                (const u32)ARRAY_SIZE(cypress_mgcg_init));
+               break;
+       case CHIP_JUNIPER:
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers2,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+               radeon_program_register_sequence(rdev,
+                                                juniper_mgcg_init,
+                                                (const u32)ARRAY_SIZE(juniper_mgcg_init));
+               break;
+       case CHIP_REDWOOD:
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers2,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+               radeon_program_register_sequence(rdev,
+                                                redwood_mgcg_init,
+                                                (const u32)ARRAY_SIZE(redwood_mgcg_init));
+               break;
+       case CHIP_CEDAR:
+               radeon_program_register_sequence(rdev,
+                                                cedar_golden_registers,
+                                                (const u32)ARRAY_SIZE(cedar_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                evergreen_golden_registers2,
+                                                (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+               radeon_program_register_sequence(rdev,
+                                                cedar_mgcg_init,
+                                                (const u32)ARRAY_SIZE(cedar_mgcg_init));
+               break;
+       case CHIP_PALM:
+               radeon_program_register_sequence(rdev,
+                                                wrestler_golden_registers,
+                                                (const u32)ARRAY_SIZE(wrestler_golden_registers));
+               break;
+       case CHIP_SUMO:
+               radeon_program_register_sequence(rdev,
+                                                supersumo_golden_registers,
+                                                (const u32)ARRAY_SIZE(supersumo_golden_registers));
+               break;
+       case CHIP_SUMO2:
+               radeon_program_register_sequence(rdev,
+                                                supersumo_golden_registers,
+                                                (const u32)ARRAY_SIZE(supersumo_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                sumo_golden_registers,
+                                                (const u32)ARRAY_SIZE(sumo_golden_registers));
+               break;
+       case CHIP_BARTS:
+               radeon_program_register_sequence(rdev,
+                                                barts_golden_registers,
+                                                (const u32)ARRAY_SIZE(barts_golden_registers));
+               break;
+       case CHIP_TURKS:
+               radeon_program_register_sequence(rdev,
+                                                turks_golden_registers,
+                                                (const u32)ARRAY_SIZE(turks_golden_registers));
+               break;
+       case CHIP_CAICOS:
+               radeon_program_register_sequence(rdev,
+                                                caicos_golden_registers,
+                                                (const u32)ARRAY_SIZE(caicos_golden_registers));
+               break;
+       default:
+               break;
+       }
+}
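
Each golden-register table above is a flat array of { offset, and-mask, or-value } triples, and evergreen_init_golden_registers() hands them to radeon_program_register_sequence() three words at a time. A minimal sketch of what that helper is expected to do with each triple, a masked read-modify-write, or a plain write when the mask is 0xffffffff, assuming the driver's usual RREG32()/WREG32() accessors (this is an illustration, not the real helper, which lives elsewhere in the series and may differ in detail):

	static void program_register_sequence_sketch(struct radeon_device *rdev,
						     const u32 *registers,
						     const u32 array_size)
	{
		u32 tmp, reg, and_mask, or_mask;
		int i;

		if (array_size % 3)
			return;	/* tables are triples; anything else is malformed */

		for (i = 0; i < array_size; i += 3) {
			reg = registers[i + 0];
			and_mask = registers[i + 1];
			or_mask = registers[i + 2];

			if (and_mask == 0xffffffff) {
				tmp = or_mask;		/* full-word overwrite */
			} else {
				tmp = RREG32(reg);	/* keep bits outside the mask */
				tmp &= ~and_mask;
				tmp |= or_mask;
			}
			WREG32(reg, tmp);
		}
	}
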
+
 void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
                             unsigned *bankh, unsigned *mtaspect,
                             unsigned *tile_split)
@@ -84,6 +942,142 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
        }
 }
 
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+                             u32 cntl_reg, u32 status_reg)
+{
+       int r, i;
+       struct atom_clock_dividers dividers;
+
+       r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+                                          clock, false, &dividers);
+       if (r)
+               return r;
+
+       WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+       for (i = 0; i < 100; i++) {
+               if (RREG32(status_reg) & DCLK_STATUS)
+                       break;
+               mdelay(10);
+       }
+       if (i == 100)
+               return -ETIMEDOUT;
+
+       return 0;
+}
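
The WREG32_P() calls in sumo_set_uvd_clock() (and throughout the UPLL code below) are masked writes: the third argument names the bits to preserve, so passing ~(SOME_MASK) updates only that field. Roughly, and only as an illustration of the semantics assumed here (wreg32_p_sketch is not the real macro):

	/* Approximate expansion of WREG32_P(reg, val, keep_mask) as used above. */
	static inline void wreg32_p_sketch(struct radeon_device *rdev,
					   u32 reg, u32 val, u32 keep_mask)
	{
		u32 tmp = RREG32(reg);

		tmp &= keep_mask;		/* bits left untouched */
		tmp |= (val & ~keep_mask);	/* bits being programmed */
		WREG32(reg, tmp);
	}

So WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN | DCLK_DIVIDER_MASK)) only touches the divider field and the direct-control enable bit, leaving the rest of the register alone.
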
+
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+       int r = 0;
+       u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+       r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+       if (r)
+               goto done;
+       cg_scratch &= 0xffff0000;
+       cg_scratch |= vclk / 100; /* MHz */
+
+       r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+       if (r)
+               goto done;
+       cg_scratch &= 0x0000ffff;
+       cg_scratch |= (dclk / 100) << 16; /* MHz */
+
+done:
+       WREG32(CG_SCRATCH1, cg_scratch);
+
+       return r;
+}
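
CG_SCRATCH1 is used here as a scratch record of the clocks just programmed: VCLK in MHz in bits 15:0, DCLK in MHz in bits 31:16. The division by 100 assumes the clocks are passed in the driver's usual 10 kHz units. A short worked example with hypothetical values:

	/* Hypothetical request: vclk = 53300, dclk = 40000 (10 kHz units). */
	u32 cg_scratch = 0;

	cg_scratch |= 53300 / 100;		/* 533 MHz -> 0x0215 in bits 15:0  */
	cg_scratch |= (40000 / 100) << 16;	/* 400 MHz -> 0x0190 in bits 31:16 */
	/* cg_scratch ends up as 0x01900215 */
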
+
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+       /* start off with something large */
+       unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+       int r;
+
+       /* bypass vclk and dclk with bclk */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+               VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+               ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+       /* put PLL in bypass mode */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+       if (!vclk || !dclk) {
+               /* keep the Bypass mode, put PLL to sleep */
+               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+               return 0;
+       }
+
+       r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+                                         16384, 0x03FFFFFF, 0, 128, 5,
+                                         &fb_div, &vclk_div, &dclk_div);
+       if (r)
+               return r;
+
+       /* set VCO_MODE to 1 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+       /* toggle UPLL_SLEEP to 1 then back to 0 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+       /* deassert UPLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+       mdelay(1);
+
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+
+       /* assert UPLL_RESET again */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+       /* disable spread spectrum. */
+       WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+       /* set feedback divider */
+       WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+       /* set ref divider to 0 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+       if (fb_div < 307200)
+               WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+       else
+               WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+       /* set PDIV_A and PDIV_B */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+               UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+               ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+       /* give the PLL some time to settle */
+       mdelay(15);
+
+       /* deassert PLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+       mdelay(15);
+
+       /* switch from bypass mode to normal mode */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+
+       /* switch VCLK and DCLK selection */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+               VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+               ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+       mdelay(100);
+
+       return 0;
+}
+
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
        u16 ctl, v;
@@ -105,6 +1099,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
        }
 }
 
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+       if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+               return true;
+       else
+               return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+       u32 pos1, pos2;
+
+       pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+       pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       if (pos1 != pos2)
+               return true;
+       else
+               return false;
+}
+
 /**
  * dce4_wait_for_vblank - vblank wait asic callback.
  *
@@ -115,21 +1130,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  */
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-       int i;
+       unsigned i = 0;
 
        if (crtc >= rdev->num_crtc)
                return;
 
-       if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
-               for (i = 0; i < rdev->usec_timeout; i++) {
-                       if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+       if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+               return;
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (dce4_is_in_vblank(rdev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce4_is_counter_moving(rdev, crtc))
                                break;
-                       udelay(1);
                }
-               for (i = 0; i < rdev->usec_timeout; i++) {
-                       if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+       }
+
+       while (!dce4_is_in_vblank(rdev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce4_is_counter_moving(rdev, crtc))
                                break;
-                       udelay(1);
                }
        }
 }
@@ -608,6 +1630,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+                       /* don't try to enable hpd on eDP or LVDS; this avoids breaking the
+                        * aux dp channel on iMacs and helps (but does not completely fix)
+                        * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+                        * and also avoids interrupt storms during dpms.
+                        */
+                       continue;
+               }
                switch (radeon_connector->hpd.hpd) {
                case RADEON_HPD_1:
                        WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +2357,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
                                if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
                                        radeon_wait_for_vblank(rdev, i);
-                                       tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                                       tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
                                if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
                                        radeon_wait_for_vblank(rdev, i);
-                                       tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                                       tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
                                        WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
@@ -1347,6 +2378,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        break;
                                udelay(1);
                        }
+
+                       /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+                       tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+                       WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       save->crtc_enabled[i] = false;
+                       /* ***** */
                } else {
                        save->crtc_enabled[i] = false;
                }
@@ -1364,6 +2404,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
        }
        /* wait for the MC to settle */
        udelay(100);
+
+       /* lock double buffered regs */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                       if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+                               tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+                               WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (!(tmp & 1)) {
+                               tmp |= 1;
+                               WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+               }
+       }
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +2441,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
        WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
 
+       /* unlock regs and wait for update */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+                       if ((tmp & 0x3) != 0) {
+                               tmp &= ~0x3;
+                               WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                       if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+                               tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+                               WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (tmp & 1) {
+                               tmp &= ~1;
+                               WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                               if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+
        /* unblackout the MC */
        tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
        tmp &= ~BLACKOUT_MODE_MASK;
@@ -2050,6 +3133,14 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        }
        /* enabled rb are just the one not disabled :) */
        disabled_rb_mask = tmp;
+       tmp = 0;
+       for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
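
The added check above is a sanity fix-up for the render-backend mask: tmp collects a bit for every backend the chip is supposed to have, and if the harvest state read back claims all of them are disabled (not a valid configuration), those bits are cleared again so the backends stay usable. An illustrative walk-through with max_backends = 4:

	u32 tmp = 0;
	u32 disabled_rb_mask = 0x0f;	/* pretend every RB read back as disabled */
	int i;

	for (i = 0; i < 4; i++)
		tmp |= (1 << i);	/* tmp = 0x0f */

	if ((disabled_rb_mask & tmp) == tmp) {	/* all four claimed disabled */
		for (i = 0; i < 4; i++)
			disabled_rb_mask &= ~(1 << i);
	}
	/* disabled_rb_mask is now 0x00, i.e. all backends treated as enabled */
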
 
        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
        WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -2058,6 +3149,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
 
        if ((rdev->config.evergreen.max_backends == 1) &&
            (rdev->flags & RADEON_IS_IGP)) {
@@ -3360,6 +4454,9 @@ restart_ih:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
                                break;
                        }
+               case 124: /* UVD */
+                       DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+                       radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
                        break;
                case 146:
                case 147:
@@ -3571,7 +4668,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       struct radeon_ring *ring;
        int r;
 
        /* enable pcie gen2 link */
@@ -3638,6 +4735,17 @@ static int evergreen_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = rv770_uvd_resume(rdev);
+       if (!r) {
+               r = radeon_fence_driver_start_ring(rdev,
+                                                  R600_RING_TYPE_UVD_INDEX);
+               if (r)
+                       dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+       }
+
+       if (r)
+               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -3647,6 +4755,7 @@ static int evergreen_startup(struct radeon_device *rdev)
        }
        evergreen_irq_set(rdev);
 
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                             R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             0, 0xfffff, RADEON_CP_PACKET2);
@@ -3670,6 +4779,19 @@ static int evergreen_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+       if (ring->ring_size) {
+               r = radeon_ring_init(rdev, ring, ring->ring_size,
+                                    R600_WB_UVD_RPTR_OFFSET,
+                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+                                    0, 0xfffff, RADEON_CP_PACKET2);
+               if (!r)
+                       r = r600_uvd_init(rdev);
+
+               if (r)
+                       DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
+       }
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3701,6 +4823,9 @@ int evergreen_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       /* init golden registers */
+       evergreen_init_golden_registers(rdev);
+
        rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
@@ -3716,8 +4841,10 @@ int evergreen_resume(struct radeon_device *rdev)
 int evergreen_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_uvd_suspend(rdev);
        r700_cp_stop(rdev);
        r600_dma_stop(rdev);
+       r600_uvd_rbc_stop(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
@@ -3762,6 +4889,8 @@ int evergreen_init(struct radeon_device *rdev)
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
+       /* init golden registers */
+       evergreen_init_golden_registers(rdev);
        /* Initialize scratch registers */
        r600_scratch_init(rdev);
        /* Initialize surface registers */
@@ -3797,6 +4926,13 @@ int evergreen_init(struct radeon_device *rdev)
        rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
+       r = radeon_uvd_init(rdev);
+       if (!r) {
+               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+               r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+                              4096);
+       }
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3843,6 +4979,7 @@ void evergreen_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
+       radeon_uvd_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
@@ -3878,7 +5015,7 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
        if (!(mask & DRM_PCIE_SPEED_50))
                return;
 
-       speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        if (speed_cntl & LC_CURRENT_DATA_RATE) {
                DRM_INFO("PCIE gen 2 link speeds already enabled\n");
                return;
@@ -3889,33 +5026,33 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
        if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
            (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
-               link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+               link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
                link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl |= LC_GEN2_EN_STRAP;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
        } else {
-               link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+               link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
                /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
                if (1)
                        link_width_cntl |= LC_UPCONFIGURE_DIS;
                else
                        link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        }
 }
index 4fdecc2..b4ab8ce 100644 (file)
@@ -54,6 +54,68 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
        WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
+static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+{
+       struct radeon_device *rdev = encoder->dev->dev_private;
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector = NULL;
+       struct cea_sad *sads;
+       int i, sad_count;
+
+       static const u16 eld_reg_to_type[][2] = {
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+               { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+       };
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder)
+                       radeon_connector = to_radeon_connector(connector);
+       }
+
+       if (!radeon_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+       if (sad_count < 0) {
+               DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+               return;
+       }
+       BUG_ON(!sads);
+
+       for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+               u32 value = 0;
+               int j;
+
+               for (j = 0; j < sad_count; j++) {
+                       struct cea_sad *sad = &sads[j];
+
+                       if (sad->format == eld_reg_to_type[i][1]) {
+                               value = MAX_CHANNELS(sad->channels) |
+                                       DESCRIPTOR_BYTE_2(sad->byte2) |
+                                       SUPPORTED_FREQUENCIES(sad->freq);
+                               if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+                                       value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+                               break;
+                       }
+               }
+               WREG32(eld_reg_to_type[i][0], value);
+       }
+
+       kfree(sads);
+}
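
evergreen_hdmi_write_sad_regs() translates the connector's EDID Short Audio Descriptors into the HDA pin's per-format descriptor registers: for each supported CEA coding type it packs the channel count, the format-specific byte 2 and the sample-rate mask into the matching AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTORx register, and writes 0 for formats the sink does not advertise. As a hypothetical example, a sink that only advertises 2-channel PCM would end up with a single non-zero descriptor, roughly:

	/* Hypothetical 2-channel PCM SAD; sad_channels/sad_byte2/sad_freq stand
	 * in for the fields drm_edid_to_sad() would have extracted from the EDID.
	 */
	u32 value = MAX_CHANNELS(sad_channels) |
		    DESCRIPTOR_BYTE_2(sad_byte2) |	/* PCM: supported bit depths */
		    SUPPORTED_FREQUENCIES(sad_freq) |
		    SUPPORTED_FREQUENCIES_STEREO(sad_freq);	/* PCM only */

	WREG32(AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, value);
	/* every other AUDIO_DESCRIPTORx register gets written as 0 */
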
+
 /*
  * build a HDMI Video Info Frame
  */
@@ -85,6 +147,30 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
                frame[0xC] | (frame[0xD] << 8));
 }
 
+static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+       u32 base_rate = 48000;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* XXX: properly calculate this */
+       /* XXX two dtos; generally use dto0 for hdmi */
+       /* Express [24MHz / target pixel clock] as an exact rational
+        * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
+        * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+        */
+       WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
+       WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
+       WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+}
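
With the fixed 48 kHz base rate, the DTO pair programmed above encodes 24 MHz / pixel clock as an exact fraction: PHASE = 48000 * 50 = 2,400,000 and MODULE = clock * 100, so PHASE/MODULE = 24000/clock with clock in kHz. A quick worked example for a 148.5 MHz (1080p at 60 Hz) pixel clock:

	/* Worked example, mode->clock = 148500 (kHz). */
	u32 base_rate = 48000;
	u32 clock = 148500;

	u32 phase  = (base_rate * 50) & 0xffffff;	/* 2400000  */
	u32 module = (clock * 100) & 0xffffff;		/* 14850000 */
	/* phase / module = 2400000 / 14850000 ~ 0.1616 = 24 MHz / 148.5 MHz */
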
+
+
 /*
  * update the info frames with the data from the current display mode
  */
@@ -104,33 +190,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
                return;
        offset = dig->afmt->offset;
 
-       r600_audio_set_clock(encoder, mode->clock);
+       evergreen_audio_set_dto(encoder, mode->clock);
 
        WREG32(HDMI_VBI_PACKET_CONTROL + offset,
               HDMI_NULL_SEND); /* send null packets when required */
 
        WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
 
-       WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
-              HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
-              HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
-       WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
-              AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
-              AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
-
-       WREG32(HDMI_ACR_PACKET_CONTROL + offset,
-              HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-              HDMI_ACR_SOURCE); /* select SW CTS value */
-
        WREG32(HDMI_VBI_PACKET_CONTROL + offset,
               HDMI_NULL_SEND | /* send null packets when required */
               HDMI_GC_SEND | /* send general control packets */
               HDMI_GC_CONT); /* send general control packets every frame */
 
        WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
-              HDMI_AVI_INFO_SEND | /* enable AVI info frames */
-              HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
               HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
               HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
 
@@ -138,11 +210,47 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
               AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
 
        WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
-              HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
               HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
        WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
 
+       WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+              HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+              HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+       WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+              AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+       /* fglrx clears something in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+       WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+              HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+              HDMI_ACR_SOURCE); /* select SW CTS value */
+
+       evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+       WREG32(AFMT_60958_0 + offset,
+              AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+
+       WREG32(AFMT_60958_1 + offset,
+              AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+
+       WREG32(AFMT_60958_2 + offset,
+              AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+              AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+              AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+              AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+              AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+              AFMT_60958_CS_CHANNEL_NUMBER_7(8));
+
+       /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+
+       WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+              AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
+       /* fglrx sets 0x40 in 0x5f80 here */
+       evergreen_hdmi_write_sad_regs(encoder);
+
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
@@ -156,7 +264,17 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        }
 
        evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-       evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+       WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+                 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+                 HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+
+       WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+                HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+                ~HDMI_AVI_INFO_LINE_MASK);
+
+       WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+                 AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
 
        /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
        WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
@@ -164,3 +282,20 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
        WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
 }
+
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+       /* Silent, r600_hdmi_enable will raise WARN for us */
+       if (enable && dig->afmt->enabled)
+               return;
+       if (!enable && !dig->afmt->enabled)
+               return;
+
+       dig->afmt->enabled = enable;
+
+       DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+                 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
index f585be1..881aba2 100644 (file)
 #define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 
 #define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
 #define EVERGREEN_DC_GPIO_HPD_A                         0x64b4
index 982d25a..75c0563 100644 (file)
 #define RCU_IND_INDEX                                  0x100
 #define RCU_IND_DATA                                   0x104
 
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL                              0x718
+#      define UPLL_RESET_MASK                          0x00000001
+#      define UPLL_SLEEP_MASK                          0x00000002
+#      define UPLL_BYPASS_EN_MASK                      0x00000004
+#      define UPLL_CTLREQ_MASK                         0x00000008
+#      define UPLL_REF_DIV_MASK                        0x003F0000
+#      define UPLL_VCO_MODE_MASK                       0x00000200
+#      define UPLL_CTLACK_MASK                         0x40000000
+#      define UPLL_CTLACK2_MASK                        0x80000000
+#define CG_UPLL_FUNC_CNTL_2                            0x71c
+#      define UPLL_PDIV_A(x)                           ((x) << 0)
+#      define UPLL_PDIV_A_MASK                         0x0000007F
+#      define UPLL_PDIV_B(x)                           ((x) << 8)
+#      define UPLL_PDIV_B_MASK                         0x00007F00
+#      define VCLK_SRC_SEL(x)                          ((x) << 20)
+#      define VCLK_SRC_SEL_MASK                        0x01F00000
+#      define DCLK_SRC_SEL(x)                          ((x) << 25)
+#      define DCLK_SRC_SEL_MASK                        0x3E000000
+#define CG_UPLL_FUNC_CNTL_3                            0x720
+#      define UPLL_FB_DIV(x)                           ((x) << 0)
+#      define UPLL_FB_DIV_MASK                         0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4                            0x854
+#      define UPLL_SPARE_ISPARE9                       0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM                                0x79c
+#      define SSEN_MASK                                0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL                                    0x610
+#       define DCLK_DIVIDER_MASK                        0x7f
+#       define DCLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_DCLK_STATUS                                  0x614
+#       define DCLK_STATUS                              (1 << 0)
+#define CG_VCLK_CNTL                                    0x618
+#define CG_VCLK_STATUS                                  0x61c
+#define        CG_SCRATCH1                                     0x820
+
 #define GRBM_GFX_INDEX                                 0x802C
 #define                INSTANCE_INDEX(x)                       ((x) << 0)
 #define                SE_INDEX(x)                             ((x) << 16)
 #       define HDMI_MPEG_INFO_CONT           (1 << 9)
 #define HDMI_INFOFRAME_CONTROL1              0x7048
 #       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AVI_INFO_LINE_MASK       (0x3f << 0)
 #       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
 #       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
 #define HDMI_GENERIC_PACKET_CONTROL          0x704c
 #       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
 #       define SELECTABLE_DEEMPHASIS                      (1 << 6)
 
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG                           0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG                                0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG                       0xef54
+#define UVD_RBC_RB_RPTR                                        0xf690
+#define UVD_RBC_RB_WPTR                                        0xf694
+
 /*
  * PM4
  */
index 27769e7..7969c0c 100644 (file)
@@ -78,6 +78,282 @@ MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
 MODULE_FIRMWARE("radeon/ARUBA_me.bin");
 MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
 
+
+static const u32 cayman_golden_registers2[] =
+{
+       0x3e5c, 0xffffffff, 0x00000000,
+       0x3e48, 0xffffffff, 0x00000000,
+       0x3e4c, 0xffffffff, 0x00000000,
+       0x3e64, 0xffffffff, 0x00000000,
+       0x3e50, 0xffffffff, 0x00000000,
+       0x3e60, 0xffffffff, 0x00000000
+};
+
+static const u32 cayman_golden_registers[] =
+{
+       0x5eb4, 0xffffffff, 0x00000002,
+       0x5e78, 0x8f311ff1, 0x001000f0,
+       0x3f90, 0xffff0000, 0xff000000,
+       0x9148, 0xffff0000, 0xff000000,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0xc78, 0x00000080, 0x00000080,
+       0xbd4, 0x70073777, 0x00011003,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd0b8, 0x73773777, 0x02011003,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x98f8, 0x33773777, 0x02011003,
+       0x98fc, 0xffffffff, 0x76541032,
+       0x7030, 0x31000311, 0x00000011,
+       0x2f48, 0x33773777, 0x42010001,
+       0x6b28, 0x00000010, 0x00000012,
+       0x7728, 0x00000010, 0x00000012,
+       0x10328, 0x00000010, 0x00000012,
+       0x10f28, 0x00000010, 0x00000012,
+       0x11b28, 0x00000010, 0x00000012,
+       0x12728, 0x00000010, 0x00000012,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x10c, 0x00000001, 0x00010003,
+       0xa02c, 0xffffffff, 0x0000009b,
+       0x913c, 0x0000010f, 0x01000100,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x28350, 0x00000f01, 0x00000000,
+       0x9508, 0x3700001f, 0x00000002,
+       0x960c, 0xffffffff, 0x54763210,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x88d0, 0xffffffff, 0x0f40df40,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 dvst_golden_registers2[] =
+{
+       0x8f8, 0xffffffff, 0,
+       0x8fc, 0x00380000, 0,
+       0x8f8, 0xffffffff, 1,
+       0x8fc, 0x0e000000, 0
+};
+
+static const u32 dvst_golden_registers[] =
+{
+       0x690, 0x3fff3fff, 0x20c00033,
+       0x918c, 0x0fff0fff, 0x00010006,
+       0x91a8, 0x0fff0fff, 0x00010006,
+       0x9150, 0xffffdfff, 0x6e944040,
+       0x917c, 0x0fff0fff, 0x00030002,
+       0x9198, 0x0fff0fff, 0x00030002,
+       0x915c, 0x0fff0fff, 0x00010000,
+       0x3f90, 0xffff0001, 0xff000000,
+       0x9178, 0x0fff0fff, 0x00070000,
+       0x9194, 0x0fff0fff, 0x00070000,
+       0x9148, 0xffff0001, 0xff000000,
+       0x9190, 0x0fff0fff, 0x00090008,
+       0x91ac, 0x0fff0fff, 0x00090008,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x929c, 0x00000fff, 0x00000001,
+       0x55e4, 0xff607fff, 0xfc000100,
+       0x8a18, 0xff000fff, 0x00000100,
+       0x8b28, 0xff000fff, 0x00000100,
+       0x9144, 0xfffc0fff, 0x00000100,
+       0x6ed8, 0x00010101, 0x00010000,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0xfffffffe, 0x00000000,
+       0xd0c0, 0xff000fff, 0x00000100,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd0b8, 0x73773777, 0x12010001,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x98f8, 0x73773777, 0x12010001,
+       0x98fc, 0xffffffff, 0x00000010,
+       0x9b7c, 0x00ff0000, 0x00fc0000,
+       0x8030, 0x00001f0f, 0x0000100a,
+       0x2f48, 0x73773777, 0x12010001,
+       0x2408, 0x00030000, 0x000c007f,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x4d8, 0x00000fff, 0x00000100,
+       0xa008, 0xffffffff, 0x00010000,
+       0x913c, 0xffff03ff, 0x01000100,
+       0x8c00, 0x000000ff, 0x00000003,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x8cf0, 0x1fff1fff, 0x08e00410,
+       0x28350, 0x00000f01, 0x00000000,
+       0x9508, 0xf700071f, 0x00000002,
+       0x960c, 0xffffffff, 0x54763210,
+       0x20ef8, 0x01ff01ff, 0x00000002,
+       0x20e98, 0xfffffbff, 0x00200000,
+       0x2015c, 0xffffffff, 0x00000f40,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x8978, 0x3fffffff, 0x04050140,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 scrapper_golden_registers[] =
+{
+       0x690, 0x3fff3fff, 0x20c00033,
+       0x918c, 0x0fff0fff, 0x00010006,
+       0x918c, 0x0fff0fff, 0x00010006,
+       0x91a8, 0x0fff0fff, 0x00010006,
+       0x91a8, 0x0fff0fff, 0x00010006,
+       0x9150, 0xffffdfff, 0x6e944040,
+       0x9150, 0xffffdfff, 0x6e944040,
+       0x917c, 0x0fff0fff, 0x00030002,
+       0x917c, 0x0fff0fff, 0x00030002,
+       0x9198, 0x0fff0fff, 0x00030002,
+       0x9198, 0x0fff0fff, 0x00030002,
+       0x915c, 0x0fff0fff, 0x00010000,
+       0x915c, 0x0fff0fff, 0x00010000,
+       0x3f90, 0xffff0001, 0xff000000,
+       0x3f90, 0xffff0001, 0xff000000,
+       0x9178, 0x0fff0fff, 0x00070000,
+       0x9178, 0x0fff0fff, 0x00070000,
+       0x9194, 0x0fff0fff, 0x00070000,
+       0x9194, 0x0fff0fff, 0x00070000,
+       0x9148, 0xffff0001, 0xff000000,
+       0x9148, 0xffff0001, 0xff000000,
+       0x9190, 0x0fff0fff, 0x00090008,
+       0x9190, 0x0fff0fff, 0x00090008,
+       0x91ac, 0x0fff0fff, 0x00090008,
+       0x91ac, 0x0fff0fff, 0x00090008,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x3f94, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x914c, 0xffff0000, 0xff000000,
+       0x929c, 0x00000fff, 0x00000001,
+       0x929c, 0x00000fff, 0x00000001,
+       0x55e4, 0xff607fff, 0xfc000100,
+       0x8a18, 0xff000fff, 0x00000100,
+       0x8a18, 0xff000fff, 0x00000100,
+       0x8b28, 0xff000fff, 0x00000100,
+       0x8b28, 0xff000fff, 0x00000100,
+       0x9144, 0xfffc0fff, 0x00000100,
+       0x9144, 0xfffc0fff, 0x00000100,
+       0x6ed8, 0x00010101, 0x00010000,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0xfffffffe, 0x00000000,
+       0x9838, 0xfffffffe, 0x00000000,
+       0xd0c0, 0xff000fff, 0x00000100,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd02c, 0xbfffff1f, 0x08421000,
+       0xd0b8, 0x73773777, 0x12010001,
+       0xd0b8, 0x73773777, 0x12010001,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x98f8, 0x73773777, 0x12010001,
+       0x98f8, 0x73773777, 0x12010001,
+       0x98fc, 0xffffffff, 0x00000010,
+       0x98fc, 0xffffffff, 0x00000010,
+       0x9b7c, 0x00ff0000, 0x00fc0000,
+       0x9b7c, 0x00ff0000, 0x00fc0000,
+       0x8030, 0x00001f0f, 0x0000100a,
+       0x8030, 0x00001f0f, 0x0000100a,
+       0x2f48, 0x73773777, 0x12010001,
+       0x2f48, 0x73773777, 0x12010001,
+       0x2408, 0x00030000, 0x000c007f,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b24, 0x3fff3fff, 0x00ff0fff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x4d8, 0x00000fff, 0x00000100,
+       0x4d8, 0x00000fff, 0x00000100,
+       0xa008, 0xffffffff, 0x00010000,
+       0xa008, 0xffffffff, 0x00010000,
+       0x913c, 0xffff03ff, 0x01000100,
+       0x913c, 0xffff03ff, 0x01000100,
+       0x90e8, 0x001fffff, 0x010400c0,
+       0x8c00, 0x000000ff, 0x00000003,
+       0x8c00, 0x000000ff, 0x00000003,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x8c04, 0xf8ff00ff, 0x40600060,
+       0x8c30, 0x0000000f, 0x00040005,
+       0x8cf0, 0x1fff1fff, 0x08e00410,
+       0x8cf0, 0x1fff1fff, 0x08e00410,
+       0x900c, 0x00ffffff, 0x0017071f,
+       0x28350, 0x00000f01, 0x00000000,
+       0x28350, 0x00000f01, 0x00000000,
+       0x9508, 0xf700071f, 0x00000002,
+       0x9508, 0xf700071f, 0x00000002,
+       0x9688, 0x00300000, 0x0017000f,
+       0x960c, 0xffffffff, 0x54763210,
+       0x960c, 0xffffffff, 0x54763210,
+       0x20ef8, 0x01ff01ff, 0x00000002,
+       0x20e98, 0xfffffbff, 0x00200000,
+       0x2015c, 0xffffffff, 0x00000f40,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x8978, 0x3fffffff, 0x04050140,
+       0x8978, 0x3fffffff, 0x04050140,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x8974, 0xffffffff, 0x00000000,
+       0x8974, 0xffffffff, 0x00000000
+};
+
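+/* apply the per-ASIC golden settings; ARUBA parts pick between the "dvst"
+ * (presumably Devastator) and Scrapper tables based on PCI device ID
+ */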
+static void ni_init_golden_registers(struct radeon_device *rdev)
+{
+       switch (rdev->family) {
+       case CHIP_CAYMAN:
+               radeon_program_register_sequence(rdev,
+                                                cayman_golden_registers,
+                                                (const u32)ARRAY_SIZE(cayman_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                cayman_golden_registers2,
+                                                (const u32)ARRAY_SIZE(cayman_golden_registers2));
+               break;
+       case CHIP_ARUBA:
+               if ((rdev->pdev->device == 0x9900) ||
+                   (rdev->pdev->device == 0x9901) ||
+                   (rdev->pdev->device == 0x9903) ||
+                   (rdev->pdev->device == 0x9904) ||
+                   (rdev->pdev->device == 0x9905) ||
+                   (rdev->pdev->device == 0x9906) ||
+                   (rdev->pdev->device == 0x9907) ||
+                   (rdev->pdev->device == 0x9908) ||
+                   (rdev->pdev->device == 0x9909) ||
+                   (rdev->pdev->device == 0x990A) ||
+                   (rdev->pdev->device == 0x990B) ||
+                   (rdev->pdev->device == 0x990C) ||
+                   (rdev->pdev->device == 0x990D) ||
+                   (rdev->pdev->device == 0x990E) ||
+                   (rdev->pdev->device == 0x990F) ||
+                   (rdev->pdev->device == 0x9910) ||
+                   (rdev->pdev->device == 0x9913) ||
+                   (rdev->pdev->device == 0x9917) ||
+                   (rdev->pdev->device == 0x9918)) {
+                       radeon_program_register_sequence(rdev,
+                                                        dvst_golden_registers,
+                                                        (const u32)ARRAY_SIZE(dvst_golden_registers));
+                       radeon_program_register_sequence(rdev,
+                                                        dvst_golden_registers2,
+                                                        (const u32)ARRAY_SIZE(dvst_golden_registers2));
+               } else {
+                       radeon_program_register_sequence(rdev,
+                                                        scrapper_golden_registers,
+                                                        (const u32)ARRAY_SIZE(scrapper_golden_registers));
+                       radeon_program_register_sequence(rdev,
+                                                        dvst_golden_registers2,
+                                                        (const u32)ARRAY_SIZE(dvst_golden_registers2));
+               }
+               break;
+       default:
+               break;
+       }
+}
+
 #define BTC_IO_MC_REGS_SIZE 29
 
 static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@@ -473,7 +749,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                    (rdev->pdev->device == 0x990F) ||
                    (rdev->pdev->device == 0x9910) ||
                    (rdev->pdev->device == 0x9917) ||
-                   (rdev->pdev->device == 0x9999)) {
+                   (rdev->pdev->device == 0x9999) ||
+                   (rdev->pdev->device == 0x999C)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9903) ||
@@ -482,7 +759,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                           (rdev->pdev->device == 0x990D) ||
                           (rdev->pdev->device == 0x990E) ||
                           (rdev->pdev->device == 0x9913) ||
-                          (rdev->pdev->device == 0x9918)) {
+                          (rdev->pdev->device == 0x9918) ||
+                          (rdev->pdev->device == 0x999D)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9919) ||
@@ -615,15 +893,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        }
        /* enabled rb are just the one not disabled :) */
        disabled_rb_mask = tmp;
+       tmp = 0;
+       for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
 
        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
        WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       if (ASIC_IS_DCE6(rdev))
+               WREG32(DMIF_ADDR_CALC, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+       WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
 
        if ((rdev->config.cayman.max_backends_per_se == 1) &&
            (rdev->flags & RADEON_IS_IGP)) {
@@ -931,6 +1222,23 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
        radeon_ring_write(ring, 10); /* poll interval */
 }
 
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+                              struct radeon_ring *ring,
+                              struct radeon_semaphore *semaphore,
+                              bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+
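+       /* the semaphore address is programmed in 8-byte units, split into
+        * two 20-bit halves
+        */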
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+       radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+       radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+       radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
+
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 {
        if (enable)
@@ -1682,6 +1990,16 @@ static int cayman_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = rv770_uvd_resume(rdev);
+       if (!r) {
+               r = radeon_fence_driver_start_ring(rdev,
+                                                  R600_RING_TYPE_UVD_INDEX);
+               if (r)
+                       dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+       }
+       if (r)
+               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
        r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -1748,6 +2066,18 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
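+       /* the UVD ring is only brought up if the UVD resume and fence setup
+        * earlier in this function succeeded; otherwise ring_size is zero
+        */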
+       if (ring->ring_size) {
+               r = radeon_ring_init(rdev, ring, ring->ring_size,
+                                    R600_WB_UVD_RPTR_OFFSET,
+                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+                                    0, 0xfffff, RADEON_CP_PACKET2);
+               if (!r)
+                       r = r600_uvd_init(rdev);
+               if (r)
+                       DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+       }
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1778,6 +2108,9 @@ int cayman_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       /* init golden registers */
+       ni_init_golden_registers(rdev);
+
        rdev->accel_working = true;
        r = cayman_startup(rdev);
        if (r) {
@@ -1794,6 +2127,8 @@ int cayman_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        cayman_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
+       r600_uvd_rbc_stop(rdev);
+       radeon_uvd_suspend(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
@@ -1834,6 +2169,8 @@ int cayman_init(struct radeon_device *rdev)
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
+       /* init golden registers */
+       ni_init_golden_registers(rdev);
        /* Initialize scratch registers */
        r600_scratch_init(rdev);
        /* Initialize surface registers */
@@ -1868,6 +2205,13 @@ int cayman_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 64 * 1024);
 
+       r = radeon_uvd_init(rdev);
+       if (!r) {
+               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+               ring->ring_obj = NULL;
+               r600_ring_init(rdev, ring, 4096);
+       }
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1919,6 +2263,7 @@ void cayman_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
+       radeon_uvd_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
@@ -2017,28 +2362,57 @@ void cayman_vm_set_page(struct radeon_device *rdev,
                        }
                }
        } else {
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
+               if ((flags & RADEON_VM_PAGE_SYSTEM) ||
+                   (count == 1)) {
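+                       /* system pages (and single-page updates) use plain DMA
+                        * write packets; contiguous VRAM ranges use the PTE_PDE
+                        * packet in the else branch below
+                        */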
+                       while (count) {
+                               ndw = count * 2;
+                               if (ndw > 0xFFFFE)
+                                       ndw = 0xFFFFE;
+
+                               /* for non-physically contiguous pages (system) */
+                               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+                               ib->ptr[ib->length_dw++] = pe;
+                               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                               value = radeon_vm_map_gart(rdev, addr);
+                                               value &= 0xFFFFFFFFFFFFF000ULL;
+                                       } else if (flags & RADEON_VM_PAGE_VALID) {
+                                               value = addr;
+                                       } else {
+                                               value = 0;
+                                       }
+                                       addr += incr;
+                                       value |= r600_flags;
+                                       ib->ptr[ib->length_dw++] = value;
+                                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
+                               }
+                       }
+                       while (ib->length_dw & 0x7)
+                               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+               } else {
+                       while (count) {
+                               ndw = count * 2;
+                               if (ndw > 0xFFFFE)
+                                       ndw = 0xFFFFE;
 
-                       /* for non-physically contiguous pages (system) */
-                       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-                       ib->ptr[ib->length_dw++] = pe;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                               if (flags & RADEON_VM_PAGE_SYSTEM) {
-                                       value = radeon_vm_map_gart(rdev, addr);
-                                       value &= 0xFFFFFFFFFFFFF000ULL;
-                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                               if (flags & RADEON_VM_PAGE_VALID)
                                        value = addr;
-                               } else {
+                               else
                                        value = 0;
-                               }
-                               addr += incr;
-                               value |= r600_flags;
-                               ib->ptr[ib->length_dw++] = value;
+                               /* for physically contiguous pages (vram) */
+                               ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+                               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+                               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                               ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+                               ib->ptr[ib->length_dw++] = 0;
+                               ib->ptr[ib->length_dw++] = value; /* value */
                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
+                               ib->ptr[ib->length_dw++] = incr; /* increment size */
+                               ib->ptr[ib->length_dw++] = 0;
+                               pe += ndw * 4;
+                               addr += (ndw / 2) * incr;
+                               count -= ndw / 2;
                        }
                }
                while (ib->length_dw & 0x7)
index 079dee2..e226faf 100644 (file)
 #define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
 
 #define DMIF_ADDR_CONFIG                               0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC                                 0xC00
+
 #define        SRBM_GFX_CNTL                                   0x0E44
 #define                RINGID(x)                                       (((x) & 0x3) << 0)
 #define                VMID(x)                                         (((x) & 0x7) << 0)
 #       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
 #       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
 
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW                              0xEF00
+#define UVD_SEMA_ADDR_HIGH                             0xEF04
+#define UVD_SEMA_CMD                                   0xEF08
+#define UVD_UDEC_ADDR_CONFIG                           0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG                                0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG                       0xEF54
+#define UVD_RBC_RB_RPTR                                        0xF690
+#define UVD_RBC_RB_WPTR                                        0xF694
+
 /*
  * PM4
  */
                                         (((vmid) & 0xF) << 20) |       \
                                         (((n) & 0xFFFFF) << 0))
 
+#define DMA_PTE_PDE_PACKET(n)          ((2 << 28) |                    \
+                                        (1 << 26) |                    \
+                                        (1 << 21) |                    \
+                                        (((n) & 0xFFFFF) << 0))
+
 /* async DMA Packet types */
 #define        DMA_PACKET_WRITE                                  0x2
 #define        DMA_PACKET_COPY                                   0x3
index 9db5853..4973bff 100644 (file)
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * and others in some cases.
  */
 
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+       if (crtc == 0) {
+               if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+                       return true;
+               else
+                       return false;
+       } else {
+               if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+                       return true;
+               else
+                       return false;
+       }
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+       u32 vline1, vline2;
+
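+       /* sample the current scanline twice; if it changed, the CRTC is scanning */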
+       if (crtc == 0) {
+               vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+               vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+       } else {
+               vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+               vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+       }
+       if (vline1 != vline2)
+               return true;
+       else
+               return false;
+}
+
 /**
  * r100_wait_for_vblank - vblank wait asic callback.
  *
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  */
 void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-       int i;
+       unsigned i = 0;
 
        if (crtc >= rdev->num_crtc)
                return;
 
        if (crtc == 0) {
-               if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
-                       for (i = 0; i < rdev->usec_timeout; i++) {
-                               if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
-                                       break;
-                               udelay(1);
-                       }
-                       for (i = 0; i < rdev->usec_timeout; i++) {
-                               if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
-                                       break;
-                               udelay(1);
-                       }
-               }
+               if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+                       return;
        } else {
-               if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
-                       for (i = 0; i < rdev->usec_timeout; i++) {
-                               if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
-                                       break;
-                               udelay(1);
-                       }
-                       for (i = 0; i < rdev->usec_timeout; i++) {
-                               if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
-                                       break;
-                               udelay(1);
-                       }
+               if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+                       return;
+       }
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (r100_is_in_vblank(rdev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!r100_is_counter_moving(rdev, crtc))
+                               break;
+               }
+       }
+
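+       /* now wait for the next vblank to start, again bailing out if the
+        * scanline counter stops moving
+        */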
+       while (!r100_is_in_vblank(rdev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!r100_is_counter_moving(rdev, crtc))
+                               break;
                }
        }
 }
index c0dc8d3..1dd0d32 100644 (file)
 #define AVIVO_D1CRTC_STATUS_HV_COUNT                            0x60ac
 #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
 
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK                         0x60e0
 #define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK                                0x60e8
 
 /* master controls */
 #define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
index 0740db3..1a08008 100644 (file)
@@ -1145,7 +1145,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
        }
        if (rdev->flags & RADEON_IS_AGP) {
                size_bf = mc->gtt_start;
-               size_af = 0xFFFFFFFF - mc->gtt_end;
+               size_af = mc->mc_mask - mc->gtt_end;
                if (size_bf > size_af) {
                        if (mc->mc_vram_size > size_bf) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
@@ -2551,6 +2551,193 @@ void r600_dma_fini(struct radeon_device *rdev)
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
 }
 
+/*
+ * UVD
+ */
+int r600_uvd_rbc_start(struct radeon_device *rdev)
+{
+       struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+       uint64_t rptr_addr;
+       uint32_t rb_bufsz, tmp;
+       int r;
+
+       rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
+
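+       /* the rptr writeback and the ring buffer must share one 4GB segment,
+        * since a single segment is programmed via UVD_LMI_EXT40_ADDR below
+        */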
+       if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
+               DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
+               return -EINVAL;
+       }
+
+       /* force RBC into idle state */
+       WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+       /* Set the write pointer delay */
+       WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+       /* set the wb address */
+       WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
+
+       /* program the 4GB memory segment for rptr and ring buffer */
+       WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
+                                  (0x7 << 16) | (0x1 << 31));
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+       ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+       WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+       /* set the ring address */
+       WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+       /* Set ring buffer size */
+       rb_bufsz = drm_order(ring->ring_size);
+       rb_bufsz = (0x1 << 8) | rb_bufsz;
+       WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
+
+       ring->ready = true;
+       r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       r = radeon_ring_lock(rdev, ring, 10);
+       if (r) {
+               DRM_ERROR("radeon: failed to lock UVD ring (%d).\n", r);
+               return r;
+       }
+
+       tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+       radeon_ring_write(ring, tmp);
+       radeon_ring_write(ring, 0xFFFFF);
+
+       tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+       radeon_ring_write(ring, tmp);
+       radeon_ring_write(ring, 0xFFFFF);
+
+       tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+       radeon_ring_write(ring, tmp);
+       radeon_ring_write(ring, 0xFFFFF);
+
+       /* Clear timeout status bits */
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+       radeon_ring_write(ring, 0x8);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+       radeon_ring_write(ring, 3);
+
+       radeon_ring_unlock_commit(rdev, ring);
+
+       return 0;
+}
+
+void r600_uvd_rbc_stop(struct radeon_device *rdev)
+{
+       struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+       /* force RBC into idle state */
+       WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+       ring->ready = false;
+}
+
+int r600_uvd_init(struct radeon_device *rdev)
+{
+       int i, j, r;
+
+       /* raise clocks while booting up the VCPU */
+       radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+       /* disable clock gating */
+       WREG32(UVD_CGC_GATE, 0);
+
+       /* disable interrupt */
+       WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+       /* put LMI, VCPU, RBC etc... into reset */
+       WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+              LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+              CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+       mdelay(5);
+
+       /* take UVD block out of reset */
+       WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+       mdelay(5);
+
+       /* initialize UVD memory controller */
+       WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+                            (1 << 21) | (1 << 9) | (1 << 20));
+
+       /* disable byte swapping */
+       WREG32(UVD_LMI_SWAP_CNTL, 0);
+       WREG32(UVD_MP_SWAP_CNTL, 0);
+
+       WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+       WREG32(UVD_MPC_SET_MUXA1, 0x0);
+       WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+       WREG32(UVD_MPC_SET_MUXB1, 0x0);
+       WREG32(UVD_MPC_SET_ALU, 0);
+       WREG32(UVD_MPC_SET_MUX, 0x88);
+
+       /* Stall UMC */
+       WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+
+       /* take all subblocks out of reset, except VCPU */
+       WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+       mdelay(5);
+
+       /* enable VCPU clock */
+       WREG32(UVD_VCPU_CNTL,  1 << 9);
+
+       /* enable UMC */
+       WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       /* boot up the VCPU */
+       WREG32(UVD_SOFT_RESET, 0);
+       mdelay(10);
+
+       WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+       for (i = 0; i < 10; ++i) {
+               uint32_t status;
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(UVD_STATUS);
+                       if (status & 2)
+                               break;
+                       mdelay(10);
+               }
+               r = 0;
+               if (status & 2)
+                       break;
+
+               DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+               WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+               mdelay(10);
+               WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+               mdelay(10);
+               r = -1;
+       }
+
+       if (r) {
+               DRM_ERROR("UVD not responding, giving up!!!\n");
+               radeon_set_uvd_clocks(rdev, 0, 0);
+               return r;
+       }
+
+       /* enable interrupt */
+       WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+       r = r600_uvd_rbc_start(rdev);
+       if (!r)
+               DRM_INFO("UVD initialized successfully.\n");
+
+       /* lower clocks again */
+       radeon_set_uvd_clocks(rdev, 0, 0);
+
+       return r;
+}
+
 /*
  * GPU scratch registers helpers function.
  */
@@ -2660,6 +2847,40 @@ int r600_dma_ring_test(struct radeon_device *rdev,
        return r;
 }
 
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
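+       /* seed UVD_CONTEXT_ID with a dummy value, then have the ring overwrite
+        * it and poll until the new value shows up
+        */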
+       WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+       r = radeon_ring_lock(rdev, ring, 3);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+                         ring->idx, r);
+               return r;
+       }
+       radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = RREG32(UVD_CONTEXT_ID);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n",
+                        ring->idx, i);
+       } else {
+               DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       return r;
+}
+
 /*
  * CP fences/semaphores
  */
@@ -2711,6 +2932,30 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
        }
 }
 
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+                        struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+       radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+       radeon_ring_write(ring, addr & 0xffffffff);
+       radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+       radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+       radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+       radeon_ring_write(ring, 0);
+
+       radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+       radeon_ring_write(ring, 2);
+       return;
+}
+
 void r600_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *ring,
                              struct radeon_semaphore *semaphore,
@@ -2780,6 +3025,23 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
 }
 
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+                            struct radeon_ring *ring,
+                            struct radeon_semaphore *semaphore,
+                            bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+       radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+       radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+       radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
@@ -3183,6 +3445,16 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
        radeon_ring_write(ring, ib->length_dw);
 }
 
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+       radeon_ring_write(ring, ib->gpu_addr);
+       radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+       radeon_ring_write(ring, ib->length_dw);
+}
+
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
@@ -3300,6 +3572,41 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        return r;
 }
 
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       struct radeon_fence *fence = NULL;
+       int r;
+
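+       /* submit a dummy create/destroy message pair and wait for the fence of
+        * the destroy message
+        */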
+       r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+       if (r) {
+               DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+       if (r) {
+               DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+               goto error;
+       }
+
+       r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+       if (r) {
+               DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+               goto error;
+       }
+
+       r = radeon_fence_wait(fence, false);
+       if (r) {
+               DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+               goto error;
+       }
+       DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
+error:
+       radeon_fence_unref(&fence);
+       radeon_set_uvd_clocks(rdev, 0, 0);
+       return r;
+}
+
 /**
  * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
  *
@@ -4232,7 +4539,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
 
 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
 {
-       u32 link_width_cntl, mask, target_reg;
+       u32 link_width_cntl, mask;
 
        if (rdev->flags & RADEON_IS_IGP)
                return;
@@ -4244,7 +4551,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
        if (ASIC_IS_X2(rdev))
                return;
 
-       /* FIXME wait for idle */
+       radeon_gui_idle(rdev);
 
        switch (lanes) {
        case 0:
@@ -4263,53 +4570,24 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
+               /* not actually supported */
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
-       default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
-       }
-
-       link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-
-       if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
-           (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
-               return;
-
-       if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+       default:
+               DRM_ERROR("invalid pcie lane request: %d\n", lanes);
                return;
+       }
 
-       link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
-                            RADEON_PCIE_LC_RECONFIG_NOW |
-                            R600_PCIE_LC_RENEGOTIATE_EN |
-                            R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
-       link_width_cntl |= mask;
-
-       WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
-        /* some northbridges can renegotiate the link rather than requiring                                  
-         * a complete re-config.                                                                             
-         * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)                            
-         */
-        if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
-               link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
-        else
-               link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
-
-       WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
-                                                      RADEON_PCIE_LC_RECONFIG_NOW));
-
-        if (rdev->family >= CHIP_RV770)
-               target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
-        else
-               target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
-
-        /* wait for lane set to complete */
-        link_width_cntl = RREG32(target_reg);
-        while (link_width_cntl == 0xffffffff)
-               link_width_cntl = RREG32(target_reg);
+       link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+       link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+       link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+       link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+                           R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
 
+       WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 }
 
 int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4326,13 +4604,11 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
        if (ASIC_IS_X2(rdev))
                return 0;
 
-       /* FIXME wait for idle */
+       radeon_gui_idle(rdev);
 
-       link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+       link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
        switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
-       case RADEON_PCIE_LC_LINK_WIDTH_X0:
-               return 0;
        case RADEON_PCIE_LC_LINK_WIDTH_X1:
                return 1;
        case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4341,6 +4617,10 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
                return 4;
        case RADEON_PCIE_LC_LINK_WIDTH_X8:
                return 8;
+       case RADEON_PCIE_LC_LINK_WIDTH_X12:
+               /* not actually supported */
+               return 12;
+       case RADEON_PCIE_LC_LINK_WIDTH_X0:
        case RADEON_PCIE_LC_LINK_WIDTH_X16:
        default:
                return 16;
@@ -4378,7 +4658,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
        if (!(mask & DRM_PCIE_SPEED_50))
                return;
 
-       speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        if (speed_cntl & LC_CURRENT_DATA_RATE) {
                DRM_INFO("PCIE gen 2 link speeds already enabled\n");
                return;
@@ -4391,23 +4671,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
            (rdev->family == CHIP_RV620) ||
            (rdev->family == CHIP_RV635)) {
                /* advertise upconfig capability */
-               link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+               link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
                link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-               link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
                if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
                        lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
                        link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
                                             LC_RECONFIG_ARC_MISSING_ESCAPE);
                        link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
-                       WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+                       WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
                } else {
                        link_width_cntl |= LC_UPCONFIGURE_DIS;
-                       WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+                       WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
                }
        }
 
-       speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
            (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
@@ -4428,7 +4708,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
                speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
                speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
                speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
                tmp = RREG32(0x541c);
                WREG32(0x541c, tmp | 0x8);
@@ -4442,27 +4722,27 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
                if ((rdev->family == CHIP_RV670) ||
                    (rdev->family == CHIP_RV620) ||
                    (rdev->family == CHIP_RV635)) {
-                       training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+                       training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
                        training_cntl &= ~LC_POINT_7_PLUS_EN;
-                       WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+                       WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
                } else {
-                       speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+                       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                        speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
-                       WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+                       WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
                }
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl |= LC_GEN2_EN_STRAP;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
        } else {
-               link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+               link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
                /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
                if (1)
                        link_width_cntl |= LC_UPCONFIGURE_DIS;
                else
                        link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        }
 }
 
index cb03fe2..c92eb86 100644 (file)
@@ -57,10 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
-               || rdev->family == CHIP_RS600
-               || rdev->family == CHIP_RS690
-               || rdev->family == CHIP_RS740;
+       return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
 }
 
 struct r600_audio r600_audio_status(struct radeon_device *rdev)
@@ -183,65 +180,6 @@ int r600_audio_init(struct radeon_device *rdev)
        return 0;
 }
 
-/*
- * atach the audio codec to the clock source of the encoder
- */
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
-{
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-       int base_rate = 48000;
-
-       switch (radeon_encoder->encoder_id) {
-       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-               WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
-               break;
-       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
-               break;
-       default:
-               dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
-                         radeon_encoder->encoder_id);
-               return;
-       }
-
-       if (ASIC_IS_DCE4(rdev)) {
-               /* TODO: other PLLs? */
-               WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
-               WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
-               WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
-
-               /* Select DTO source */
-               WREG32(0x5ac, radeon_crtc->crtc_id);
-       } else {
-               switch (dig->dig_encoder) {
-               case 0:
-                       WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
-                       WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
-                       WREG32(R600_AUDIO_CLK_SRCSEL, 0);
-                       break;
-
-               case 1:
-                       WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
-                       WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
-                       WREG32(R600_AUDIO_CLK_SRCSEL, 1);
-                       break;
-               default:
-                       dev_err(rdev->dev,
-                               "Unsupported DIG on encoder 0x%02X\n",
-                               radeon_encoder->encoder_id);
-                       return;
-               }
-       }
-}
-
 /*
  * release the audio timer
  * TODO: How to do this correctly on SMP systems?
index 21ecc0e..47f180a 100644 (file)
@@ -226,6 +226,39 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
                 value, ~HDMI0_AUDIO_TEST_EN);
 }
 
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       u32 base_rate = 48000;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT;
+        * it doesn't matter which one you use, so just use the first one.
+        */
+       /* XXX: properly calculate this */
+       /* XXX two dtos; generally use dto0 for hdmi */
+       /* Express [24MHz / target pixel clock] as an exact rational
+        * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
+        * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+        */
+       if (ASIC_IS_DCE3(rdev)) {
+               /* according to the reg specs, this should be DCE3.2 only, but in
+                * practice it seems to cover DCE3.0 as well.
+                */
+               WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
+               WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+               WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+       } else {
+               /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+               WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
+                      AUDIO_DTO_MODULE(clock * 100));
+       }
+}
 
 /*
  * update the info frames with the data from the current display mode
@@ -246,7 +279,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
                return;
        offset = dig->afmt->offset;
 
-       r600_audio_set_clock(encoder, mode->clock);
+       r600_audio_set_dto(encoder, mode->clock);
 
        WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
               HDMI0_NULL_SEND); /* send null packets when required */
@@ -415,114 +448,73 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
 /*
  * enable the HDMI engine
  */
-void r600_hdmi_enable(struct drm_encoder *encoder)
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       uint32_t offset;
-       u32 hdmi;
-
-       if (ASIC_IS_DCE6(rdev))
-               return;
+       u32 hdmi = HDMI0_ERROR_ACK;
 
        /* Silent, r600_hdmi_enable will raise WARN for us */
-       if (dig->afmt->enabled)
+       if (enable && dig->afmt->enabled)
+               return;
+       if (!enable && !dig->afmt->enabled)
                return;
-       offset = dig->afmt->offset;
 
        /* Older chipsets require setting HDMI and routing manually */
-       if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-               hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
+       if (!ASIC_IS_DCE3(rdev)) {
+               if (enable)
+                       hdmi |= HDMI0_ENABLE;
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-                       WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
-                                ~AVIVO_TMDSA_CNTL_HDMI_EN);
-                       hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+                       if (enable) {
+                               WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
+                               hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+                       } else {
+                               WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
+                       }
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-                       WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
-                                ~AVIVO_LVTMA_CNTL_HDMI_EN);
-                       hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+                       if (enable) {
+                               WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
+                               hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+                       } else {
+                               WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
+                       }
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_DDI:
-                       WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
-                       hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+                       if (enable) {
+                               WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
+                               hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+                       } else {
+                               WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
+                       }
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-                       hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+                       if (enable)
+                               hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
                        break;
                default:
                        dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
                                radeon_encoder->encoder_id);
                        break;
                }
-               WREG32(HDMI0_CONTROL + offset, hdmi);
+               WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
        }
 
        if (rdev->irq.installed) {
                /* if irq is available use it */
-               radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+               /* XXX: shouldn't need this on any asics.  Double check DCE2/3 */
+               if (enable)
+                       radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+               else
+                       radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
        }
 
-       dig->afmt->enabled = true;
+       dig->afmt->enabled = enable;
 
-       DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
-                 offset, radeon_encoder->encoder_id);
+       DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+                 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
 }
 
-/*
- * disable the HDMI engine
- */
-void r600_hdmi_disable(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       uint32_t offset;
-
-       if (ASIC_IS_DCE6(rdev))
-               return;
-
-       /* Called for ATOM_ENCODER_MODE_HDMI only */
-       if (!dig || !dig->afmt) {
-               return;
-       }
-       if (!dig->afmt->enabled)
-               return;
-       offset = dig->afmt->offset;
-
-       DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
-                 offset, radeon_encoder->encoder_id);
-
-       /* disable irq */
-       radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
-
-       /* Older chipsets not handled by AtomBIOS */
-       if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-               switch (radeon_encoder->encoder_id) {
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-                       WREG32_P(AVIVO_TMDSA_CNTL, 0,
-                                ~AVIVO_TMDSA_CNTL_HDMI_EN);
-                       break;
-               case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-                       WREG32_P(AVIVO_LVTMA_CNTL, 0,
-                                ~AVIVO_LVTMA_CNTL_HDMI_EN);
-                       break;
-               case ENCODER_OBJECT_ID_INTERNAL_DDI:
-                       WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
-                       break;
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-                       break;
-               default:
-                       dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
-                               radeon_encoder->encoder_id);
-                       break;
-               }
-               WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
-       }
-
-       dig->afmt->enabled = false;
-}
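
The separate r600_hdmi_disable() path above is folded into r600_hdmi_enable(encoder, enable), reached through the new display.hdmi_enable/hdmi_setmode asic callbacks added elsewhere in this diff. A hedged sketch of how a caller drives the combined entry point; example_setup_hdmi()/example_teardown_hdmi() are invented names, while radeon_hdmi_enable()/radeon_hdmi_setmode() are the wrapper macros defined further down in radeon.h:

/* hedged sketch: enable + mode-set, then disable, via the asic callbacks */
static void example_setup_hdmi(struct radeon_device *rdev,
                               struct drm_encoder *encoder,
                               struct drm_display_mode *mode)
{
        radeon_hdmi_enable(rdev, encoder, true);   /* r600_hdmi_enable(encoder, true) on pre-DCE4 */
        radeon_hdmi_setmode(rdev, encoder, mode);  /* program ACR/infoframes for this mode */
}

static void example_teardown_hdmi(struct radeon_device *rdev,
                                  struct drm_encoder *encoder)
{
        radeon_hdmi_enable(rdev, encoder, false);  /* clears HDMI_EN and disables the afmt irq */
}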
index a42ba11..acb146c 100644 (file)
 #define SRBM_SOFT_RESET                                   0xe60
 #       define SOFT_RESET_DMA                             (1 << 12)
 #       define SOFT_RESET_RLC                             (1 << 13)
+#       define SOFT_RESET_UVD                             (1 << 18)
 #       define RV770_SOFT_RESET_DMA                       (1 << 20)
 
 #define CP_INT_CNTL                                       0xc124
 #       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
 #       define SELECTABLE_DEEMPHASIS                      (1 << 6)
 
-/* Audio clocks */
+/* Audio clocks DCE 2.0/3.0 */
+#define AUDIO_DTO                         0x7340
+#       define AUDIO_DTO_PHASE(x)         (((x) & 0xffff) << 0)
+#       define AUDIO_DTO_MODULE(x)        (((x) & 0xffff) << 16)
+
+/* Audio clocks DCE 3.2 */
 #define DCCG_AUDIO_DTO0_PHASE             0x0514
 #define DCCG_AUDIO_DTO0_MODULE            0x0518
 #define DCCG_AUDIO_DTO0_LOAD              0x051c
 #       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
 #       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
 
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW                              0xef00
+#define UVD_SEMA_ADDR_HIGH                             0xef04
+#define UVD_SEMA_CMD                                   0xef08
+
+#define UVD_GPCOM_VCPU_CMD                             0xef0c
+#define UVD_GPCOM_VCPU_DATA0                           0xef10
+#define UVD_GPCOM_VCPU_DATA1                           0xef14
+#define UVD_ENGINE_CNTL                                        0xef18
+
+#define UVD_SEMA_CNTL                                  0xf400
+#define UVD_RB_ARB_CTRL                                        0xf480
+
+#define UVD_LMI_EXT40_ADDR                             0xf498
+#define UVD_CGC_GATE                                   0xf4a8
+#define UVD_LMI_CTRL2                                  0xf4f4
+#define UVD_MASTINT_EN                                 0xf500
+#define UVD_LMI_ADDR_EXT                               0xf594
+#define UVD_LMI_CTRL                                   0xf598
+#define UVD_LMI_SWAP_CNTL                              0xf5b4
+#define UVD_MP_SWAP_CNTL                               0xf5bc
+#define UVD_MPC_CNTL                                   0xf5dc
+#define UVD_MPC_SET_MUXA0                              0xf5e4
+#define UVD_MPC_SET_MUXA1                              0xf5e8
+#define UVD_MPC_SET_MUXB0                              0xf5ec
+#define UVD_MPC_SET_MUXB1                              0xf5f0
+#define UVD_MPC_SET_MUX                                        0xf5f4
+#define UVD_MPC_SET_ALU                                        0xf5f8
+
+#define UVD_VCPU_CNTL                                  0xf660
+#define UVD_SOFT_RESET                                 0xf680
+#define                RBC_SOFT_RESET                                  (1<<0)
+#define                LBSI_SOFT_RESET                                 (1<<1)
+#define                LMI_SOFT_RESET                                  (1<<2)
+#define                VCPU_SOFT_RESET                                 (1<<3)
+#define                CSM_SOFT_RESET                                  (1<<5)
+#define                CXW_SOFT_RESET                                  (1<<6)
+#define                TAP_SOFT_RESET                                  (1<<7)
+#define                LMI_UMC_SOFT_RESET                              (1<<13)
+#define UVD_RBC_IB_BASE                                        0xf684
+#define UVD_RBC_IB_SIZE                                        0xf688
+#define UVD_RBC_RB_BASE                                        0xf68c
+#define UVD_RBC_RB_RPTR                                        0xf690
+#define UVD_RBC_RB_WPTR                                        0xf694
+#define UVD_RBC_RB_WPTR_CNTL                           0xf698
+
+#define UVD_STATUS                                     0xf6bc
+
+#define UVD_SEMA_TIMEOUT_STATUS                                0xf6c0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL          0xf6c4
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL               0xf6c8
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL                0xf6cc
+
+#define UVD_RBC_RB_CNTL                                        0xf6a4
+#define UVD_RBC_RB_RPTR_ADDR                           0xf6a8
+
+#define UVD_CONTEXT_ID                                 0xf6f4
+
+#      define UPLL_CTLREQ_MASK                         0x00000008
+#      define UPLL_CTLACK_MASK                         0x40000000
+#      define UPLL_CTLACK2_MASK                        0x80000000
+
 /*
  * PM4
  */
index 8263af3..1442ce7 100644 (file)
@@ -95,6 +95,7 @@ extern int radeon_hw_i2c;
 extern int radeon_pcie_gen2;
 extern int radeon_msi;
 extern int radeon_lockup_timeout;
+extern int radeon_fastfb;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -109,24 +110,27 @@ extern int radeon_lockup_timeout;
 #define RADEON_BIOS_NUM_SCRATCH                        8
 
 /* max number of rings */
-#define RADEON_NUM_RINGS                       5
+#define RADEON_NUM_RINGS                       6
 
 /* fence seq are set to this number when signaled */
 #define RADEON_FENCE_SIGNALED_SEQ              0LL
 
 /* internal ring indices */
 /* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX             0
+#define RADEON_RING_TYPE_GFX_INDEX     0
 
 /* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX             1
-#define CAYMAN_RING_TYPE_CP2_INDEX             2
+#define CAYMAN_RING_TYPE_CP1_INDEX     1
+#define CAYMAN_RING_TYPE_CP2_INDEX     2
 
 /* R600+ has an async dma ring */
 #define R600_RING_TYPE_DMA_INDEX               3
 /* cayman add a second async dma ring */
 #define CAYMAN_RING_TYPE_DMA1_INDEX            4
 
+/* R600+ */
+#define R600_RING_TYPE_UVD_INDEX       5
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET                    (1 << 20)
 #define RADEON_VA_RESERVED_SIZE                        (8 << 20)
@@ -202,6 +206,11 @@ void radeon_pm_suspend(struct radeon_device *rdev);
 void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+                                  u8 clock_type,
+                                  u32 clock,
+                                  bool strobe_mode,
+                                  struct atom_clock_dividers *dividers);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
@@ -349,7 +358,8 @@ struct radeon_bo {
        struct radeon_device            *rdev;
        struct drm_gem_object           gem_base;
 
-       struct ttm_bo_kmap_obj dma_buf_vmap;
+       struct ttm_bo_kmap_obj          dma_buf_vmap;
+       pid_t                           pid;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
@@ -357,11 +367,14 @@ struct radeon_bo_list {
        struct ttm_validate_buffer tv;
        struct radeon_bo        *bo;
        uint64_t                gpu_offset;
-       unsigned                rdomain;
-       unsigned                wdomain;
+       bool                    written;
+       unsigned                domain;
+       unsigned                alt_domain;
        u32                     tiling_flags;
 };
 
+int radeon_gem_debugfs_init(struct radeon_device *rdev);
+
 /* sub-allocation manager, it has to be protected by another lock.
  * By conception this is an helper for other part of the driver
  * like the indirect buffer or semaphore, which both have their
@@ -517,6 +530,7 @@ struct radeon_mc {
        bool                    vram_is_ddr;
        bool                    igp_sideport_enabled;
        u64                     gtt_base_align;
+       u64                     mc_mask;
 };
 
 bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -918,6 +932,7 @@ struct radeon_wb {
 #define R600_WB_DMA_RPTR_OFFSET   1792
 #define R600_WB_IH_WPTR_OFFSET   2048
 #define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
+#define R600_WB_UVD_RPTR_OFFSET  2560
 #define R600_WB_EVENT_OFFSET     3072
 
 /**
@@ -1118,6 +1133,46 @@ struct radeon_pm {
 int radeon_pm_get_type_index(struct radeon_device *rdev,
                             enum radeon_pm_state_type ps_type,
                             int instance);
+/*
+ * UVD
+ */
+#define RADEON_MAX_UVD_HANDLES 10
+#define RADEON_UVD_STACK_SIZE  (1024*1024)
+#define RADEON_UVD_HEAP_SIZE   (1024*1024)
+
+struct radeon_uvd {
+       struct radeon_bo        *vcpu_bo;
+       void                    *cpu_addr;
+       uint64_t                gpu_addr;
+       atomic_t                handles[RADEON_MAX_UVD_HANDLES];
+       struct drm_file         *filp[RADEON_MAX_UVD_HANDLES];
+       struct delayed_work     idle_work;
+};
+
+int radeon_uvd_init(struct radeon_device *rdev);
+void radeon_uvd_fini(struct radeon_device *rdev);
+int radeon_uvd_suspend(struct radeon_device *rdev);
+int radeon_uvd_resume(struct radeon_device *rdev);
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+                             uint32_t handle, struct radeon_fence **fence);
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+                              uint32_t handle, struct radeon_fence **fence);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_free_handles(struct radeon_device *rdev,
+                            struct drm_file *filp);
+int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+                                 unsigned vclk, unsigned dclk,
+                                 unsigned vco_min, unsigned vco_max,
+                                 unsigned fb_factor, unsigned fb_mask,
+                                 unsigned pd_min, unsigned pd_max,
+                                 unsigned pd_even,
+                                 unsigned *optimal_fb_div,
+                                 unsigned *optimal_vclk_div,
+                                 unsigned *optimal_dclk_div);
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+                                unsigned cg_upll_func_cntl);
 
 struct r600_audio {
        int                     channels;
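
The new radeon_uvd_get_create_msg()/radeon_uvd_get_destroy_msg() helpers are the building blocks for the UVD ring and IB tests wired up in the asic tables below. A hedged sketch of the usual pairing, assuming the fence of the destroy message is the one worth waiting on; example_uvd_smoke_test() and the handle value are made up:

static int example_uvd_smoke_test(struct radeon_device *rdev, int ring)
{
        struct radeon_fence *fence = NULL;
        int r;

        /* submit a session-create message, then immediately destroy it */
        r = radeon_uvd_get_create_msg(rdev, ring, 1, NULL);
        if (r)
                return r;
        r = radeon_uvd_get_destroy_msg(rdev, ring, 1, &fence);
        if (r)
                return r;

        /* the destroy fence signals once UVD has consumed both messages */
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
}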
@@ -1229,6 +1284,9 @@ struct radeon_asic {
                void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
                /* get backlight level */
                u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+               /* audio callbacks */
+               void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
+               void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
        } display;
        /* copy functions for bo handling */
        struct {
@@ -1281,6 +1339,7 @@ struct radeon_asic {
                int (*get_pcie_lanes)(struct radeon_device *rdev);
                void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
                void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+               int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
        } pm;
        /* pageflipping */
        struct {
@@ -1443,6 +1502,7 @@ struct si_asic {
        unsigned multi_gpu_tile_size;
 
        unsigned tile_config;
+       uint32_t tile_mode_array[32];
 };
 
 union radeon_asic_config {
@@ -1608,6 +1668,7 @@ struct radeon_device {
        struct radeon_asic              *asic;
        struct radeon_gem               gem;
        struct radeon_pm                pm;
+       struct radeon_uvd               uvd;
        uint32_t                        bios_scratch[RADEON_BIOS_NUM_SCRATCH];
        struct radeon_wb                wb;
        struct radeon_dummy_page        dummy_page;
@@ -1615,12 +1676,14 @@ struct radeon_device {
        bool                            suspend;
        bool                            need_dma32;
        bool                            accel_working;
+       bool                            fastfb_working; /* IGP feature*/
        struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
        const struct firmware *me_fw;   /* all family ME firmware */
        const struct firmware *pfp_fw;  /* r6/700 PFP firmware */
        const struct firmware *rlc_fw;  /* r6/700 RLC firmware */
        const struct firmware *mc_fw;   /* NI MC firmware */
        const struct firmware *ce_fw;   /* SI CE firmware */
+       const struct firmware *uvd_fw;  /* UVD firmware */
        struct r600_blit r600_blit;
        struct r600_vram_scratch vram_scratch;
        int msi_enabled; /* msi enabled */
@@ -1688,8 +1751,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
 #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
 #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
-#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
-#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
 #define WREG32_P(reg, val, mask)                               \
        do {                                                    \
                uint32_t tmp_ = RREG32(reg);                    \
@@ -1697,6 +1760,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
                tmp_ |= ((val) & ~(mask));                      \
                WREG32(reg, tmp_);                              \
        } while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
 #define WREG32_PLL_P(reg, val, mask)                           \
        do {                                                    \
                uint32_t tmp_ = RREG32_PLL(reg);                \
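
WREG32_AND()/WREG32_OR() are read-modify-write shorthands on top of WREG32_P(); they are what lets the HDMI code earlier in the diff flip HDMI_EN without disturbing the rest of the control register. A stand-alone model of the semantics (user-space C with a fake register; reg_read()/reg_write() are invented stand-ins for the MMIO accessors):

#include <assert.h>
#include <stdint.h>

static uint32_t fake_reg;                            /* pretend MMIO register */
static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

/* mirrors WREG32_P: 'mask' selects the bits to KEEP from the current value */
static void reg_write_masked(uint32_t val, uint32_t mask)
{
        uint32_t tmp = reg_read();
        tmp &= mask;
        tmp |= (val & ~mask);
        reg_write(tmp);
}

#define REG_OR(bits)  reg_write_masked((bits), ~(bits))  /* like WREG32_OR  */
#define REG_AND(mask) reg_write_masked(0, (mask))        /* like WREG32_AND */

int main(void)
{
        fake_reg = 0x00000f00;
        REG_OR(0x00000004);        /* set bit 2, keep everything else */
        assert(fake_reg == 0x00000f04);
        REG_AND(~0x00000f00u);     /* clear bits 8-11, keep the rest */
        assert(fake_reg == 0x00000004);
        return 0;
}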
@@ -1830,6 +1895,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
 #define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
 #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
 #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
 #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1845,6 +1912,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1892,6 +1960,9 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+                                            const u32 *registers,
+                                            const u32 array_size);
 
 /*
  * vm
@@ -1964,9 +2035,6 @@ struct radeon_hdmi_acr {
 
 extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
 
-extern void r600_hdmi_enable(struct drm_encoder *encoder);
-extern void r600_hdmi_disable(struct drm_encoder *encoder);
-extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
                                     u32 tiling_pipe_num,
                                     u32 max_rb_num,
@@ -1977,8 +2045,6 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
  * evergreen functions used by radeon_encoder.c
  */
 
-extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-
 extern int ni_init_microcode(struct radeon_device *rdev);
 extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
index aba0a89..6417132 100644 (file)
@@ -656,6 +656,8 @@ static struct radeon_asic rs600_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r100_copy_blit,
@@ -732,6 +734,8 @@ static struct radeon_asic rs690_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r100_copy_blit,
@@ -970,6 +974,8 @@ static struct radeon_asic r600_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1056,6 +1062,8 @@ static struct radeon_asic rs780_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1130,6 +1138,15 @@ static struct radeon_asic rv770_asic = {
                        .ring_test = &r600_dma_ring_test,
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &r600_dma_is_lockup,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &r600_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1142,6 +1159,8 @@ static struct radeon_asic rv770_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1174,6 +1193,7 @@ static struct radeon_asic rv770_asic = {
                .get_pcie_lanes = &r600_get_pcie_lanes,
                .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = &radeon_atom_set_clock_gating,
+               .set_uvd_clocks = &rv770_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
@@ -1216,6 +1236,15 @@ static struct radeon_asic evergreen_asic = {
                        .ring_test = &r600_dma_ring_test,
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &evergreen_dma_is_lockup,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &r600_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1228,6 +1257,8 @@ static struct radeon_asic evergreen_asic = {
                .wait_for_vblank = &dce4_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &evergreen_hdmi_enable,
+               .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1260,6 +1291,7 @@ static struct radeon_asic evergreen_asic = {
                .get_pcie_lanes = &r600_get_pcie_lanes,
                .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
+               .set_uvd_clocks = &evergreen_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
@@ -1302,6 +1334,15 @@ static struct radeon_asic sumo_asic = {
                        .ring_test = &r600_dma_ring_test,
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &evergreen_dma_is_lockup,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &r600_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1314,6 +1355,8 @@ static struct radeon_asic sumo_asic = {
                .wait_for_vblank = &dce4_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &evergreen_hdmi_enable,
+               .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1346,6 +1389,7 @@ static struct radeon_asic sumo_asic = {
                .get_pcie_lanes = NULL,
                .set_pcie_lanes = NULL,
                .set_clock_gating = NULL,
+               .set_uvd_clocks = &sumo_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
@@ -1388,6 +1432,15 @@ static struct radeon_asic btc_asic = {
                        .ring_test = &r600_dma_ring_test,
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &evergreen_dma_is_lockup,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &r600_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1400,6 +1453,8 @@ static struct radeon_asic btc_asic = {
                .wait_for_vblank = &dce4_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &evergreen_hdmi_enable,
+               .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1429,9 +1484,10 @@ static struct radeon_asic btc_asic = {
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
-               .get_pcie_lanes = NULL,
-               .set_pcie_lanes = NULL,
+               .get_pcie_lanes = &r600_get_pcie_lanes,
+               .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
+               .set_uvd_clocks = &evergreen_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
@@ -1517,6 +1573,15 @@ static struct radeon_asic cayman_asic = {
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &cayman_dma_is_lockup,
                        .vm_flush = &cayman_dma_vm_flush,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &cayman_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1529,6 +1594,8 @@ static struct radeon_asic cayman_asic = {
                .wait_for_vblank = &dce4_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &evergreen_hdmi_enable,
+               .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_blit,
@@ -1558,9 +1625,10 @@ static struct radeon_asic cayman_asic = {
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
-               .get_pcie_lanes = NULL,
-               .set_pcie_lanes = NULL,
+               .get_pcie_lanes = &r600_get_pcie_lanes,
+               .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
+               .set_uvd_clocks = &evergreen_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
@@ -1646,6 +1714,15 @@ static struct radeon_asic trinity_asic = {
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &cayman_dma_is_lockup,
                        .vm_flush = &cayman_dma_vm_flush,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &cayman_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1690,6 +1767,7 @@ static struct radeon_asic trinity_asic = {
                .get_pcie_lanes = NULL,
                .set_pcie_lanes = NULL,
                .set_clock_gating = NULL,
+               .set_uvd_clocks = &sumo_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
@@ -1775,6 +1853,15 @@ static struct radeon_asic si_asic = {
                        .ib_test = &r600_dma_ib_test,
                        .is_lockup = &si_dma_is_lockup,
                        .vm_flush = &si_dma_vm_flush,
+               },
+               [R600_RING_TYPE_UVD_INDEX] = {
+                       .ib_execute = &r600_uvd_ib_execute,
+                       .emit_fence = &r600_uvd_fence_emit,
+                       .emit_semaphore = &cayman_uvd_semaphore_emit,
+                       .cs_parse = &radeon_uvd_cs_parse,
+                       .ring_test = &r600_uvd_ring_test,
+                       .ib_test = &r600_uvd_ib_test,
+                       .is_lockup = &radeon_ring_test_lockup,
                }
        },
        .irq = {
@@ -1816,9 +1903,10 @@ static struct radeon_asic si_asic = {
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
-               .get_pcie_lanes = NULL,
-               .set_pcie_lanes = NULL,
+               .get_pcie_lanes = &r600_get_pcie_lanes,
+               .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
+               .set_uvd_clocks = &si_set_uvd_clocks,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
index 3535f73..2c87365 100644 (file)
@@ -330,6 +330,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -373,11 +374,12 @@ void r600_disable_interrupts(struct radeon_device *rdev);
 void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 int r600_audio_init(struct radeon_device *rdev);
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
 struct r600_audio r600_audio_status(struct radeon_device *rdev);
 void r600_audio_fini(struct radeon_device *rdev);
 int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 /* r600 blit */
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
                           struct radeon_fence **fence, struct radeon_sa_bo **vb,
@@ -392,6 +394,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
 u32 r600_get_xclk(struct radeon_device *rdev);
 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
 
+/* uvd */
+int r600_uvd_init(struct radeon_device *rdev);
+int r600_uvd_rbc_start(struct radeon_device *rdev);
+void r600_uvd_rbc_stop(struct radeon_device *rdev);
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+                        struct radeon_fence *fence);
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+                            struct radeon_ring *ring,
+                            struct radeon_semaphore *semaphore,
+                            bool emit_wait);
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
 /*
  * rv770,rv730,rv710,rv740
  */
@@ -409,6 +424,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
                  unsigned num_gpu_pages,
                   struct radeon_fence **fence);
 u32 rv770_get_xclk(struct radeon_device *rdev);
+int rv770_uvd_resume(struct radeon_device *rdev);
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 
 /*
  * evergreen
@@ -444,6 +461,8 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
 extern void sumo_pm_init_profile(struct radeon_device *rdev);
 extern void btc_pm_init_profile(struct radeon_device *rdev);
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
 extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -459,12 +478,18 @@ int evergreen_copy_dma(struct radeon_device *rdev,
                       uint64_t src_offset, uint64_t dst_offset,
                       unsigned num_gpu_pages,
                       struct radeon_fence **fence);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 
 /*
  * cayman
  */
 void cayman_fence_ring_emit(struct radeon_device *rdev,
                            struct radeon_fence *fence);
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+                              struct radeon_ring *ring,
+                              struct radeon_semaphore *semaphore,
+                              bool emit_wait);
 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int cayman_init(struct radeon_device *rdev);
 void cayman_fini(struct radeon_device *rdev);
@@ -524,5 +549,6 @@ int si_copy_dma(struct radeon_device *rdev,
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 
 #endif
index f22eb57..dea6f63 100644 (file)
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
        num_modes = power_info->info.ucNumOfPowerModeEntries;
        if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
                num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+       if (num_modes == 0)
+               return state_index;
        rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
        if (!rdev->pm.power_state)
                return state_index;
@@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
                rdev->pm.default_power_state_index = state_index;
                rdev->pm.power_state[state_index].default_clock_mode =
                        &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
-               if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+               if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
                        /* NI chips post without MC ucode, so default clocks are strobe mode only */
                        rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
                        rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                        sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
                        rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
                }
-       } else if (ASIC_IS_DCE6(rdev)) {
+       } else if (rdev->family >= CHIP_TAHITI) {
                sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
                sclk |= clock_info->si.ucEngineClockHigh << 16;
                mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                        le16_to_cpu(clock_info->si.usVDDC);
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
                        le16_to_cpu(clock_info->si.usVDDCI);
-       } else if (ASIC_IS_DCE4(rdev)) {
+       } else if (rdev->family >= CHIP_CEDAR) {
                sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
                sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
                mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
        radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+       if (power_info->pplib.ucNumStates == 0)
+               return state_index;
        rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
                                       power_info->pplib.ucNumStates, GFP_KERNEL);
        if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
         u16 data_offset;
        u8 frev, crev;
+       u8 *power_state_offset;
 
        if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
        non_clock_info_array = (struct _NonClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+       if (state_array->ucNumEntries == 0)
+               return state_index;
        rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
                                       state_array->ucNumEntries, GFP_KERNEL);
        if (!rdev->pm.power_state)
                return state_index;
+       power_state_offset = (u8 *)state_array->states;
        for (i = 0; i < state_array->ucNumEntries; i++) {
                mode_index = 0;
-               power_state = (union pplib_power_state *)&state_array->states[i];
-               /* XXX this might be an inagua bug... */
-               non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+               power_state = (union pplib_power_state *)power_state_offset;
+               non_clock_array_index = power_state->v2.nonClockInfoIndex;
                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
                        &non_clock_info_array->nonClockInfo[non_clock_array_index];
                rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
                if (power_state->v2.ucNumDPMLevels) {
                        for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
                                clock_array_index = power_state->v2.clockInfoIndex[j];
-                               /* XXX this might be an inagua bug... */
-                               if (clock_array_index >= clock_info_array->ucNumEntries)
-                                       continue;
                                clock_info = (union pplib_clock_info *)
                                        &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
                                valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
                                                                   non_clock_info);
                        state_index++;
                }
+               power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
        }
        /* if multiple clock modes, mark the lowest as no display */
        for (i = 0; i < state_index; i++) {
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                default:
                        break;
                }
-       } else {
+       }
+
+       if (state_index == 0) {
                rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
                if (rdev->pm.power_state) {
                        rdev->pm.power_state[0].clock_info =
@@ -2654,6 +2661,111 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                rdev->pm.current_vddc = 0;
 }
 
+union get_clock_dividers {
+       struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+       struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+       struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+       struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+       struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+};
+
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+                                  u8 clock_type,
+                                  u32 clock,
+                                  bool strobe_mode,
+                                  struct atom_clock_dividers *dividers)
+{
+       union get_clock_dividers args;
+       int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+       u8 frev, crev;
+
+       memset(&args, 0, sizeof(args));
+       memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+       if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+               return -EINVAL;
+
+       switch (crev) {
+       case 1:
+               /* r4xx, r5xx */
+               args.v1.ucAction = clock_type;
+               args.v1.ulClock = cpu_to_le32(clock);   /* 10 khz */
+
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               dividers->post_div = args.v1.ucPostDiv;
+               dividers->fb_div = args.v1.ucFbDiv;
+               dividers->enable_post_div = true;
+               break;
+       case 2:
+       case 3:
+               /* r6xx, r7xx, evergreen, ni */
+               if (rdev->family <= CHIP_RV770) {
+                       args.v2.ucAction = clock_type;
+                       args.v2.ulClock = cpu_to_le32(clock);   /* 10 khz */
+
+                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                       dividers->post_div = args.v2.ucPostDiv;
+                       dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
+                       dividers->ref_div = args.v2.ucAction;
+                       if (rdev->family == CHIP_RV770) {
+                               dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+                                       true : false;
+                               dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+                       } else
+                               dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+               } else {
+                       if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+                               args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                               dividers->post_div = args.v3.ucPostDiv;
+                               dividers->enable_post_div = (args.v3.ucCntlFlag &
+                                                            ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                               dividers->enable_dithen = (args.v3.ucCntlFlag &
+                                                          ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                               dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+                               dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+                               dividers->ref_div = args.v3.ucRefDiv;
+                               dividers->vco_mode = (args.v3.ucCntlFlag &
+                                                     ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+                       } else {
+                               args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+                               if (strobe_mode)
+                                       args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                               dividers->post_div = args.v5.ucPostDiv;
+                               dividers->enable_post_div = (args.v5.ucCntlFlag &
+                                                            ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                               dividers->enable_dithen = (args.v5.ucCntlFlag &
+                                                          ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                               dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+                               dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+                               dividers->ref_div = args.v5.ucRefDiv;
+                               dividers->vco_mode = (args.v5.ucCntlFlag &
+                                                     ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+                       }
+               }
+               break;
+       case 4:
+               /* fusion */
+               args.v4.ulClock = cpu_to_le32(clock);   /* 10 khz */
+
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               dividers->post_div = args.v4.ucPostDiv;
+               dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
 {
        DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
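
For reference, a hedged sketch of how a caller might use the new radeon_atom_get_clock_dividers() entry point; COMPUTE_ENGINE_PLL_PARAM is the existing AtomBIOS action id used above, while example_engine_pll_dividers() is an invented wrapper, not a function in the driver:

static int example_engine_pll_dividers(struct radeon_device *rdev,
                                       u32 engine_clock_10khz)
{
        struct atom_clock_dividers dividers;
        int r;

        r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                           engine_clock_10khz, false,
                                           &dividers);
        if (r)
                return r;

        /* dividers.post_div / .fb_div / .ref_div (plus .frac_fb_div and
         * .vco_mode on newer families) now describe the PLL programming */
        return 0;
}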
index 70d3824..7e265a5 100644 (file)
@@ -63,30 +63,50 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                                break;
                        }
                }
-               if (!duplicate) {
-                       p->relocs[i].gobj = drm_gem_object_lookup(ddev,
-                                                                 p->filp,
-                                                                 r->handle);
-                       if (p->relocs[i].gobj == NULL) {
-                               DRM_ERROR("gem object lookup failed 0x%x\n",
-                                         r->handle);
-                               return -ENOENT;
-                       }
-                       p->relocs_ptr[i] = &p->relocs[i];
-                       p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
-                       p->relocs[i].lobj.bo = p->relocs[i].robj;
-                       p->relocs[i].lobj.wdomain = r->write_domain;
-                       p->relocs[i].lobj.rdomain = r->read_domains;
-                       p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
-                       p->relocs[i].handle = r->handle;
-                       p->relocs[i].flags = r->flags;
-                       radeon_bo_list_add_object(&p->relocs[i].lobj,
-                                                 &p->validated);
-
-               } else
+               if (duplicate) {
                        p->relocs[i].handle = 0;
+                       continue;
+               }
+
+               p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
+                                                         r->handle);
+               if (p->relocs[i].gobj == NULL) {
+                       DRM_ERROR("gem object lookup failed 0x%x\n",
+                                 r->handle);
+                       return -ENOENT;
+               }
+               p->relocs_ptr[i] = &p->relocs[i];
+               p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+               p->relocs[i].lobj.bo = p->relocs[i].robj;
+               p->relocs[i].lobj.written = !!r->write_domain;
+
+               /* the first reloc of a UVD job is the
+                  message and it must be in VRAM */
+               if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
+                       /* TODO: is this still needed for NI+ ? */
+                       p->relocs[i].lobj.domain =
+                               RADEON_GEM_DOMAIN_VRAM;
+
+                       p->relocs[i].lobj.alt_domain =
+                               RADEON_GEM_DOMAIN_VRAM;
+
+               } else {
+                       uint32_t domain = r->write_domain ?
+                               r->write_domain : r->read_domains;
+
+                       p->relocs[i].lobj.domain = domain;
+                       if (domain == RADEON_GEM_DOMAIN_VRAM)
+                               domain |= RADEON_GEM_DOMAIN_GTT;
+                       p->relocs[i].lobj.alt_domain = domain;
+               }
+
+               p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+               p->relocs[i].handle = r->handle;
+
+               radeon_bo_list_add_object(&p->relocs[i].lobj,
+                                         &p->validated);
        }
-       return radeon_bo_list_validate(&p->validated);
+       return radeon_bo_list_validate(&p->validated, p->ring);
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -121,6 +141,9 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
                        return -EINVAL;
                }
                break;
+       case RADEON_CS_RING_UVD:
+               p->ring = R600_RING_TYPE_UVD_INDEX;
+               break;
        }
        return 0;
 }
@@ -241,15 +264,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        return -EINVAL;
                }
 
-               /* we only support VM on SI+ */
-               if ((p->rdev->family >= CHIP_TAHITI) &&
-                   ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-                       DRM_ERROR("VM required on SI+!\n");
+               if (radeon_cs_get_ring(p, ring, priority))
                        return -EINVAL;
-               }
 
-               if (radeon_cs_get_ring(p, ring, priority))
+               /* we only support VM on some SI+ rings */
+               if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+                  ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+                       DRM_ERROR("Ring %d requires VM!\n", p->ring);
                        return -EINVAL;
+               }
        }
 
        /* deal with non-vm */
@@ -526,6 +549,10 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }
+
+       if (parser.ring == R600_RING_TYPE_UVD_INDEX)
+               radeon_uvd_note_usage(rdev);
+
        r = radeon_cs_ib_chunk(rdev, &parser);
        if (r) {
                goto out;
index 44b8034..a8f6089 100644 (file)
@@ -97,6 +97,42 @@ static const char radeon_family_name[][16] = {
        "LAST",
 };
 
+/**
+ * radeon_program_register_sequence - program an array of registers.
+ *
+ * @rdev: radeon_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void radeon_program_register_sequence(struct radeon_device *rdev,
+                                     const u32 *registers,
+                                     const u32 array_size)
+{
+       u32 tmp, reg, and_mask, or_mask;
+       int i;
+
+       if (array_size % 3)
+               return;
+
+       for (i = 0; i < array_size; i += 3) {
+               reg = registers[i + 0];
+               and_mask = registers[i + 1];
+               or_mask = registers[i + 2];
+
+               if (and_mask == 0xffffffff) {
+                       tmp = or_mask;
+               } else {
+                       tmp = RREG32(reg);
+                       tmp &= ~and_mask;
+                       tmp |= or_mask;
+               }
+               WREG32(reg, tmp);
+       }
+}
+
 /**
  * radeon_surface_init - Clear GPU surface registers.
  *
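
The array consumed by radeon_program_register_sequence() is a flat list of {offset, and_mask, or_mask} triplets; an and_mask of 0xffffffff means "overwrite the register unconditionally", otherwise the and_mask bits are cleared before the or_mask is applied. A hedged example table with invented offsets and values:

/* hypothetical golden-register table: {reg, and_mask, or_mask} triplets */
static const u32 example_golden_registers[] =
{
        0x9a10, 0xffffffff, 0x00018208,  /* and_mask == ~0: full overwrite   */
        0x8bf0, 0x00070000, 0x00030000,  /* clear bits 16-18, set bits 16-17 */
        0x9150, 0x0000000f, 0x00000002   /* clear bits 0-3,   set bit 1      */
};

static void example_apply_golden_registers(struct radeon_device *rdev)
{
        radeon_program_register_sequence(rdev, example_golden_registers,
                                         ARRAY_SIZE(example_golden_registers));
}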
@@ -359,7 +395,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
        uint64_t limit = (uint64_t)radeon_vram_limit << 20;
 
        mc->vram_start = base;
-       if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+       if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
@@ -394,7 +430,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
        u64 size_af, size_bf;
 
-       size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+       size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
        size_bf = mc->vram_start & ~mc->gtt_base_align;
        if (size_bf > size_af) {
                if (mc->gtt_size > size_bf) {
@@ -1068,6 +1104,17 @@ int radeon_device_init(struct radeon_device *rdev,
                radeon_agp_disable(rdev);
        }
 
+       /* Set the internal MC address mask
+        * This is the max address of the GPU's
+        * internal address space.
+        */
+       if (rdev->family >= CHIP_CAYMAN)
+               rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+       else if (rdev->family >= CHIP_CEDAR)
+               rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
+       else
+               rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
+
        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits
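
mc_mask is the highest address the GPU's internal memory controller can generate; radeon_vram_location()/radeon_gtt_location() above now clamp against it instead of a hard-coded 32-bit limit. A trivial stand-alone check of the mask arithmetic, nothing driver-specific:

#include <assert.h>

int main(void)
{
        assert(0xffffffffffULL == (1ULL << 40) - 1);  /* CAYMAN and newer: 40-bit MC */
        assert(0xfffffffffULL  == (1ULL << 36) - 1);  /* CEDAR up to pre-CAYMAN: 36-bit MC */
        assert(0xffffffffULL   == (1ULL << 32) - 1);  /* everything older: 32-bit MC */
        return 0;
}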
@@ -1131,6 +1178,11 @@ int radeon_device_init(struct radeon_device *rdev,
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
+       r = radeon_gem_debugfs_init(rdev);
+       if (r) {
+               DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+       }
+
        if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
                /* Acceleration not working on AGP card try again
                 * with fallback to PCI or PCIE GART
index 66a7f0f..d33f484 100644 (file)
  *   2.28.0 - r600-eg: Add MEM_WRITE packet support
  *   2.29.0 - R500 FP16 color clear registers
  *   2.30.0 - fix for FMASK texturing
+ *   2.31.0 - Add fastfb support for rs690
+ *   2.32.0 - new info request for rings working
+ *   2.33.0 - Add SI tiling mode array query
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       30
+#define KMS_DRIVER_MINOR       33
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -160,6 +163,7 @@ int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
 int radeon_msi = -1;
 int radeon_lockup_timeout = 10000;
+int radeon_fastfb = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -212,6 +216,9 @@ module_param_named(msi, radeon_msi, int, 0444);
 MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
 module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
 
+MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
+module_param_named(fastfb, radeon_fastfb, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
index 3435625..5b937df 100644 (file)
@@ -31,9 +31,9 @@
 #include <linux/seq_file.h>
 #include <linux/atomic.h>
 #include <linux/wait.h>
-#include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
+#include <linux/firmware.h>
 #include <drm/drmP.h>
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -768,7 +768,19 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
        if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
                rdev->fence_drv[ring].scratch_reg = 0;
-               index = R600_WB_EVENT_OFFSET + ring * 4;
+               if (ring != R600_RING_TYPE_UVD_INDEX) {
+                       index = R600_WB_EVENT_OFFSET + ring * 4;
+                       rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+                       rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
+                                                        index;
+
+               } else {
+                       /* put fence directly behind firmware */
+                       index = ALIGN(rdev->uvd_fw->size, 8);
+                       rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
+                       rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
+               }
+
        } else {
                r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
                if (r) {
@@ -778,9 +790,9 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
                index = RADEON_WB_SCRATCH_OFFSET +
                        rdev->fence_drv[ring].scratch_reg -
                        rdev->scratch.reg_base;
+               rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+               rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
        }
-       rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
-       rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
        radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
        rdev->fence_drv[ring].initialized = true;
        dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
index fe5c1f6..aa79603 100644 (file)
@@ -84,6 +84,7 @@ retry:
                return r;
        }
        *obj = &robj->gem_base;
+       robj->pid = task_pid_nr(current);
 
        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
@@ -575,3 +576,52 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
 {
        return drm_gem_handle_delete(file_priv, handle);
 }
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_bo *rbo;
+       unsigned i = 0;
+
+       mutex_lock(&rdev->gem.mutex);
+       list_for_each_entry(rbo, &rdev->gem.objects, list) {
+               unsigned domain;
+               const char *placement;
+
+               domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+               switch (domain) {
+               case RADEON_GEM_DOMAIN_VRAM:
+                       placement = "VRAM";
+                       break;
+               case RADEON_GEM_DOMAIN_GTT:
+                       placement = " GTT";
+                       break;
+               case RADEON_GEM_DOMAIN_CPU:
+               default:
+                       placement = " CPU";
+                       break;
+               }
+               seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+                          i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
+                          placement, (unsigned long)rbo->pid);
+               i++;
+       }
+       mutex_unlock(&rdev->gem.mutex);
+       return 0;
+}
+
+static struct drm_info_list radeon_debugfs_gem_list[] = {
+       {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
+#endif
+       return 0;
+}
index c75cb2c..4f2d4f4 100644 (file)
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
 
        if (rdev == NULL)
                return 0;
+       if (rdev->rmmio == NULL)
+               goto done_free;
        radeon_acpi_fini(rdev);
        radeon_modeset_fini(rdev);
        radeon_device_fini(rdev);
+
+done_free:
        kfree(rdev);
        dev->dev_private = NULL;
        return 0;
@@ -176,80 +180,65 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_info *info = data;
        struct radeon_mode_info *minfo = &rdev->mode_info;
-       uint32_t value, *value_ptr;
-       uint64_t value64, *value_ptr64;
+       uint32_t *value, value_tmp, *value_ptr, value_size;
+       uint64_t value64;
        struct drm_crtc *crtc;
        int i, found;
 
-       /* TIMESTAMP is a 64-bit value, needs special handling. */
-       if (info->request == RADEON_INFO_TIMESTAMP) {
-               if (rdev->family >= CHIP_R600) {
-                       value_ptr64 = (uint64_t*)((unsigned long)info->value);
-                       value64 = radeon_get_gpu_clock_counter(rdev);
-
-                       if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
-                               DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
-                               return -EFAULT;
-                       }
-                       return 0;
-               } else {
-                       DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
-                       return -EINVAL;
-               }
-       }
-
        value_ptr = (uint32_t *)((unsigned long)info->value);
-       if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
-               DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
-               return -EFAULT;
-       }
+       value = &value_tmp;
+       value_size = sizeof(uint32_t);
 
        switch (info->request) {
        case RADEON_INFO_DEVICE_ID:
-               value = dev->pci_device;
+               *value = dev->pci_device;
                break;
        case RADEON_INFO_NUM_GB_PIPES:
-               value = rdev->num_gb_pipes;
+               *value = rdev->num_gb_pipes;
                break;
        case RADEON_INFO_NUM_Z_PIPES:
-               value = rdev->num_z_pipes;
+               *value = rdev->num_z_pipes;
                break;
        case RADEON_INFO_ACCEL_WORKING:
                /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
                if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
-                       value = false;
+                       *value = false;
                else
-                       value = rdev->accel_working;
+                       *value = rdev->accel_working;
                break;
        case RADEON_INFO_CRTC_FROM_ID:
+               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+                       DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+                       return -EFAULT;
+               }
                for (i = 0, found = 0; i < rdev->num_crtc; i++) {
                        crtc = (struct drm_crtc *)minfo->crtcs[i];
-                       if (crtc && crtc->base.id == value) {
+                       if (crtc && crtc->base.id == *value) {
                                struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-                               value = radeon_crtc->crtc_id;
+                               *value = radeon_crtc->crtc_id;
                                found = 1;
                                break;
                        }
                }
                if (!found) {
-                       DRM_DEBUG_KMS("unknown crtc id %d\n", value);
+                       DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
                        return -EINVAL;
                }
                break;
        case RADEON_INFO_ACCEL_WORKING2:
-               value = rdev->accel_working;
+               *value = rdev->accel_working;
                break;
        case RADEON_INFO_TILING_CONFIG:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.tile_config;
+                       *value = rdev->config.si.tile_config;
                else if (rdev->family >= CHIP_CAYMAN)
-                       value = rdev->config.cayman.tile_config;
+                       *value = rdev->config.cayman.tile_config;
                else if (rdev->family >= CHIP_CEDAR)
-                       value = rdev->config.evergreen.tile_config;
+                       *value = rdev->config.evergreen.tile_config;
                else if (rdev->family >= CHIP_RV770)
-                       value = rdev->config.rv770.tile_config;
+                       *value = rdev->config.rv770.tile_config;
                else if (rdev->family >= CHIP_R600)
-                       value = rdev->config.r600.tile_config;
+                       *value = rdev->config.r600.tile_config;
                else {
                        DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
                        return -EINVAL;
@@ -262,73 +251,81 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 *
                 * When returning, the value is 1 if filp owns hyper-z access,
                 * 0 otherwise. */
-               if (value >= 2) {
-                       DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
+               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+                       DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+                       return -EFAULT;
+               }
+               if (*value >= 2) {
+                       DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
                        return -EINVAL;
                }
-               radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+               radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
                break;
        case RADEON_INFO_WANT_CMASK:
                /* The same logic as Hyper-Z. */
-               if (value >= 2) {
-                       DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+                       DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+                       return -EFAULT;
+               }
+               if (*value >= 2) {
+                       DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
                        return -EINVAL;
                }
-               radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
+               radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
                break;
        case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
                /* return clock value in KHz */
                if (rdev->asic->get_xclk)
-                       value = radeon_get_xclk(rdev) * 10;
+                       *value = radeon_get_xclk(rdev) * 10;
                else
-                       value = rdev->clock.spll.reference_freq * 10;
+                       *value = rdev->clock.spll.reference_freq * 10;
                break;
        case RADEON_INFO_NUM_BACKENDS:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_backends_per_se *
+                       *value = rdev->config.si.max_backends_per_se *
                                rdev->config.si.max_shader_engines;
                else if (rdev->family >= CHIP_CAYMAN)
-                       value = rdev->config.cayman.max_backends_per_se *
+                       *value = rdev->config.cayman.max_backends_per_se *
                                rdev->config.cayman.max_shader_engines;
                else if (rdev->family >= CHIP_CEDAR)
-                       value = rdev->config.evergreen.max_backends;
+                       *value = rdev->config.evergreen.max_backends;
                else if (rdev->family >= CHIP_RV770)
-                       value = rdev->config.rv770.max_backends;
+                       *value = rdev->config.rv770.max_backends;
                else if (rdev->family >= CHIP_R600)
-                       value = rdev->config.r600.max_backends;
+                       *value = rdev->config.r600.max_backends;
                else {
                        return -EINVAL;
                }
                break;
        case RADEON_INFO_NUM_TILE_PIPES:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_tile_pipes;
+                       *value = rdev->config.si.max_tile_pipes;
                else if (rdev->family >= CHIP_CAYMAN)
-                       value = rdev->config.cayman.max_tile_pipes;
+                       *value = rdev->config.cayman.max_tile_pipes;
                else if (rdev->family >= CHIP_CEDAR)
-                       value = rdev->config.evergreen.max_tile_pipes;
+                       *value = rdev->config.evergreen.max_tile_pipes;
                else if (rdev->family >= CHIP_RV770)
-                       value = rdev->config.rv770.max_tile_pipes;
+                       *value = rdev->config.rv770.max_tile_pipes;
                else if (rdev->family >= CHIP_R600)
-                       value = rdev->config.r600.max_tile_pipes;
+                       *value = rdev->config.r600.max_tile_pipes;
                else {
                        return -EINVAL;
                }
                break;
        case RADEON_INFO_FUSION_GART_WORKING:
-               value = 1;
+               *value = 1;
                break;
        case RADEON_INFO_BACKEND_MAP:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.backend_map;
+                       *value = rdev->config.si.backend_map;
                else if (rdev->family >= CHIP_CAYMAN)
-                       value = rdev->config.cayman.backend_map;
+                       *value = rdev->config.cayman.backend_map;
                else if (rdev->family >= CHIP_CEDAR)
-                       value = rdev->config.evergreen.backend_map;
+                       *value = rdev->config.evergreen.backend_map;
                else if (rdev->family >= CHIP_RV770)
-                       value = rdev->config.rv770.backend_map;
+                       *value = rdev->config.rv770.backend_map;
                else if (rdev->family >= CHIP_R600)
-                       value = rdev->config.r600.backend_map;
+                       *value = rdev->config.r600.backend_map;
                else {
                        return -EINVAL;
                }
@@ -337,50 +334,91 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                /* this is where we report if vm is supported or not */
                if (rdev->family < CHIP_CAYMAN)
                        return -EINVAL;
-               value = RADEON_VA_RESERVED_SIZE;
+               *value = RADEON_VA_RESERVED_SIZE;
                break;
        case RADEON_INFO_IB_VM_MAX_SIZE:
                /* this is where we report if vm is supported or not */
                if (rdev->family < CHIP_CAYMAN)
                        return -EINVAL;
-               value = RADEON_IB_VM_MAX_SIZE;
+               *value = RADEON_IB_VM_MAX_SIZE;
                break;
        case RADEON_INFO_MAX_PIPES:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_cu_per_sh;
+                       *value = rdev->config.si.max_cu_per_sh;
                else if (rdev->family >= CHIP_CAYMAN)
-                       value = rdev->config.cayman.max_pipes_per_simd;
+                       *value = rdev->config.cayman.max_pipes_per_simd;
                else if (rdev->family >= CHIP_CEDAR)
-                       value = rdev->config.evergreen.max_pipes;
+                       *value = rdev->config.evergreen.max_pipes;
                else if (rdev->family >= CHIP_RV770)
-                       value = rdev->config.rv770.max_pipes;
+                       *value = rdev->config.rv770.max_pipes;
                else if (rdev->family >= CHIP_R600)
-                       value = rdev->config.r600.max_pipes;
+                       *value = rdev->config.r600.max_pipes;
                else {
                        return -EINVAL;
                }
                break;
+       case RADEON_INFO_TIMESTAMP:
+               if (rdev->family < CHIP_R600) {
+                       DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+                       return -EINVAL;
+               }
+               value = (uint32_t*)&value64;
+               value_size = sizeof(uint64_t);
+               value64 = radeon_get_gpu_clock_counter(rdev);
+               break;
        case RADEON_INFO_MAX_SE:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_shader_engines;
+                       *value = rdev->config.si.max_shader_engines;
                else if (rdev->family >= CHIP_CAYMAN)
-                       value = rdev->config.cayman.max_shader_engines;
+                       *value = rdev->config.cayman.max_shader_engines;
                else if (rdev->family >= CHIP_CEDAR)
-                       value = rdev->config.evergreen.num_ses;
+                       *value = rdev->config.evergreen.num_ses;
                else
-                       value = 1;
+                       *value = 1;
                break;
        case RADEON_INFO_MAX_SH_PER_SE:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_sh_per_se;
+                       *value = rdev->config.si.max_sh_per_se;
                else
                        return -EINVAL;
                break;
+       case RADEON_INFO_FASTFB_WORKING:
+               *value = rdev->fastfb_working;
+               break;
+       case RADEON_INFO_RING_WORKING:
+               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+                       DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+                       return -EFAULT;
+               }
+               switch (*value) {
+               case RADEON_CS_RING_GFX:
+               case RADEON_CS_RING_COMPUTE:
+                       *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
+                       break;
+               case RADEON_CS_RING_DMA:
+                       *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
+                       *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
+                       break;
+               case RADEON_CS_RING_UVD:
+                       *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case RADEON_INFO_SI_TILE_MODE_ARRAY:
+               if (rdev->family < CHIP_TAHITI) {
+                       DRM_DEBUG_KMS("tile mode array is si only!\n");
+                       return -EINVAL;
+               }
+               value = rdev->config.si.tile_mode_array;
+               value_size = sizeof(uint32_t)*32;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
        }
-       if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
+       if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
                DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
                return -EFAULT;
        }
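The reworked ioctl funnels every reply through the single copy at the end by re-pointing `value` and adjusting `value_size`. A stripped-down, user-space sketch of that pattern follows; all names are placeholders and memcpy stands in for DRM_COPY_TO_USER.

#include <stdint.h>
#include <string.h>

static uint32_t example_tile_modes[32];

static int example_info(int request, void *reply_buf)
{
	uint32_t value_tmp, *value = &value_tmp;	/* default: one u32 */
	uint32_t value_size = sizeof(uint32_t);
	uint64_t value64;

	switch (request) {
	case 0:					/* ordinary 32-bit reply */
		*value = 42;
		break;
	case 1:					/* 64-bit reply, e.g. a timestamp */
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = 123456789ULL;
		break;
	case 2:					/* array reply */
		value = example_tile_modes;
		value_size = sizeof(example_tile_modes);
		break;
	default:
		return -1;
	}
	memcpy(reply_buf, value, value_size);	/* one copy handles every case */
	return 0;
}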
@@ -513,6 +551,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
                rdev->hyperz_filp = NULL;
        if (rdev->cmask_filp == file_priv)
                rdev->cmask_filp = NULL;
+       radeon_uvd_free_handles(rdev, file_priv);
 }
 
 /*
index 4003f5a..44e579e 100644 (file)
@@ -492,6 +492,29 @@ struct radeon_framebuffer {
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
                                ((em) == ATOM_ENCODER_MODE_DP_MST))
 
+struct atom_clock_dividers {
+       u32 post_div;
+       union {
+               struct {
+#ifdef __BIG_ENDIAN
+                       u32 reserved : 6;
+                       u32 whole_fb_div : 12;
+                       u32 frac_fb_div : 14;
+#else
+                       u32 frac_fb_div : 14;
+                       u32 whole_fb_div : 12;
+                       u32 reserved : 6;
+#endif
+               };
+               u32 fb_div;
+       };
+       u32 ref_div;
+       bool enable_post_div;
+       bool enable_dithen;
+       u32 vco_mode;
+       u32 real_clock;
+};
+
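A short illustration of how the anonymous union above is meant to be used: the packed fb_div word and the whole/fractional bitfields alias the same storage. The values are illustrative, and the bit positions shown assume the little-endian layout of the struct definition above.

struct atom_clock_dividers div = { 0 };

div.whole_fb_div = 0x28;	/* occupies bits 25:14 of fb_div */
div.frac_fb_div  = 0x100;	/* occupies bits 13:0 of fb_div  */
/* div.fb_div now reads back as (0x28 << 14) | 0x100 on little-endian */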
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
 extern enum radeon_tv_std
index d3aface..1424ccd 100644 (file)
@@ -321,8 +321,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 int radeon_bo_init(struct radeon_device *rdev)
 {
        /* Add an MTRR for the VRAM */
-       rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+       if (!rdev->fastfb_working) {
+               rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                        MTRR_TYPE_WRCOMB, 1);
+       }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
@@ -339,14 +341,14 @@ void radeon_bo_fini(struct radeon_device *rdev)
 void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head)
 {
-       if (lobj->wdomain) {
+       if (lobj->written) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
 }
 
-int radeon_bo_list_validate(struct list_head *head)
+int radeon_bo_list_validate(struct list_head *head, int ring)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
@@ -360,15 +362,17 @@ int radeon_bo_list_validate(struct list_head *head)
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
-                       domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+                       domain = lobj->domain;
                        
                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
+                       if (ring == R600_RING_TYPE_UVD_INDEX)
+                               radeon_uvd_force_into_uvd_segment(bo);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false);
                        if (unlikely(r)) {
-                               if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
-                                       domain |= RADEON_GEM_DOMAIN_GTT;
+                               if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
+                                       domain = lobj->alt_domain;
                                        goto retry;
                                }
                                return r;
index 5fc86b0..e2cb80a 100644 (file)
@@ -128,7 +128,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
 extern void radeon_bo_fini(struct radeon_device *rdev);
 extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, int ring);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                                struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
index 338fd6a..788c64c 100644 (file)
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct radeon_device *rdev = dev->dev_private;
 
        seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
-       seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+       /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+       if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+               seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+       else
+               seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
        seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
        if (rdev->asic->pm.get_memory_clock)
                seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
index 8d58e26..e17faa7 100644 (file)
@@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                radeon_semaphore_free(rdev, &ib->semaphore, NULL);
        }
        /* if we can't remember our last VM flush then flush now! */
-       if (ib->vm && !ib->vm->last_flush) {
+       /* XXX figure out why we have to flush for every IB */
+       if (ib->vm /*&& !ib->vm->last_flush*/) {
                radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
        }
        if (const_ib) {
@@ -368,7 +369,7 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 rptr;
 
-       if (rdev->wb.enabled)
+       if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
        else
                rptr = RREG32(ring->rptr_reg);
@@ -821,18 +822,20 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
-static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
-static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
-static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
-static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
 
 static struct drm_info_list radeon_debugfs_ring_info_list[] = {
-       {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
-       {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
-       {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
-       {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
-       {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
+       {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
+       {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
+       {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
+       {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
+       {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
+       {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
 };
 
 static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
index cb80099..0abe5a9 100644 (file)
@@ -64,7 +64,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
        }
 
        r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
-                            RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
+                            domain, NULL, &sa_manager->bo);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
index fda09c9..bbed4af 100644 (file)
@@ -252,6 +252,36 @@ void radeon_test_moves(struct radeon_device *rdev)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
 }
 
+static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
+                                            struct radeon_ring *ring,
+                                            struct radeon_fence **fence)
+{
+       int r;
+
+       if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
+               r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+               if (r) {
+                       DRM_ERROR("Failed to get dummy create msg\n");
+                       return r;
+               }
+
+               r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
+               if (r) {
+                       DRM_ERROR("Failed to get dummy destroy msg\n");
+                       return r;
+               }
+       } else {
+               r = radeon_ring_lock(rdev, ring, 64);
+               if (r) {
+                       DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
+                       return r;
+               }
+               radeon_fence_emit(rdev, fence, ring->idx);
+               radeon_ring_unlock_commit(rdev, ring);
+       }
+       return 0;
+}
+
 void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
@@ -272,21 +302,24 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       r = radeon_fence_emit(rdev, &fence1, ringA->idx);
-       if (r) {
-               DRM_ERROR("Failed to emit fence 1\n");
-               radeon_ring_unlock_undo(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA);
+
+       r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
+       if (r)
                goto out_cleanup;
-       }
-       radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       r = radeon_fence_emit(rdev, &fence2, ringA->idx);
+
+       r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
-               DRM_ERROR("Failed to emit fence 2\n");
-               radeon_ring_unlock_undo(rdev, ringA);
+               DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
+       radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA);
 
+       r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
+       if (r)
+               goto out_cleanup;
+
        mdelay(1000);
 
        if (radeon_fence_signaled(fence1)) {
@@ -364,27 +397,22 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
-       if (r) {
-               DRM_ERROR("Failed to emit sync fence 1\n");
-               radeon_ring_unlock_undo(rdev, ringA);
-               goto out_cleanup;
-       }
        radeon_ring_unlock_commit(rdev, ringA);
 
+       r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
+       if (r)
+               goto out_cleanup;
+
        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
-       r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
-       if (r) {
-               DRM_ERROR("Failed to create sync fence 2\n");
-               radeon_ring_unlock_undo(rdev, ringB);
-               goto out_cleanup;
-       }
        radeon_ring_unlock_commit(rdev, ringB);
+       r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
+       if (r)
+               goto out_cleanup;
 
        mdelay(1000);
 
@@ -393,7 +421,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        if (radeon_fence_signaled(fenceB)) {
-               DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+               DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
 
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644 (file)
index 0000000..906e5c0
--- /dev/null
@@ -0,0 +1,831 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "r600d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS    1000
+
+/* Firmware Names */
+#define FIRMWARE_RV710         "radeon/RV710_uvd.bin"
+#define FIRMWARE_CYPRESS       "radeon/CYPRESS_uvd.bin"
+#define FIRMWARE_SUMO          "radeon/SUMO_uvd.bin"
+#define FIRMWARE_TAHITI                "radeon/TAHITI_uvd.bin"
+
+MODULE_FIRMWARE(FIRMWARE_RV710);
+MODULE_FIRMWARE(FIRMWARE_CYPRESS);
+MODULE_FIRMWARE(FIRMWARE_SUMO);
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
+int radeon_uvd_init(struct radeon_device *rdev)
+{
+       struct platform_device *pdev;
+       unsigned long bo_size;
+       const char *fw_name;
+       int i, r;
+
+       INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
+       pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
+       r = IS_ERR(pdev);
+       if (r) {
+               dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
+               return -EINVAL;
+       }
+
+       switch (rdev->family) {
+       case CHIP_RV710:
+       case CHIP_RV730:
+       case CHIP_RV740:
+               fw_name = FIRMWARE_RV710;
+               break;
+
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+       case CHIP_JUNIPER:
+       case CHIP_REDWOOD:
+       case CHIP_CEDAR:
+               fw_name = FIRMWARE_CYPRESS;
+               break;
+
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
+       case CHIP_PALM:
+       case CHIP_CAYMAN:
+       case CHIP_BARTS:
+       case CHIP_TURKS:
+       case CHIP_CAICOS:
+               fw_name = FIRMWARE_SUMO;
+               break;
+
+       case CHIP_TAHITI:
+       case CHIP_VERDE:
+       case CHIP_PITCAIRN:
+       case CHIP_ARUBA:
+               fw_name = FIRMWARE_TAHITI;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
+       if (r) {
+               dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+                       fw_name);
+               platform_device_unregister(pdev);
+               return r;
+       }
+
+       platform_device_unregister(pdev);
+
+       bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+                 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+       r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
+                            RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
+               return r;
+       }
+
+       r = radeon_uvd_resume(rdev);
+       if (r)
+               return r;
+
+       memset(rdev->uvd.cpu_addr, 0, bo_size);
+       memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+       r = radeon_uvd_suspend(rdev);
+       if (r)
+               return r;
+
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               atomic_set(&rdev->uvd.handles[i], 0);
+               rdev->uvd.filp[i] = NULL;
+       }
+
+       return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+       radeon_uvd_suspend(rdev);
+       radeon_bo_unref(&rdev->uvd.vcpu_bo);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->uvd.vcpu_bo == NULL)
+               return 0;
+
+       r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+       if (!r) {
+               radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+               radeon_bo_unpin(rdev->uvd.vcpu_bo);
+               radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+       }
+       return r;
+}
+
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->uvd.vcpu_bo == NULL)
+               return -EINVAL;
+
+       r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+       if (r) {
+               radeon_bo_unref(&rdev->uvd.vcpu_bo);
+               dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
+               return r;
+       }
+
+       r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+                         &rdev->uvd.gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+               radeon_bo_unref(&rdev->uvd.vcpu_bo);
+               dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+               return r;
+       }
+
+       r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+       if (r) {
+               dev_err(rdev->dev, "(%d) UVD map failed\n", r);
+               return r;
+       }
+
+       radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+       return 0;
+}
+
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+{
+       rbo->placement.fpfn = 0 >> PAGE_SHIFT;
+       rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+}
+
+void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+       int i, r;
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               if (rdev->uvd.filp[i] == filp) {
+                       uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+                       struct radeon_fence *fence;
+
+                       r = radeon_uvd_get_destroy_msg(rdev,
+                               R600_RING_TYPE_UVD_INDEX, handle, &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
+                               continue;
+                       }
+
+                       radeon_fence_wait(fence, false);
+                       radeon_fence_unref(&fence);
+
+                       rdev->uvd.filp[i] = NULL;
+                       atomic_set(&rdev->uvd.handles[i], 0);
+               }
+       }
+}
+
+static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+       unsigned stream_type = msg[4];
+       unsigned width = msg[6];
+       unsigned height = msg[7];
+       unsigned dpb_size = msg[9];
+       unsigned pitch = msg[28];
+
+       unsigned width_in_mb = width / 16;
+       unsigned height_in_mb = ALIGN(height / 16, 2);
+
+       unsigned image_size, tmp, min_dpb_size;
+
+       image_size = width * height;
+       image_size += image_size / 2;
+       image_size = ALIGN(image_size, 1024);
+
+       switch (stream_type) {
+       case 0: /* H264 */
+
+               /* reference picture buffer */
+               min_dpb_size = image_size * 17;
+
+               /* macroblock context buffer */
+               min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
+
+               /* IT surface buffer */
+               min_dpb_size += width_in_mb * height_in_mb * 32;
+               break;
+
+       case 1: /* VC1 */
+
+               /* reference picture buffer */
+               min_dpb_size = image_size * 3;
+
+               /* CONTEXT_BUFFER */
+               min_dpb_size += width_in_mb * height_in_mb * 128;
+
+               /* IT surface buffer */
+               min_dpb_size += width_in_mb * 64;
+
+               /* DB surface buffer */
+               min_dpb_size += width_in_mb * 128;
+
+               /* BP */
+               tmp = max(width_in_mb, height_in_mb);
+               min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+               break;
+
+       case 3: /* MPEG2 */
+
+               /* reference picture buffer */
+               min_dpb_size = image_size * 3;
+               break;
+
+       case 4: /* MPEG4 */
+
+               /* reference picture buffer */
+               min_dpb_size = image_size * 3;
+
+               /* CM */
+               min_dpb_size += width_in_mb * height_in_mb * 64;
+
+               /* IT surface buffer */
+               min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+               break;
+
+       default:
+               DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+               return -EINVAL;
+       }
+
+       if (width > pitch) {
+               DRM_ERROR("Invalid UVD decoding target pitch!\n");
+               return -EINVAL;
+       }
+
+       if (dpb_size < min_dpb_size) {
+               DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+                         dpb_size, min_dpb_size);
+               return -EINVAL;
+       }
+
+       buf_sizes[0x1] = dpb_size;
+       buf_sizes[0x2] = image_size;
+       return 0;
+}
+
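Worked numbers for the H.264 branch above, using a 1920x1080 stream; this is stand-alone arithmetic for illustration, not driver code, and ALIGN_UP is a local stand-in for the kernel's ALIGN macro.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned width = 1920, height = 1080;
	unsigned width_in_mb = width / 16;			/* 120 */
	unsigned height_in_mb = ALIGN_UP(height / 16, 2);	/* 68 */
	unsigned image_size = ALIGN_UP(width * height * 3 / 2, 1024);
	unsigned min_dpb = image_size * 17			 /* reference pictures */
			 + width_in_mb * height_in_mb * 17 * 192 /* macroblock context */
			 + width_in_mb * height_in_mb * 32;	 /* IT surface */

	printf("min H.264 dpb: %u bytes (~%u MB)\n", min_dpb, min_dpb >> 20);
	return 0;	/* prints roughly 76 MB for this geometry */
}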
+static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+                            unsigned offset, unsigned buf_sizes[])
+{
+       int32_t *msg, msg_type, handle;
+       void *ptr;
+
+       int i, r;
+
+       if (offset & 0x3F) {
+               DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+               return -EINVAL;
+       }
+
+       r = radeon_bo_kmap(bo, &ptr);
+       if (r)
+               return r;
+
+       msg = ptr + offset;
+
+       msg_type = msg[1];
+       handle = msg[2];
+
+       if (handle == 0) {
+               DRM_ERROR("Invalid UVD handle!\n");
+               radeon_bo_kunmap(bo);
+               return -EINVAL;
+       }
+
+       if (msg_type == 1) {
+               /* it's a decode msg, calc buffer sizes */
+               r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+               radeon_bo_kunmap(bo);
+               if (r)
+                       return r;
+
+       } else if (msg_type == 2) {
+               /* it's a destroy msg, free the handle */
+               for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+                       atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+               radeon_bo_kunmap(bo);
+               return 0;
+       } else {
+               /* it's a create msg, no special handling needed */
+               radeon_bo_kunmap(bo);
+       }
+
+       /* create or decode, validate the handle */
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+                       return 0;
+       }
+
+       /* handle not found try to alloc a new one */
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+                       p->rdev->uvd.filp[i] = p->filp;
+                       return 0;
+               }
+       }
+
+       DRM_ERROR("No more free UVD handles!\n");
+       return -EINVAL;
+}
+
+static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+                              int data0, int data1,
+                              unsigned buf_sizes[])
+{
+       struct radeon_cs_chunk *relocs_chunk;
+       struct radeon_cs_reloc *reloc;
+       unsigned idx, cmd, offset;
+       uint64_t start, end;
+       int r;
+
+       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       offset = radeon_get_ib_value(p, data0);
+       idx = radeon_get_ib_value(p, data1);
+       if (idx >= relocs_chunk->length_dw) {
+               DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+                         idx, relocs_chunk->length_dw);
+               return -EINVAL;
+       }
+
+       reloc = p->relocs_ptr[(idx / 4)];
+       start = reloc->lobj.gpu_offset;
+       end = start + radeon_bo_size(reloc->robj);
+       start += offset;
+
+       p->ib.ptr[data0] = start & 0xFFFFFFFF;
+       p->ib.ptr[data1] = start >> 32;
+
+       cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+       if (cmd < 0x4) {
+               if ((end - start) < buf_sizes[cmd]) {
+                       DRM_ERROR("buffer to small (%d / %d)!\n",
+                                 (unsigned)(end - start), buf_sizes[cmd]);
+                       return -EINVAL;
+               }
+
+       } else if (cmd != 0x100) {
+               DRM_ERROR("invalid UVD command %X!\n", cmd);
+               return -EINVAL;
+       }
+
+       if ((start >> 28) != (end >> 28)) {
+               DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+                         start, end);
+               return -EINVAL;
+       }
+
+       /* TODO: is this still necessary on NI+ ? */
+       if ((cmd == 0 || cmd == 0x3) &&
+           (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+               DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+                         start, end);
+               return -EINVAL;
+       }
+
+       if (cmd == 0) {
+               r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
+                            struct radeon_cs_packet *pkt,
+                            int *data0, int *data1,
+                            unsigned buf_sizes[])
+{
+       int i, r;
+
+       p->idx++;
+       for (i = 0; i <= pkt->count; ++i) {
+               switch (pkt->reg + i*4) {
+               case UVD_GPCOM_VCPU_DATA0:
+                       *data0 = p->idx;
+                       break;
+               case UVD_GPCOM_VCPU_DATA1:
+                       *data1 = p->idx;
+                       break;
+               case UVD_GPCOM_VCPU_CMD:
+                       r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+                       if (r)
+                               return r;
+                       break;
+               case UVD_ENGINE_CNTL:
+                       break;
+               default:
+                       DRM_ERROR("Invalid reg 0x%X!\n",
+                                 pkt->reg + i*4);
+                       return -EINVAL;
+               }
+               p->idx++;
+       }
+       return 0;
+}
+
+int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_packet pkt;
+       int r, data0 = 0, data1 = 0;
+
+       /* minimum buffer sizes */
+       unsigned buf_sizes[] = {
+               [0x00000000]    =       2048,
+               [0x00000001]    =       32 * 1024 * 1024,
+               [0x00000002]    =       2048 * 1152 * 3,
+               [0x00000003]    =       2048,
+       };
+
+       if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+               DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+                         p->chunks[p->chunk_ib_idx].length_dw);
+               return -EINVAL;
+       }
+
+       if (p->chunk_relocs_idx == -1) {
+               DRM_ERROR("No relocation chunk !\n");
+               return -EINVAL;
+       }
+
+
+       do {
+               r = radeon_cs_packet_parse(p, &pkt, p->idx);
+               if (r)
+                       return r;
+               switch (pkt.type) {
+               case RADEON_PACKET_TYPE0:
+                       r = radeon_uvd_cs_reg(p, &pkt, &data0,
+                                             &data1, buf_sizes);
+                       if (r)
+                               return r;
+                       break;
+               case RADEON_PACKET_TYPE2:
+                       p->idx += pkt.count + 2;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+                       return -EINVAL;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+       return 0;
+}
+
+static int radeon_uvd_send_msg(struct radeon_device *rdev,
+                              int ring, struct radeon_bo *bo,
+                              struct radeon_fence **fence)
+{
+       struct ttm_validate_buffer tv;
+       struct list_head head;
+       struct radeon_ib ib;
+       uint64_t addr;
+       int i, r;
+
+       memset(&tv, 0, sizeof(tv));
+       tv.bo = &bo->tbo;
+
+       INIT_LIST_HEAD(&head);
+       list_add(&tv.head, &head);
+
+       r = ttm_eu_reserve_buffers(&head);
+       if (r)
+               return r;
+
+       radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
+       radeon_uvd_force_into_uvd_segment(bo);
+
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       if (r) {
+               ttm_eu_backoff_reservation(&head);
+               return r;
+       }
+
+       r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+       if (r) {
+               ttm_eu_backoff_reservation(&head);
+               return r;
+       }
+
+       addr = radeon_bo_gpu_offset(bo);
+       ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
+       ib.ptr[1] = addr;
+       ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
+       ib.ptr[3] = addr >> 32;
+       ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
+       ib.ptr[5] = 0;
+       for (i = 6; i < 16; ++i)
+               ib.ptr[i] = PACKET2(0);
+       ib.length_dw = 16;
+
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r) {
+               ttm_eu_backoff_reservation(&head);
+               return r;
+       }
+       ttm_eu_fence_buffer_objects(&head, ib.fence);
+
+       if (fence)
+               *fence = radeon_fence_ref(ib.fence);
+
+       radeon_ib_free(rdev, &ib);
+       radeon_bo_unref(&bo);
+       return 0;
+}
+
+/* multiple fence commands without any stream commands in between can
+   crash the vcpu so just try to emit a dummy create/destroy msg to
+   avoid this */
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+                             uint32_t handle, struct radeon_fence **fence)
+{
+       struct radeon_bo *bo;
+       uint32_t *msg;
+       int r, i;
+
+       r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+                            RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+       if (r)
+               return r;
+
+       r = radeon_bo_reserve(bo, false);
+       if (r) {
+               radeon_bo_unref(&bo);
+               return r;
+       }
+
+       r = radeon_bo_kmap(bo, (void **)&msg);
+       if (r) {
+               radeon_bo_unreserve(bo);
+               radeon_bo_unref(&bo);
+               return r;
+       }
+
+       /* stitch together a UVD create msg */
+       msg[0] = 0x00000de4;
+       msg[1] = 0x00000000;
+       msg[2] = handle;
+       msg[3] = 0x00000000;
+       msg[4] = 0x00000000;
+       msg[5] = 0x00000000;
+       msg[6] = 0x00000000;
+       msg[7] = 0x00000780;
+       msg[8] = 0x00000440;
+       msg[9] = 0x00000000;
+       msg[10] = 0x01b37000;
+       for (i = 11; i < 1024; ++i)
+               msg[i] = 0x0;
+
+       radeon_bo_kunmap(bo);
+       radeon_bo_unreserve(bo);
+
+       return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+                              uint32_t handle, struct radeon_fence **fence)
+{
+       struct radeon_bo *bo;
+       uint32_t *msg;
+       int r, i;
+
+       r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+                            RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+       if (r)
+               return r;
+
+       r = radeon_bo_reserve(bo, false);
+       if (r) {
+               radeon_bo_unref(&bo);
+               return r;
+       }
+
+       r = radeon_bo_kmap(bo, (void **)&msg);
+       if (r) {
+               radeon_bo_unreserve(bo);
+               radeon_bo_unref(&bo);
+               return r;
+       }
+
+       /* stitch together a UVD destroy msg */
+       msg[0] = 0x00000de4;
+       msg[1] = 0x00000002;
+       msg[2] = handle;
+       msg[3] = 0x00000000;
+       for (i = 4; i < 1024; ++i)
+               msg[i] = 0x0;
+
+       radeon_bo_kunmap(bo);
+       radeon_bo_unreserve(bo);
+
+       return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+       struct radeon_device *rdev =
+               container_of(work, struct radeon_device, uvd.idle_work.work);
+
+       if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+               radeon_set_uvd_clocks(rdev, 0, 0);
+       else
+               schedule_delayed_work(&rdev->uvd.idle_work,
+                                     msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+       bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+       set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+                                           msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+       if (set_clocks)
+               radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
+
+static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
+                                             unsigned target_freq,
+                                             unsigned pd_min,
+                                             unsigned pd_even)
+{
+       unsigned post_div = vco_freq / target_freq;
+
+       /* adjust to post divider minimum value */
+       if (post_div < pd_min)
+               post_div = pd_min;
+
+       /* we always need a frequency less than or equal to the target */
+       if ((vco_freq / post_div) > target_freq)
+               post_div += 1;
+
+       /* post dividers above a certain value must be even */
+       if (post_div > pd_even && post_div % 2)
+               post_div += 1;
+
+       return post_div;
+}
+
+/**
+ * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
+ * Returns zero on success, -EINVAL on error.
+ */
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+                                 unsigned vclk, unsigned dclk,
+                                 unsigned vco_min, unsigned vco_max,
+                                 unsigned fb_factor, unsigned fb_mask,
+                                 unsigned pd_min, unsigned pd_max,
+                                 unsigned pd_even,
+                                 unsigned *optimal_fb_div,
+                                 unsigned *optimal_vclk_div,
+                                 unsigned *optimal_dclk_div)
+{
+       unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
+
+       /* start off with something large */
+       unsigned optimal_score = ~0;
+
+       /* loop through vco from low to high */
+       vco_min = max(max(vco_min, vclk), dclk);
+       for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+
+               uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+               unsigned vclk_div, dclk_div, score;
+
+               do_div(fb_div, ref_freq);
+
+               /* fb div out of range? */
+               if (fb_div > fb_mask)
+                       break; /* it can only get worse */
+
+               fb_div &= fb_mask;
+
+               /* calc vclk divider with current vco freq */
+               vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
+                                                        pd_min, pd_even);
+               if (vclk_div > pd_max)
+                       break; /* vco is too big, it has to stop */
+
+               /* calc dclk divider with current vco freq */
+               dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+                                                        pd_min, pd_even);
+               if (dclk_div > pd_max)
+                       break; /* vco is too big, it has to stop */
+
+               /* calc score with current vco freq */
+               score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+               /* determine if this vco setting is better than current optimal settings */
+               if (score < optimal_score) {
+                       *optimal_fb_div = fb_div;
+                       *optimal_vclk_div = vclk_div;
+                       *optimal_dclk_div = dclk_div;
+                       optimal_score = score;
+                       if (optimal_score == 0)
+                               break; /* it can't get better than this */
+               }
+       }
+
+       /* did we find a valid setup? */
+       if (optimal_score == ~0)
+               return -EINVAL;
+
+       return 0;
+}
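The search above is a simple brute-force sweep: try every candidate VCO frequency in 100-unit steps, derive the feedback and post dividers, and keep the combination whose delivered clocks fall least short of the requested ones. A standalone model of the loop is sketched below; do_div() is replaced by plain 64-bit division, and the reference clock (rdev->clock.spll.reference_freq in the driver) is passed in explicitly with an assumed 27 MHz value in 10 kHz units (2700), so treat the numbers as illustrative only. The limits in main() mirror the RV770 caller shown further down in this diff.

#include <stdio.h>
#include <stdint.h>

static unsigned post_div(unsigned vco, unsigned target,
                         unsigned pd_min, unsigned pd_even)
{
        unsigned pd = vco / target;

        if (pd < pd_min)
                pd = pd_min;
        if (vco / pd > target)
                pd++;
        if (pd > pd_even && pd % 2)
                pd++;
        return pd;
}

static int calc_upll_dividers(unsigned ref_freq, unsigned vclk, unsigned dclk,
                              unsigned vco_min, unsigned vco_max,
                              unsigned fb_factor, unsigned fb_mask,
                              unsigned pd_min, unsigned pd_max, unsigned pd_even,
                              unsigned *fb_out, unsigned *vdiv_out, unsigned *ddiv_out)
{
        unsigned best = ~0u, vco;

        if (vco_min < vclk)
                vco_min = vclk;
        if (vco_min < dclk)
                vco_min = dclk;

        for (vco = vco_min; vco <= vco_max; vco += 100) {
                uint64_t fb_div = (uint64_t)vco * fb_factor / ref_freq;
                unsigned vdiv, ddiv, score;

                if (fb_div > fb_mask)
                        break;          /* can only get worse from here */
                vdiv = post_div(vco, vclk, pd_min, pd_even);
                if (vdiv > pd_max)
                        break;
                ddiv = post_div(vco, dclk, pd_min, pd_even);
                if (ddiv > pd_max)
                        break;

                /* how far short of the requested clocks do we fall? */
                score = vclk - vco / vdiv + dclk - vco / ddiv;
                if (score < best) {
                        *fb_out = (unsigned)(fb_div & fb_mask);
                        *vdiv_out = vdiv;
                        *ddiv_out = ddiv;
                        best = score;
                        if (!best)
                                break;  /* exact match, stop early */
                }
        }
        return best == ~0u ? -1 : 0;
}

int main(void)
{
        unsigned fb, vdiv, ddiv;

        /* 2700 is an assumed 27 MHz reference in 10 kHz units */
        if (!calc_upll_dividers(2700, 53300, 40000, 50000, 160000,
                                43663, 0x03FFFFFE, 1, 30, ~0u,
                                &fb, &vdiv, &ddiv))
                printf("fb_div=%u vclk_div=%u dclk_div=%u\n", fb, vdiv, ddiv);
        return 0;
}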
+
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+                               unsigned cg_upll_func_cntl)
+{
+       unsigned i;
+
+       /* make sure UPLL_CTLREQ is deasserted */
+       WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+       mdelay(10);
+
+       /* assert UPLL_CTLREQ */
+       WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+       /* wait for CTLACK and CTLACK2 to get asserted */
+       for (i = 0; i < 100; ++i) {
+               uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+               if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+                       break;
+               mdelay(10);
+       }
+
+       /* deassert UPLL_CTLREQ */
+       WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+       if (i == 100) {
+               DRM_ERROR("Timeout setting UVD clocks!\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
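Many of the register accesses in this series go through WREG32_P(), a masked read-modify-write. Assuming the usual radeon definition (the third argument names the bits to preserve, and the value is merged into the rest), the idiom amounts to the sketch below, so WREG32_P(reg, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK) flips only the CTLREQ bit and leaves everything else untouched.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;                               /* stand-in for MMIO */

static uint32_t rreg32(uint32_t reg) { (void)reg; return fake_reg; }
static void wreg32(uint32_t reg, uint32_t v) { (void)reg; fake_reg = v; }

/* assumed shape of WREG32_P(): keep the bits in keep_mask, merge in val */
static void wreg32_p(uint32_t reg, uint32_t val, uint32_t keep_mask)
{
        uint32_t tmp = rreg32(reg);

        tmp &= keep_mask;
        tmp |= val & ~keep_mask;
        wreg32(reg, tmp);
}

int main(void)
{
        fake_reg = 0xffff0000;
        wreg32_p(0x718, 0x00000008, ~0x00000008u);      /* assert only bit 3 */
        printf("0x%08x\n", fake_reg);                   /* prints 0xffff0008 */
        return 0;
}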
index 5a0fc74..46fa1b0 100644 (file)
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
        AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
 };
 
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+       if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+               return true;
+       else
+               return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+       u32 pos1, pos2;
+
+       pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+       pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       if (pos1 != pos2)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
 void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-       int i;
+       unsigned i = 0;
 
        if (crtc >= rdev->num_crtc)
                return;
 
-       if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
-               for (i = 0; i < rdev->usec_timeout; i++) {
-                       if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
+       if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+               return;
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (avivo_is_in_vblank(rdev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!avivo_is_counter_moving(rdev, crtc))
                                break;
-                       udelay(1);
                }
-               for (i = 0; i < rdev->usec_timeout; i++) {
-                       if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+       }
+
+       while (!avivo_is_in_vblank(rdev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!avivo_is_counter_moving(rdev, crtc))
                                break;
-                       udelay(1);
                }
        }
 }
index 5706d2a..ab4c86c 100644 (file)
@@ -148,6 +148,8 @@ void rs690_pm_info(struct radeon_device *rdev)
 static void rs690_mc_init(struct radeon_device *rdev)
 {
        u64 base;
+       uint32_t h_addr, l_addr;
+       unsigned long long k8_addr;
 
        rs400_gart_adjust_size(rdev);
        rdev->mc.vram_is_ddr = true;
@@ -160,6 +162,27 @@ static void rs690_mc_init(struct radeon_device *rdev)
        base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
        base = G_000100_MC_FB_START(base) << 16;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+       /* Use K8 direct mapping for fast fb access. */ 
+       rdev->fastfb_working = false;
+       h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+       l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+       k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+       if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)      
+#endif
+       {
+               /* FastFB is only meant for UMA memory, so simply disable it
+                * when sideport memory is present.
+                */
+               if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+                       DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", 
+                                       (unsigned long long)rdev->mc.aper_base, k8_addr);
+                       rdev->mc.aper_base = (resource_size_t)k8_addr;
+                       rdev->fastfb_working = true;
+               }
+       }  
+
        rs690_pm_info(rdev);
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
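The 40-bit K8 aperture address above is stitched together from two MC registers: the 8-bit K8_ADDR_EXT field supplies bits 39:32 and K8_FB_LOCATION the low 32 bits; the #if guard then keeps the fast path off on 32-bit non-PAE kernels whenever the aperture would cross the 4 GB boundary. A tiny standalone illustration with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t h_addr = 0x01;                 /* K8_ADDR_EXT, bits 39:32 (example) */
        uint32_t l_addr = 0xd0000000;           /* K8_FB_LOCATION, bits 31:0 (example) */
        unsigned long long k8_addr = ((unsigned long long)h_addr << 32) | l_addr;

        printf("k8_addr = 0x%llx\n", k8_addr);  /* 0x1d0000000, above 4 GB */
        return 0;
}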
index 36e6398..8af3ccf 100644 (file)
@@ -29,6 +29,9 @@
 #define __RS690D_H__
 
 /* Registers */
+#define R_00001E_K8_FB_LOCATION                      0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL                    0x00005F
+#define   G_00005F_K8_ADDR_EXT(x)                      (((x) >> 0) & 0xFF)
 #define R_000078_MC_INDEX                            0x000078
 #define   S_000078_MC_IND_ADDR(x)                      (((x) & 0x1FF) << 0)
 #define   G_000078_MC_IND_ADDR(x)                      (((x) >> 0) & 0x1FF)
index 435ed35..ffcba73 100644 (file)
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
                        tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
                        if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
                                radeon_wait_for_vblank(rdev, i);
+                               WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
                                WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                        /* wait for the next frame */
                        frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
                                        break;
                                udelay(1);
                        }
+
+                       /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+                       WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+                       tmp &= ~AVIVO_CRTC_EN;
+                       WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       save->crtc_enabled[i] = false;
+                       /* ***** */
                } else {
                        save->crtc_enabled[i] = false;
                }
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
        }
        /* wait for the MC to settle */
        udelay(100);
+
+       /* lock double buffered regs */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+                       if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+                               tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+                               WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (!(tmp & 1)) {
+                               tmp |= 1;
+                               WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+               }
+       }
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        /* update crtc base addresses */
        for (i = 0; i < rdev->num_crtc; i++) {
                if (rdev->family >= CHIP_RV770) {
-                       if (i == 1) {
+                       if (i == 0) {
                                WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
                                       upper_32_bits(rdev->mc.vram_start));
                                WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        }
        WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
 
+       /* unlock regs and wait for update */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+                       if ((tmp & 0x3) != 0) {
+                               tmp &= ~0x3;
+                               WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+                       if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+                               tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+                               WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (tmp & 1) {
+                               tmp &= ~1;
+                               WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+                               if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+
        if (rdev->family >= CHIP_R600) {
                /* unblackout the MC */
                if (rdev->family >= CHIP_RV770)
index d63fe1d..83f612a 100644 (file)
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+       unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+       int r;
+
+       /* RV740 uses evergreen uvd clk programming */
+       if (rdev->family == CHIP_RV740)
+               return evergreen_set_uvd_clocks(rdev, vclk, dclk);
+
+       /* bypass vclk and dclk with bclk */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+                VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+                ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+       if (!vclk || !dclk) {
+               /* keep the Bypass mode, put PLL to sleep */
+               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+               return 0;
+       }
+
+       r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+                                         43663, 0x03FFFFFE, 1, 30, ~0,
+                                         &fb_div, &vclk_div, &dclk_div);
+       if (r)
+               return r;
+
+       fb_div |= 1;
+       vclk_div -= 1;
+       dclk_div -= 1;
+
+       /* set UPLL_FB_DIV to 0x50000 */
+       WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
+
+       /* deassert UPLL_RESET and UPLL_SLEEP */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
+
+       /* assert BYPASS EN and FB_DIV[0] <- ??? why? */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+       WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
+
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+
+       /* assert PLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+       /* set the required FB_DIV, REF_DIV and post divider values */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+                UPLL_SW_HILEN(vclk_div >> 1) |
+                UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+                UPLL_SW_HILEN2(dclk_div >> 1) |
+                UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
+                ~UPLL_SW_MASK);
+
+       WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
+                ~UPLL_FB_DIV_MASK);
+
+       /* give the PLL some time to settle */
+       mdelay(15);
+
+       /* deassert PLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+       mdelay(15);
+
+       /* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+       WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
+
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+
+       /* switch VCLK and DCLK selection */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+                VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+                ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+       mdelay(100);
+
+       return 0;
+}
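A small detail in the CG_UPLL_FUNC_CNTL_2 programming above: each post divider is written as separate high/low phase lengths, and the half-and-remainder split gives odd dividers their extra cycle in the low phase. That reading of HILEN/LOLEN is an inference from the arithmetic, not from documentation; the sketch below only demonstrates the split itself.

#include <stdio.h>

int main(void)
{
        unsigned vclk_div = 5;                              /* example divider */
        unsigned hilen = vclk_div >> 1;                     /* 2 */
        unsigned lolen = (vclk_div >> 1) + (vclk_div & 1);  /* 3 */

        printf("div=%u -> hilen=%u lolen=%u (sum=%u)\n",
               vclk_div, hilen, lolen, hilen + lolen);
        return 0;
}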
+
+static const u32 r7xx_golden_registers[] =
+{
+       0x8d00, 0xffffffff, 0x0e0e0074,
+       0x8d04, 0xffffffff, 0x013a2b34,
+       0x9508, 0xffffffff, 0x00000002,
+       0x8b20, 0xffffffff, 0,
+       0x88c4, 0xffffffff, 0x000000c2,
+       0x28350, 0xffffffff, 0,
+       0x9058, 0xffffffff, 0x0fffc40f,
+       0x240c, 0xffffffff, 0x00000380,
+       0x733c, 0xffffffff, 0x00000002,
+       0x2650, 0x00040000, 0,
+       0x20bc, 0x00040000, 0,
+       0x7300, 0xffffffff, 0x001000f0
+};
+
+static const u32 r7xx_golden_dyn_gpr_registers[] =
+{
+       0x8db0, 0xffffffff, 0x98989898,
+       0x8db4, 0xffffffff, 0x98989898,
+       0x8db8, 0xffffffff, 0x98989898,
+       0x8dbc, 0xffffffff, 0x98989898,
+       0x8dc0, 0xffffffff, 0x98989898,
+       0x8dc4, 0xffffffff, 0x98989898,
+       0x8dc8, 0xffffffff, 0x98989898,
+       0x8dcc, 0xffffffff, 0x98989898,
+       0x88c4, 0xffffffff, 0x00000082
+};
+
+static const u32 rv770_golden_registers[] =
+{
+       0x562c, 0xffffffff, 0,
+       0x3f90, 0xffffffff, 0,
+       0x9148, 0xffffffff, 0,
+       0x3f94, 0xffffffff, 0,
+       0x914c, 0xffffffff, 0,
+       0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770ce_golden_registers[] =
+{
+       0x562c, 0xffffffff, 0,
+       0x3f90, 0xffffffff, 0x00cc0000,
+       0x9148, 0xffffffff, 0x00cc0000,
+       0x3f94, 0xffffffff, 0x00cc0000,
+       0x914c, 0xffffffff, 0x00cc0000,
+       0x9b7c, 0xffffffff, 0x00fa0000,
+       0x3f8c, 0xffffffff, 0x00fa0000,
+       0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770_mgcg_init[] =
+{
+       0x8bcc, 0xffffffff, 0x130300f9,
+       0x5448, 0xffffffff, 0x100,
+       0x55e4, 0xffffffff, 0x100,
+       0x160c, 0xffffffff, 0x100,
+       0x5644, 0xffffffff, 0x100,
+       0xc164, 0xffffffff, 0x100,
+       0x8a18, 0xffffffff, 0x100,
+       0x897c, 0xffffffff, 0x8000100,
+       0x8b28, 0xffffffff, 0x3c000100,
+       0x9144, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10000,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10001,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10002,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10003,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x0,
+       0x9870, 0xffffffff, 0x100,
+       0x8d58, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x0,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x1,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x2,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x3,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x4,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x5,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x6,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x7,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x8,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x9,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x8000,
+       0x9490, 0xffffffff, 0x0,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x1,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x2,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x3,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x4,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x5,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x6,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x7,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x8,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x9,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x8000,
+       0x9604, 0xffffffff, 0x0,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x1,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x2,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x3,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x4,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x5,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x6,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x7,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x8,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x9,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x80000000,
+       0x9030, 0xffffffff, 0x100,
+       0x9034, 0xffffffff, 0x100,
+       0x9038, 0xffffffff, 0x100,
+       0x903c, 0xffffffff, 0x100,
+       0x9040, 0xffffffff, 0x100,
+       0xa200, 0xffffffff, 0x100,
+       0xa204, 0xffffffff, 0x100,
+       0xa208, 0xffffffff, 0x100,
+       0xa20c, 0xffffffff, 0x100,
+       0x971c, 0xffffffff, 0x100,
+       0x915c, 0xffffffff, 0x00020001,
+       0x9160, 0xffffffff, 0x00040003,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00080007,
+       0x9174, 0xffffffff, 0x000a0009,
+       0x9178, 0xffffffff, 0x000c000b,
+       0x917c, 0xffffffff, 0x000e000d,
+       0x9180, 0xffffffff, 0x0010000f,
+       0x918c, 0xffffffff, 0x00120011,
+       0x9190, 0xffffffff, 0x00140013,
+       0x9194, 0xffffffff, 0x00020001,
+       0x9198, 0xffffffff, 0x00040003,
+       0x919c, 0xffffffff, 0x00060005,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000a0009,
+       0x91b0, 0xffffffff, 0x000c000b,
+       0x91b4, 0xffffffff, 0x000e000d,
+       0x91b8, 0xffffffff, 0x0010000f,
+       0x91c4, 0xffffffff, 0x00120011,
+       0x91c8, 0xffffffff, 0x00140013,
+       0x91cc, 0xffffffff, 0x00020001,
+       0x91d0, 0xffffffff, 0x00040003,
+       0x91d4, 0xffffffff, 0x00060005,
+       0x91e0, 0xffffffff, 0x00080007,
+       0x91e4, 0xffffffff, 0x000a0009,
+       0x91e8, 0xffffffff, 0x000c000b,
+       0x91ec, 0xffffffff, 0x00020001,
+       0x91f0, 0xffffffff, 0x00040003,
+       0x91f4, 0xffffffff, 0x00060005,
+       0x9200, 0xffffffff, 0x00080007,
+       0x9204, 0xffffffff, 0x000a0009,
+       0x9208, 0xffffffff, 0x000c000b,
+       0x920c, 0xffffffff, 0x000e000d,
+       0x9210, 0xffffffff, 0x0010000f,
+       0x921c, 0xffffffff, 0x00120011,
+       0x9220, 0xffffffff, 0x00140013,
+       0x9224, 0xffffffff, 0x00020001,
+       0x9228, 0xffffffff, 0x00040003,
+       0x922c, 0xffffffff, 0x00060005,
+       0x9238, 0xffffffff, 0x00080007,
+       0x923c, 0xffffffff, 0x000a0009,
+       0x9240, 0xffffffff, 0x000c000b,
+       0x9244, 0xffffffff, 0x000e000d,
+       0x9248, 0xffffffff, 0x0010000f,
+       0x9254, 0xffffffff, 0x00120011,
+       0x9258, 0xffffffff, 0x00140013,
+       0x925c, 0xffffffff, 0x00020001,
+       0x9260, 0xffffffff, 0x00040003,
+       0x9264, 0xffffffff, 0x00060005,
+       0x9270, 0xffffffff, 0x00080007,
+       0x9274, 0xffffffff, 0x000a0009,
+       0x9278, 0xffffffff, 0x000c000b,
+       0x927c, 0xffffffff, 0x000e000d,
+       0x9280, 0xffffffff, 0x0010000f,
+       0x928c, 0xffffffff, 0x00120011,
+       0x9290, 0xffffffff, 0x00140013,
+       0x9294, 0xffffffff, 0x00020001,
+       0x929c, 0xffffffff, 0x00040003,
+       0x92a0, 0xffffffff, 0x00060005,
+       0x92a4, 0xffffffff, 0x00080007
+};
+
+static const u32 rv710_golden_registers[] =
+{
+       0x3f90, 0x00ff0000, 0x00fc0000,
+       0x9148, 0x00ff0000, 0x00fc0000,
+       0x3f94, 0x00ff0000, 0x00fc0000,
+       0x914c, 0x00ff0000, 0x00fc0000,
+       0xb4c, 0x00000020, 0x00000020,
+       0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv710_mgcg_init[] =
+{
+       0x8bcc, 0xffffffff, 0x13030040,
+       0x5448, 0xffffffff, 0x100,
+       0x55e4, 0xffffffff, 0x100,
+       0x160c, 0xffffffff, 0x100,
+       0x5644, 0xffffffff, 0x100,
+       0xc164, 0xffffffff, 0x100,
+       0x8a18, 0xffffffff, 0x100,
+       0x897c, 0xffffffff, 0x8000100,
+       0x8b28, 0xffffffff, 0x3c000100,
+       0x9144, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10000,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x0,
+       0x9870, 0xffffffff, 0x100,
+       0x8d58, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x0,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x1,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x8000,
+       0x9490, 0xffffffff, 0x0,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x1,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x8000,
+       0x9604, 0xffffffff, 0x0,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x1,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x80000000,
+       0x9030, 0xffffffff, 0x100,
+       0x9034, 0xffffffff, 0x100,
+       0x9038, 0xffffffff, 0x100,
+       0x903c, 0xffffffff, 0x100,
+       0x9040, 0xffffffff, 0x100,
+       0xa200, 0xffffffff, 0x100,
+       0xa204, 0xffffffff, 0x100,
+       0xa208, 0xffffffff, 0x100,
+       0xa20c, 0xffffffff, 0x100,
+       0x971c, 0xffffffff, 0x100,
+       0x915c, 0xffffffff, 0x00020001,
+       0x9174, 0xffffffff, 0x00000003,
+       0x9178, 0xffffffff, 0x00050001,
+       0x917c, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00000004,
+       0x9190, 0xffffffff, 0x00070006,
+       0x9194, 0xffffffff, 0x00050001,
+       0x9198, 0xffffffff, 0x00030002,
+       0x91a8, 0xffffffff, 0x00000004,
+       0x91ac, 0xffffffff, 0x00070006,
+       0x91e8, 0xffffffff, 0x00000001,
+       0x9294, 0xffffffff, 0x00000001,
+       0x929c, 0xffffffff, 0x00000002,
+       0x92a0, 0xffffffff, 0x00040003,
+       0x9150, 0xffffffff, 0x4d940000
+};
+
+static const u32 rv730_golden_registers[] =
+{
+       0x3f90, 0x00ff0000, 0x00f00000,
+       0x9148, 0x00ff0000, 0x00f00000,
+       0x3f94, 0x00ff0000, 0x00f00000,
+       0x914c, 0x00ff0000, 0x00f00000,
+       0x900c, 0xffffffff, 0x003b033f,
+       0xb4c, 0x00000020, 0x00000020,
+       0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv730_mgcg_init[] =
+{
+       0x8bcc, 0xffffffff, 0x130300f9,
+       0x5448, 0xffffffff, 0x100,
+       0x55e4, 0xffffffff, 0x100,
+       0x160c, 0xffffffff, 0x100,
+       0x5644, 0xffffffff, 0x100,
+       0xc164, 0xffffffff, 0x100,
+       0x8a18, 0xffffffff, 0x100,
+       0x897c, 0xffffffff, 0x8000100,
+       0x8b28, 0xffffffff, 0x3c000100,
+       0x9144, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10000,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10001,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x0,
+       0x9870, 0xffffffff, 0x100,
+       0x8d58, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x0,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x1,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x2,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x3,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x4,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x5,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x6,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x7,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x8000,
+       0x9490, 0xffffffff, 0x0,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x1,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x2,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x3,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x4,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x5,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x6,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x7,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x8000,
+       0x9604, 0xffffffff, 0x0,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x1,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x2,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x3,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x4,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x5,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x6,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x7,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x80000000,
+       0x9030, 0xffffffff, 0x100,
+       0x9034, 0xffffffff, 0x100,
+       0x9038, 0xffffffff, 0x100,
+       0x903c, 0xffffffff, 0x100,
+       0x9040, 0xffffffff, 0x100,
+       0xa200, 0xffffffff, 0x100,
+       0xa204, 0xffffffff, 0x100,
+       0xa208, 0xffffffff, 0x100,
+       0xa20c, 0xffffffff, 0x100,
+       0x971c, 0xffffffff, 0x100,
+       0x915c, 0xffffffff, 0x00020001,
+       0x916c, 0xffffffff, 0x00040003,
+       0x9170, 0xffffffff, 0x00000005,
+       0x9178, 0xffffffff, 0x00050001,
+       0x917c, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00000004,
+       0x9190, 0xffffffff, 0x00070006,
+       0x9194, 0xffffffff, 0x00050001,
+       0x9198, 0xffffffff, 0x00030002,
+       0x91a8, 0xffffffff, 0x00000004,
+       0x91ac, 0xffffffff, 0x00070006,
+       0x91b0, 0xffffffff, 0x00050001,
+       0x91b4, 0xffffffff, 0x00030002,
+       0x91c4, 0xffffffff, 0x00000004,
+       0x91c8, 0xffffffff, 0x00070006,
+       0x91cc, 0xffffffff, 0x00050001,
+       0x91d0, 0xffffffff, 0x00030002,
+       0x91e0, 0xffffffff, 0x00000004,
+       0x91e4, 0xffffffff, 0x00070006,
+       0x91e8, 0xffffffff, 0x00000001,
+       0x91ec, 0xffffffff, 0x00050001,
+       0x91f0, 0xffffffff, 0x00030002,
+       0x9200, 0xffffffff, 0x00000004,
+       0x9204, 0xffffffff, 0x00070006,
+       0x9208, 0xffffffff, 0x00050001,
+       0x920c, 0xffffffff, 0x00030002,
+       0x921c, 0xffffffff, 0x00000004,
+       0x9220, 0xffffffff, 0x00070006,
+       0x9224, 0xffffffff, 0x00050001,
+       0x9228, 0xffffffff, 0x00030002,
+       0x9238, 0xffffffff, 0x00000004,
+       0x923c, 0xffffffff, 0x00070006,
+       0x9240, 0xffffffff, 0x00050001,
+       0x9244, 0xffffffff, 0x00030002,
+       0x9254, 0xffffffff, 0x00000004,
+       0x9258, 0xffffffff, 0x00070006,
+       0x9294, 0xffffffff, 0x00000001,
+       0x929c, 0xffffffff, 0x00000002,
+       0x92a0, 0xffffffff, 0x00040003,
+       0x92a4, 0xffffffff, 0x00000005
+};
+
+static const u32 rv740_golden_registers[] =
+{
+       0x88c4, 0xffffffff, 0x00000082,
+       0x28a50, 0xfffffffc, 0x00000004,
+       0x2650, 0x00040000, 0,
+       0x20bc, 0x00040000, 0,
+       0x733c, 0xffffffff, 0x00000002,
+       0x7300, 0xffffffff, 0x001000f0,
+       0x3f90, 0x00ff0000, 0,
+       0x9148, 0x00ff0000, 0,
+       0x3f94, 0x00ff0000, 0,
+       0x914c, 0x00ff0000, 0,
+       0x240c, 0xffffffff, 0x00000380,
+       0x8a14, 0x00000007, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x28a4c, 0xffffffff, 0x00004000,
+       0xa180, 0xffffffff, 0x00003f3f,
+       0x8d00, 0xffffffff, 0x0e0e003a,
+       0x8d04, 0xffffffff, 0x013a0e2a,
+       0x8c00, 0xffffffff, 0xe400000f,
+       0x8db0, 0xffffffff, 0x98989898,
+       0x8db4, 0xffffffff, 0x98989898,
+       0x8db8, 0xffffffff, 0x98989898,
+       0x8dbc, 0xffffffff, 0x98989898,
+       0x8dc0, 0xffffffff, 0x98989898,
+       0x8dc4, 0xffffffff, 0x98989898,
+       0x8dc8, 0xffffffff, 0x98989898,
+       0x8dcc, 0xffffffff, 0x98989898,
+       0x9058, 0xffffffff, 0x0fffc40f,
+       0x900c, 0xffffffff, 0x003b033f,
+       0x28350, 0xffffffff, 0,
+       0x8cf0, 0x1fffffff, 0x08e00420,
+       0x9508, 0xffffffff, 0x00000002,
+       0x88c4, 0xffffffff, 0x000000c2,
+       0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv740_mgcg_init[] =
+{
+       0x8bcc, 0xffffffff, 0x13030100,
+       0x5448, 0xffffffff, 0x100,
+       0x55e4, 0xffffffff, 0x100,
+       0x160c, 0xffffffff, 0x100,
+       0x5644, 0xffffffff, 0x100,
+       0xc164, 0xffffffff, 0x100,
+       0x8a18, 0xffffffff, 0x100,
+       0x897c, 0xffffffff, 0x100,
+       0x8b28, 0xffffffff, 0x100,
+       0x9144, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10000,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10001,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10002,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x10003,
+       0x9a50, 0xffffffff, 0x100,
+       0x9a1c, 0xffffffff, 0x0,
+       0x9870, 0xffffffff, 0x100,
+       0x8d58, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x0,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x1,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x2,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x3,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x4,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x5,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x6,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x7,
+       0x9510, 0xffffffff, 0x100,
+       0x9500, 0xffffffff, 0x8000,
+       0x9490, 0xffffffff, 0x0,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x1,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x2,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x3,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x4,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x5,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x6,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x7,
+       0x949c, 0xffffffff, 0x100,
+       0x9490, 0xffffffff, 0x8000,
+       0x9604, 0xffffffff, 0x0,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x1,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x2,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x3,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x4,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x5,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x6,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x7,
+       0x9654, 0xffffffff, 0x100,
+       0x9604, 0xffffffff, 0x80000000,
+       0x9030, 0xffffffff, 0x100,
+       0x9034, 0xffffffff, 0x100,
+       0x9038, 0xffffffff, 0x100,
+       0x903c, 0xffffffff, 0x100,
+       0x9040, 0xffffffff, 0x100,
+       0xa200, 0xffffffff, 0x100,
+       0xa204, 0xffffffff, 0x100,
+       0xa208, 0xffffffff, 0x100,
+       0xa20c, 0xffffffff, 0x100,
+       0x971c, 0xffffffff, 0x100,
+       0x915c, 0xffffffff, 0x00020001,
+       0x9160, 0xffffffff, 0x00040003,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00080007,
+       0x9174, 0xffffffff, 0x000a0009,
+       0x9178, 0xffffffff, 0x000c000b,
+       0x917c, 0xffffffff, 0x000e000d,
+       0x9180, 0xffffffff, 0x0010000f,
+       0x918c, 0xffffffff, 0x00120011,
+       0x9190, 0xffffffff, 0x00140013,
+       0x9194, 0xffffffff, 0x00020001,
+       0x9198, 0xffffffff, 0x00040003,
+       0x919c, 0xffffffff, 0x00060005,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000a0009,
+       0x91b0, 0xffffffff, 0x000c000b,
+       0x91b4, 0xffffffff, 0x000e000d,
+       0x91b8, 0xffffffff, 0x0010000f,
+       0x91c4, 0xffffffff, 0x00120011,
+       0x91c8, 0xffffffff, 0x00140013,
+       0x91cc, 0xffffffff, 0x00020001,
+       0x91d0, 0xffffffff, 0x00040003,
+       0x91d4, 0xffffffff, 0x00060005,
+       0x91e0, 0xffffffff, 0x00080007,
+       0x91e4, 0xffffffff, 0x000a0009,
+       0x91e8, 0xffffffff, 0x000c000b,
+       0x91ec, 0xffffffff, 0x00020001,
+       0x91f0, 0xffffffff, 0x00040003,
+       0x91f4, 0xffffffff, 0x00060005,
+       0x9200, 0xffffffff, 0x00080007,
+       0x9204, 0xffffffff, 0x000a0009,
+       0x9208, 0xffffffff, 0x000c000b,
+       0x920c, 0xffffffff, 0x000e000d,
+       0x9210, 0xffffffff, 0x0010000f,
+       0x921c, 0xffffffff, 0x00120011,
+       0x9220, 0xffffffff, 0x00140013,
+       0x9224, 0xffffffff, 0x00020001,
+       0x9228, 0xffffffff, 0x00040003,
+       0x922c, 0xffffffff, 0x00060005,
+       0x9238, 0xffffffff, 0x00080007,
+       0x923c, 0xffffffff, 0x000a0009,
+       0x9240, 0xffffffff, 0x000c000b,
+       0x9244, 0xffffffff, 0x000e000d,
+       0x9248, 0xffffffff, 0x0010000f,
+       0x9254, 0xffffffff, 0x00120011,
+       0x9258, 0xffffffff, 0x00140013,
+       0x9294, 0xffffffff, 0x00020001,
+       0x929c, 0xffffffff, 0x00040003,
+       0x92a0, 0xffffffff, 0x00060005,
+       0x92a4, 0xffffffff, 0x00080007
+};
+
+static void rv770_init_golden_registers(struct radeon_device *rdev)
+{
+       switch (rdev->family) {
+       case CHIP_RV770:
+               radeon_program_register_sequence(rdev,
+                                                r7xx_golden_registers,
+                                                (const u32)ARRAY_SIZE(r7xx_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                r7xx_golden_dyn_gpr_registers,
+                                                (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+               if (rdev->pdev->device == 0x994e)
+                       radeon_program_register_sequence(rdev,
+                                                        rv770ce_golden_registers,
+                                                        (const u32)ARRAY_SIZE(rv770ce_golden_registers));
+               else
+                       radeon_program_register_sequence(rdev,
+                                                        rv770_golden_registers,
+                                                        (const u32)ARRAY_SIZE(rv770_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                rv770_mgcg_init,
+                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+               break;
+       case CHIP_RV730:
+               radeon_program_register_sequence(rdev,
+                                                r7xx_golden_registers,
+                                                (const u32)ARRAY_SIZE(r7xx_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                r7xx_golden_dyn_gpr_registers,
+                                                (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+               radeon_program_register_sequence(rdev,
+                                                rv730_golden_registers,
+                                                (const u32)ARRAY_SIZE(rv730_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                rv730_mgcg_init,
+                                                (const u32)ARRAY_SIZE(rv730_mgcg_init));
+               break;
+       case CHIP_RV710:
+               radeon_program_register_sequence(rdev,
+                                                r7xx_golden_registers,
+                                                (const u32)ARRAY_SIZE(r7xx_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                r7xx_golden_dyn_gpr_registers,
+                                                (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+               radeon_program_register_sequence(rdev,
+                                                rv710_golden_registers,
+                                                (const u32)ARRAY_SIZE(rv710_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                rv710_mgcg_init,
+                                                (const u32)ARRAY_SIZE(rv710_mgcg_init));
+               break;
+       case CHIP_RV740:
+               radeon_program_register_sequence(rdev,
+                                                rv740_golden_registers,
+                                                (const u32)ARRAY_SIZE(rv740_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                rv740_mgcg_init,
+                                                (const u32)ARRAY_SIZE(rv740_mgcg_init));
+               break;
+       default:
+               break;
+       }
+}
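Each golden-register table is a flat list of (offset, and_mask, or_mask) triples. radeon_program_register_sequence() lives elsewhere in the driver; it is expected to write or_mask directly when and_mask is 0xffffffff and otherwise do a masked read-modify-write, which is why the tables mix full-word values with narrow masks like 0x18000000. Treat the sketch below as an assumption about that behaviour rather than a copy of the helper.

#include <stdio.h>
#include <stdint.h>

/* apply one (and_mask, or_mask) pair to a current register value */
static uint32_t apply_triple(uint32_t current, uint32_t and_mask, uint32_t or_mask)
{
        if (and_mask == 0xffffffff)
                return or_mask;                         /* straight write           */
        return (current & ~and_mask) | or_mask;         /* masked read-modify-write */
}

int main(void)
{
        /* example triple from rv770_golden_registers: 0x9698, 0x18000000, 0x18000000 */
        uint32_t before = 0x00345678;
        uint32_t after = apply_triple(before, 0x18000000, 0x18000000);

        printf("0x%08x -> 0x%08x\n", before, after);    /* only bits 27-28 change */
        return 0;
}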
 
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
@@ -68,6 +801,105 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
        return reference_clock;
 }
 
+int rv770_uvd_resume(struct radeon_device *rdev)
+{
+       uint64_t addr;
+       uint32_t chip_id, size;
+       int r;
+
+       r = radeon_uvd_resume(rdev);
+       if (r)
+               return r;
+
+       /* program the VCPU memory controller bits 0-27 */
+       addr = rdev->uvd.gpu_addr >> 3;
+       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+       WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+       WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+       addr += size;
+       size = RADEON_UVD_STACK_SIZE >> 3;
+       WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+       WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+       addr += size;
+       size = RADEON_UVD_HEAP_SIZE >> 3;
+       WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+       WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+       /* bits 28-31 */
+       addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+       WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+       /* bits 32-39 */
+       addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+       WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+       /* tell firmware which hardware it is running on */
+       switch (rdev->family) {
+       default:
+               return -EINVAL;
+       case CHIP_RV710:
+               chip_id = 0x01000005;
+               break;
+       case CHIP_RV730:
+               chip_id = 0x01000006;
+               break;
+       case CHIP_RV740:
+               chip_id = 0x01000007;
+               break;
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+               chip_id = 0x01000008;
+               break;
+       case CHIP_JUNIPER:
+               chip_id = 0x01000009;
+               break;
+       case CHIP_REDWOOD:
+               chip_id = 0x0100000a;
+               break;
+       case CHIP_CEDAR:
+               chip_id = 0x0100000b;
+               break;
+       case CHIP_SUMO:
+               chip_id = 0x0100000c;
+               break;
+       case CHIP_SUMO2:
+               chip_id = 0x0100000d;
+               break;
+       case CHIP_PALM:
+               chip_id = 0x0100000e;
+               break;
+       case CHIP_CAYMAN:
+               chip_id = 0x0100000f;
+               break;
+       case CHIP_BARTS:
+               chip_id = 0x01000010;
+               break;
+       case CHIP_TURKS:
+               chip_id = 0x01000011;
+               break;
+       case CHIP_CAICOS:
+               chip_id = 0x01000012;
+               break;
+       case CHIP_TAHITI:
+               chip_id = 0x01000014;
+               break;
+       case CHIP_VERDE:
+               chip_id = 0x01000015;
+               break;
+       case CHIP_PITCAIRN:
+               chip_id = 0x01000016;
+               break;
+       case CHIP_ARUBA:
+               chip_id = 0x01000017;
+               break;
+       }
+       WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+       return 0;
+}
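The three VCPU cache windows programmed above carve the UVD BO into firmware image, stack and heap regions laid out back to back; the hardware takes offsets and sizes in 8-byte units, hence the >> 3. A rough standalone model follows; the firmware size and the RADEON_UVD_STACK_SIZE / RADEON_UVD_HEAP_SIZE constants are placeholders here, not the driver's actual values.

#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_ALIGN(x) (((x) + 4095ULL) & ~4095ULL)    /* 4 KiB GPU pages assumed */

int main(void)
{
        uint64_t gpu_addr = 0x100000;                      /* example BO address */
        uint64_t fw_size = GPU_PAGE_ALIGN(200 * 1024 + 4); /* placeholder fw blob, +4 as in the driver */
        uint64_t stack = 128 * 1024;                       /* placeholder */
        uint64_t heap = 1024 * 1024;                       /* placeholder */
        uint64_t off0 = gpu_addr >> 3, sz0 = fw_size >> 3;
        uint64_t off1 = off0 + sz0,    sz1 = stack >> 3;
        uint64_t off2 = off1 + sz1,    sz2 = heap >> 3;

        printf("fw    off=0x%llx size=0x%llx\n", (unsigned long long)off0, (unsigned long long)sz0);
        printf("stack off=0x%llx size=0x%llx\n", (unsigned long long)off1, (unsigned long long)sz1);
        printf("heap  off=0x%llx size=0x%llx\n", (unsigned long long)off2, (unsigned long long)sz2);
        return 0;
}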
+
 u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -611,6 +1443,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+       if (rdev->family == CHIP_RV730) {
+               WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
+               WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
+               WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
+       }
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
@@ -840,7 +1677,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
        }
        if (rdev->flags & RADEON_IS_AGP) {
                size_bf = mc->gtt_start;
-               size_af = 0xFFFFFFFF - mc->gtt_end;
+               size_af = mc->mc_mask - mc->gtt_end;
                if (size_bf > size_af) {
                        if (mc->mc_vram_size > size_bf) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1040,6 +1877,17 @@ static int rv770_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = rv770_uvd_resume(rdev);
+       if (!r) {
+               r = radeon_fence_driver_start_ring(rdev,
+                                                  R600_RING_TYPE_UVD_INDEX);
+               if (r)
+                       dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+       }
+
+       if (r)
+               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1074,6 +1922,19 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+       if (ring->ring_size) {
+               r = radeon_ring_init(rdev, ring, ring->ring_size,
+                                    R600_WB_UVD_RPTR_OFFSET,
+                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+                                    0, 0xfffff, RADEON_CP_PACKET2);
+               if (!r)
+                       r = r600_uvd_init(rdev);
+
+               if (r)
+                       DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+       }
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1100,6 +1961,9 @@ int rv770_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       /* init golden registers */
+       rv770_init_golden_registers(rdev);
+
        rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
@@ -1115,6 +1979,7 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_uvd_suspend(rdev);
        r700_cp_stop(rdev);
        r600_dma_stop(rdev);
        r600_irq_suspend(rdev);
@@ -1156,6 +2021,8 @@ int rv770_init(struct radeon_device *rdev)
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
+       /* init golden registers */
+       rv770_init_golden_registers(rdev);
        /* Initialize scratch registers */
        r600_scratch_init(rdev);
        /* Initialize surface registers */
@@ -1190,6 +2057,13 @@ int rv770_init(struct radeon_device *rdev)
        rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
+       r = radeon_uvd_init(rdev);
+       if (!r) {
+               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+               r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+                              4096);
+       }
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1224,6 +2098,7 @@ void rv770_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
+       radeon_uvd_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
@@ -1264,23 +2139,23 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
        DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
        /* advertise upconfig capability */
-       link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+       link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
        link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-       WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-       link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+       WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+       link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
        if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
                lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
                link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
                                     LC_RECONFIG_ARC_MISSING_ESCAPE);
                link_width_cntl |= lanes | LC_RECONFIG_NOW |
                        LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        } else {
                link_width_cntl |= LC_UPCONFIGURE_DIS;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        }
 
-       speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
            (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
@@ -1293,29 +2168,29 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
                WREG16(0x4088, link_cntl2);
                WREG32(MM_CFGREGS_CNTL, 0);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-               speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
                speed_cntl |= LC_GEN2_EN_STRAP;
-               WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
        } else {
-               link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+               link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
                /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
                if (1)
                        link_width_cntl |= LC_UPCONFIGURE_DIS;
                else
                        link_width_cntl &= ~LC_UPCONFIGURE_DIS;
-               WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        }
 }
index c55f950..85b1626 100644 (file)
 #define R7XX_MAX_PIPES             8
 #define R7XX_MAX_PIPES_MASK        0xff
 
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL                              0x718
+#      define UPLL_RESET_MASK                          0x00000001
+#      define UPLL_SLEEP_MASK                          0x00000002
+#      define UPLL_BYPASS_EN_MASK                      0x00000004
+#      define UPLL_CTLREQ_MASK                         0x00000008
+#      define UPLL_REF_DIV(x)                          ((x) << 16)
+#      define UPLL_REF_DIV_MASK                        0x003F0000
+#      define UPLL_CTLACK_MASK                         0x40000000
+#      define UPLL_CTLACK2_MASK                        0x80000000
+#define CG_UPLL_FUNC_CNTL_2                            0x71c
+#      define UPLL_SW_HILEN(x)                         ((x) << 0)
+#      define UPLL_SW_LOLEN(x)                         ((x) << 4)
+#      define UPLL_SW_HILEN2(x)                        ((x) << 8)
+#      define UPLL_SW_LOLEN2(x)                        ((x) << 12)
+#      define UPLL_SW_MASK                             0x0000FFFF
+#      define VCLK_SRC_SEL(x)                          ((x) << 20)
+#      define VCLK_SRC_SEL_MASK                        0x01F00000
+#      define DCLK_SRC_SEL(x)                          ((x) << 25)
+#      define DCLK_SRC_SEL_MASK                        0x3E000000
+#define CG_UPLL_FUNC_CNTL_3                            0x720
+#      define UPLL_FB_DIV(x)                           ((x) << 0)
+#      define UPLL_FB_DIV_MASK                         0x01FFFFFF
+
 /* Registers */
 #define        CB_COLOR0_BASE                                  0x28040
 #define        CB_COLOR1_BASE                                  0x28044
 #define DMA_TILING_CONFIG                               0x3ec8
 #define DMA_TILING_CONFIG2                              0xd0b8
 
+/* RV730 only */
+#define UVD_UDEC_TILING_CONFIG                          0xef40
+#define UVD_UDEC_DB_TILING_CONFIG                       0xef44
+#define UVD_UDEC_DBW_TILING_CONFIG                      0xef48
+
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
 #       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
 #       define SELECTABLE_DEEMPHASIS                      (1 << 6)
 
+/* UVD */
+#define UVD_LMI_EXT40_ADDR                             0xf498
+#define UVD_VCPU_CHIP_ID                               0xf4d4
+#define UVD_VCPU_CACHE_OFFSET0                         0xf4d8
+#define UVD_VCPU_CACHE_SIZE0                           0xf4dc
+#define UVD_VCPU_CACHE_OFFSET1                         0xf4e0
+#define UVD_VCPU_CACHE_SIZE1                           0xf4e4
+#define UVD_VCPU_CACHE_OFFSET2                         0xf4e8
+#define UVD_VCPU_CACHE_SIZE2                           0xf4ec
+#define UVD_LMI_ADDR_EXT                               0xf594
+
+#define UVD_RBC_RB_RPTR                                        0xf690
+#define UVD_RBC_RB_WPTR                                        0xf694
+
 #endif
index bafbe32..f0b6c2f 100644 (file)
@@ -70,6 +70,794 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
 
+static const u32 tahiti_golden_rlc_registers[] =
+{
+       0xc424, 0xffffffff, 0x00601005,
+       0xc47c, 0xffffffff, 0x10104040,
+       0xc488, 0xffffffff, 0x0100000a,
+       0xc314, 0xffffffff, 0x00000800,
+       0xc30c, 0xffffffff, 0x800000f4,
+       0xf4a8, 0xffffffff, 0x00000000
+};
+
+static const u32 tahiti_golden_registers[] =
+{
+       0x9a10, 0x00010000, 0x00018208,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xc78, 0x00000080, 0x00000000,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd830, 0x000300c0, 0x00800040,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x7030, 0x31000311, 0x00000011,
+       0x277c, 0x00000003, 0x000007ff,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x4e000000,
+       0x28350, 0x3f3f3fff, 0x2a00126a,
+       0x30, 0x000000ff, 0x0040,
+       0x34, 0x00000040, 0x00004040,
+       0x9100, 0x07ffffff, 0x03000000,
+       0x8e88, 0x01ff1f3f, 0x00000000,
+       0x8e84, 0x01ff1f3f, 0x00000000,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x00000200, 0x000002fb,
+       0xac10, 0xffffffff, 0x0000543b,
+       0xac0c, 0xffffffff, 0xa9210876,
+       0x88d0, 0xffffffff, 0x000fff40,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x1410, 0x20000000, 0x20fffed8,
+       0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+       0xc64, 0x00000001, 0x00000001
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+       0xc424, 0xffffffff, 0x00601004,
+       0xc47c, 0xffffffff, 0x10102020,
+       0xc488, 0xffffffff, 0x01000020,
+       0xc314, 0xffffffff, 0x00000800,
+       0xc30c, 0xffffffff, 0x800000a4
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+       0x9a10, 0x00010000, 0x00018208,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xc78, 0x00000080, 0x00000000,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd830, 0x000300c0, 0x00800040,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x7030, 0x31000311, 0x00000011,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x4e000000,
+       0x28350, 0x3f3f3fff, 0x2a00126a,
+       0x30, 0x000000ff, 0x0040,
+       0x34, 0x00000040, 0x00004040,
+       0x9100, 0x07ffffff, 0x03000000,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x000003ff, 0x000000f7,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac0c, 0xffffffff, 0x32761054,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+       0xc424, 0xffffffff, 0x033f1005,
+       0xc47c, 0xffffffff, 0x10808020,
+       0xc488, 0xffffffff, 0x00800008,
+       0xc314, 0xffffffff, 0x00001000,
+       0xc30c, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+       0x9a10, 0x00010000, 0x00018208,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xc78, 0x00000080, 0x00000000,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd830, 0x000300c0, 0x00800040,
+       0xd830, 0x000300c0, 0x00800040,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x7030, 0x31000311, 0x00000011,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x240c, 0x000007ff, 0x00000000,
+       0x240c, 0x000007ff, 0x00000000,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x4e000000,
+       0x28350, 0x3f3f3fff, 0x0000124a,
+       0x28350, 0x3f3f3fff, 0x0000124a,
+       0x28350, 0x3f3f3fff, 0x0000124a,
+       0x30, 0x000000ff, 0x0040,
+       0x34, 0x00000040, 0x00004040,
+       0x9100, 0x07ffffff, 0x03000000,
+       0x9100, 0x07ffffff, 0x03000000,
+       0x8e88, 0x01ff1f3f, 0x00000000,
+       0x8e88, 0x01ff1f3f, 0x00000000,
+       0x8e88, 0x01ff1f3f, 0x00000000,
+       0x8e84, 0x01ff1f3f, 0x00000000,
+       0x8e84, 0x01ff1f3f, 0x00000000,
+       0x8e84, 0x01ff1f3f, 0x00000000,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x000003ff, 0x00000003,
+       0xac14, 0x000003ff, 0x00000003,
+       0xac14, 0x000003ff, 0x00000003,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac0c, 0xffffffff, 0x00001032,
+       0xac0c, 0xffffffff, 0x00001032,
+       0xac0c, 0xffffffff, 0x00001032,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+       0xc424, 0xffffffff, 0x00601005,
+       0xc47c, 0xffffffff, 0x10104040,
+       0xc488, 0xffffffff, 0x0100000a,
+       0xc314, 0xffffffff, 0x00000800,
+       0xc30c, 0xffffffff, 0x800000f4
+};
+
+static const u32 oland_golden_registers[] =
+{
+       0x9a10, 0x00010000, 0x00018208,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xc78, 0x00000080, 0x00000000,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd830, 0x000300c0, 0x00800040,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0x00200000, 0x50100000,
+       0x7030, 0x31000311, 0x00000011,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x4e000000,
+       0x28350, 0x3f3f3fff, 0x00000082,
+       0x30, 0x000000ff, 0x0040,
+       0x34, 0x00000040, 0x00004040,
+       0x9100, 0x07ffffff, 0x03000000,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x000003ff, 0x000000f3,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac0c, 0xffffffff, 0x00003210,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+       0xc400, 0xffffffff, 0xfffffffc,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x92a4, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x9774, 0xffffffff, 0x00000100,
+       0x8984, 0xffffffff, 0x06000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x92a0, 0xffffffff, 0x00000100,
+       0xc380, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x8d88, 0xffffffff, 0x00000100,
+       0x8d8c, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0xad80, 0xffffffff, 0x00000100,
+       0xac54, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0xaf04, 0xffffffff, 0x00000100,
+       0xae04, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9160, 0xffffffff, 0x00010000,
+       0x9164, 0xffffffff, 0x00030002,
+       0x9168, 0xffffffff, 0x00040007,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00090008,
+       0x9174, 0xffffffff, 0x00020001,
+       0x9178, 0xffffffff, 0x00040003,
+       0x917c, 0xffffffff, 0x00000007,
+       0x9180, 0xffffffff, 0x00060005,
+       0x9184, 0xffffffff, 0x00090008,
+       0x9188, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00050004,
+       0x9190, 0xffffffff, 0x00000008,
+       0x9194, 0xffffffff, 0x00070006,
+       0x9198, 0xffffffff, 0x000a0009,
+       0x919c, 0xffffffff, 0x00040003,
+       0x91a0, 0xffffffff, 0x00060005,
+       0x91a4, 0xffffffff, 0x00000009,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000b000a,
+       0x91b0, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00070006,
+       0x91b8, 0xffffffff, 0x0008000b,
+       0x91bc, 0xffffffff, 0x000a0009,
+       0x91c0, 0xffffffff, 0x000d000c,
+       0x91c4, 0xffffffff, 0x00060005,
+       0x91c8, 0xffffffff, 0x00080007,
+       0x91cc, 0xffffffff, 0x0000000b,
+       0x91d0, 0xffffffff, 0x000a0009,
+       0x91d4, 0xffffffff, 0x000d000c,
+       0x91d8, 0xffffffff, 0x00070006,
+       0x91dc, 0xffffffff, 0x00090008,
+       0x91e0, 0xffffffff, 0x0000000c,
+       0x91e4, 0xffffffff, 0x000b000a,
+       0x91e8, 0xffffffff, 0x000e000d,
+       0x91ec, 0xffffffff, 0x00080007,
+       0x91f0, 0xffffffff, 0x000a0009,
+       0x91f4, 0xffffffff, 0x0000000d,
+       0x91f8, 0xffffffff, 0x000c000b,
+       0x91fc, 0xffffffff, 0x000f000e,
+       0x9200, 0xffffffff, 0x00090008,
+       0x9204, 0xffffffff, 0x000b000a,
+       0x9208, 0xffffffff, 0x000c000f,
+       0x920c, 0xffffffff, 0x000e000d,
+       0x9210, 0xffffffff, 0x00110010,
+       0x9214, 0xffffffff, 0x000a0009,
+       0x9218, 0xffffffff, 0x000c000b,
+       0x921c, 0xffffffff, 0x0000000f,
+       0x9220, 0xffffffff, 0x000e000d,
+       0x9224, 0xffffffff, 0x00110010,
+       0x9228, 0xffffffff, 0x000b000a,
+       0x922c, 0xffffffff, 0x000d000c,
+       0x9230, 0xffffffff, 0x00000010,
+       0x9234, 0xffffffff, 0x000f000e,
+       0x9238, 0xffffffff, 0x00120011,
+       0x923c, 0xffffffff, 0x000c000b,
+       0x9240, 0xffffffff, 0x000e000d,
+       0x9244, 0xffffffff, 0x00000011,
+       0x9248, 0xffffffff, 0x0010000f,
+       0x924c, 0xffffffff, 0x00130012,
+       0x9250, 0xffffffff, 0x000d000c,
+       0x9254, 0xffffffff, 0x000f000e,
+       0x9258, 0xffffffff, 0x00100013,
+       0x925c, 0xffffffff, 0x00120011,
+       0x9260, 0xffffffff, 0x00150014,
+       0x9264, 0xffffffff, 0x000e000d,
+       0x9268, 0xffffffff, 0x0010000f,
+       0x926c, 0xffffffff, 0x00000013,
+       0x9270, 0xffffffff, 0x00120011,
+       0x9274, 0xffffffff, 0x00150014,
+       0x9278, 0xffffffff, 0x000f000e,
+       0x927c, 0xffffffff, 0x00110010,
+       0x9280, 0xffffffff, 0x00000014,
+       0x9284, 0xffffffff, 0x00130012,
+       0x9288, 0xffffffff, 0x00160015,
+       0x928c, 0xffffffff, 0x0010000f,
+       0x9290, 0xffffffff, 0x00120011,
+       0x9294, 0xffffffff, 0x00000015,
+       0x9298, 0xffffffff, 0x00140013,
+       0x929c, 0xffffffff, 0x00170016,
+       0x9150, 0xffffffff, 0x96940200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc478, 0xffffffff, 0x00000080,
+       0xc404, 0xffffffff, 0x0020003f,
+       0x30, 0xffffffff, 0x0000001c,
+       0x34, 0x000f0000, 0x000f0000,
+       0x160c, 0xffffffff, 0x00000100,
+       0x1024, 0xffffffff, 0x00000100,
+       0x102c, 0x00000101, 0x00000000,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x264c, 0x000c0000, 0x000c0000,
+       0x2648, 0x000c0000, 0x000c0000,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x55e8, 0x00000001, 0x00000001,
+       0x2f50, 0x00000001, 0x00000001,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd0c0, 0xfffffff0, 0x00000100,
+       0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+       0xc400, 0xffffffff, 0xfffffffc,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x92a4, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x9774, 0xffffffff, 0x00000100,
+       0x8984, 0xffffffff, 0x06000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x92a0, 0xffffffff, 0x00000100,
+       0xc380, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x8d88, 0xffffffff, 0x00000100,
+       0x8d8c, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0xad80, 0xffffffff, 0x00000100,
+       0xac54, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0xaf04, 0xffffffff, 0x00000100,
+       0xae04, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9160, 0xffffffff, 0x00010000,
+       0x9164, 0xffffffff, 0x00030002,
+       0x9168, 0xffffffff, 0x00040007,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00090008,
+       0x9174, 0xffffffff, 0x00020001,
+       0x9178, 0xffffffff, 0x00040003,
+       0x917c, 0xffffffff, 0x00000007,
+       0x9180, 0xffffffff, 0x00060005,
+       0x9184, 0xffffffff, 0x00090008,
+       0x9188, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00050004,
+       0x9190, 0xffffffff, 0x00000008,
+       0x9194, 0xffffffff, 0x00070006,
+       0x9198, 0xffffffff, 0x000a0009,
+       0x919c, 0xffffffff, 0x00040003,
+       0x91a0, 0xffffffff, 0x00060005,
+       0x91a4, 0xffffffff, 0x00000009,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000b000a,
+       0x91b0, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00070006,
+       0x91b8, 0xffffffff, 0x0008000b,
+       0x91bc, 0xffffffff, 0x000a0009,
+       0x91c0, 0xffffffff, 0x000d000c,
+       0x9200, 0xffffffff, 0x00090008,
+       0x9204, 0xffffffff, 0x000b000a,
+       0x9208, 0xffffffff, 0x000c000f,
+       0x920c, 0xffffffff, 0x000e000d,
+       0x9210, 0xffffffff, 0x00110010,
+       0x9214, 0xffffffff, 0x000a0009,
+       0x9218, 0xffffffff, 0x000c000b,
+       0x921c, 0xffffffff, 0x0000000f,
+       0x9220, 0xffffffff, 0x000e000d,
+       0x9224, 0xffffffff, 0x00110010,
+       0x9228, 0xffffffff, 0x000b000a,
+       0x922c, 0xffffffff, 0x000d000c,
+       0x9230, 0xffffffff, 0x00000010,
+       0x9234, 0xffffffff, 0x000f000e,
+       0x9238, 0xffffffff, 0x00120011,
+       0x923c, 0xffffffff, 0x000c000b,
+       0x9240, 0xffffffff, 0x000e000d,
+       0x9244, 0xffffffff, 0x00000011,
+       0x9248, 0xffffffff, 0x0010000f,
+       0x924c, 0xffffffff, 0x00130012,
+       0x9250, 0xffffffff, 0x000d000c,
+       0x9254, 0xffffffff, 0x000f000e,
+       0x9258, 0xffffffff, 0x00100013,
+       0x925c, 0xffffffff, 0x00120011,
+       0x9260, 0xffffffff, 0x00150014,
+       0x9150, 0xffffffff, 0x96940200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc478, 0xffffffff, 0x00000080,
+       0xc404, 0xffffffff, 0x0020003f,
+       0x30, 0xffffffff, 0x0000001c,
+       0x34, 0x000f0000, 0x000f0000,
+       0x160c, 0xffffffff, 0x00000100,
+       0x1024, 0xffffffff, 0x00000100,
+       0x102c, 0x00000101, 0x00000000,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x55e8, 0x00000001, 0x00000001,
+       0x2f50, 0x00000001, 0x00000001,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd0c0, 0xfffffff0, 0x00000100,
+       0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+       0xc400, 0xffffffff, 0xfffffffc,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x92a4, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x9774, 0xffffffff, 0x00000100,
+       0x8984, 0xffffffff, 0x06000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x92a0, 0xffffffff, 0x00000100,
+       0xc380, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x8d88, 0xffffffff, 0x00000100,
+       0x8d8c, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0xad80, 0xffffffff, 0x00000100,
+       0xac54, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0xaf04, 0xffffffff, 0x00000100,
+       0xae04, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9160, 0xffffffff, 0x00010000,
+       0x9164, 0xffffffff, 0x00030002,
+       0x9168, 0xffffffff, 0x00040007,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00090008,
+       0x9174, 0xffffffff, 0x00020001,
+       0x9178, 0xffffffff, 0x00040003,
+       0x917c, 0xffffffff, 0x00000007,
+       0x9180, 0xffffffff, 0x00060005,
+       0x9184, 0xffffffff, 0x00090008,
+       0x9188, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00050004,
+       0x9190, 0xffffffff, 0x00000008,
+       0x9194, 0xffffffff, 0x00070006,
+       0x9198, 0xffffffff, 0x000a0009,
+       0x919c, 0xffffffff, 0x00040003,
+       0x91a0, 0xffffffff, 0x00060005,
+       0x91a4, 0xffffffff, 0x00000009,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000b000a,
+       0x91b0, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00070006,
+       0x91b8, 0xffffffff, 0x0008000b,
+       0x91bc, 0xffffffff, 0x000a0009,
+       0x91c0, 0xffffffff, 0x000d000c,
+       0x9200, 0xffffffff, 0x00090008,
+       0x9204, 0xffffffff, 0x000b000a,
+       0x9208, 0xffffffff, 0x000c000f,
+       0x920c, 0xffffffff, 0x000e000d,
+       0x9210, 0xffffffff, 0x00110010,
+       0x9214, 0xffffffff, 0x000a0009,
+       0x9218, 0xffffffff, 0x000c000b,
+       0x921c, 0xffffffff, 0x0000000f,
+       0x9220, 0xffffffff, 0x000e000d,
+       0x9224, 0xffffffff, 0x00110010,
+       0x9228, 0xffffffff, 0x000b000a,
+       0x922c, 0xffffffff, 0x000d000c,
+       0x9230, 0xffffffff, 0x00000010,
+       0x9234, 0xffffffff, 0x000f000e,
+       0x9238, 0xffffffff, 0x00120011,
+       0x923c, 0xffffffff, 0x000c000b,
+       0x9240, 0xffffffff, 0x000e000d,
+       0x9244, 0xffffffff, 0x00000011,
+       0x9248, 0xffffffff, 0x0010000f,
+       0x924c, 0xffffffff, 0x00130012,
+       0x9250, 0xffffffff, 0x000d000c,
+       0x9254, 0xffffffff, 0x000f000e,
+       0x9258, 0xffffffff, 0x00100013,
+       0x925c, 0xffffffff, 0x00120011,
+       0x9260, 0xffffffff, 0x00150014,
+       0x9150, 0xffffffff, 0x96940200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc478, 0xffffffff, 0x00000080,
+       0xc404, 0xffffffff, 0x0020003f,
+       0x30, 0xffffffff, 0x0000001c,
+       0x34, 0x000f0000, 0x000f0000,
+       0x160c, 0xffffffff, 0x00000100,
+       0x1024, 0xffffffff, 0x00000100,
+       0x102c, 0x00000101, 0x00000000,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x264c, 0x000c0000, 0x000c0000,
+       0x2648, 0x000c0000, 0x000c0000,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x55e8, 0x00000001, 0x00000001,
+       0x2f50, 0x00000001, 0x00000001,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd0c0, 0xfffffff0, 0x00000100,
+       0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+       0xc400, 0xffffffff, 0xfffffffc,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x92a4, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x9774, 0xffffffff, 0x00000100,
+       0x8984, 0xffffffff, 0x06000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x92a0, 0xffffffff, 0x00000100,
+       0xc380, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x8d88, 0xffffffff, 0x00000100,
+       0x8d8c, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0xad80, 0xffffffff, 0x00000100,
+       0xac54, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0xaf04, 0xffffffff, 0x00000100,
+       0xae04, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9160, 0xffffffff, 0x00010000,
+       0x9164, 0xffffffff, 0x00030002,
+       0x9168, 0xffffffff, 0x00040007,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00090008,
+       0x9174, 0xffffffff, 0x00020001,
+       0x9178, 0xffffffff, 0x00040003,
+       0x917c, 0xffffffff, 0x00000007,
+       0x9180, 0xffffffff, 0x00060005,
+       0x9184, 0xffffffff, 0x00090008,
+       0x9188, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00050004,
+       0x9190, 0xffffffff, 0x00000008,
+       0x9194, 0xffffffff, 0x00070006,
+       0x9198, 0xffffffff, 0x000a0009,
+       0x919c, 0xffffffff, 0x00040003,
+       0x91a0, 0xffffffff, 0x00060005,
+       0x91a4, 0xffffffff, 0x00000009,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000b000a,
+       0x91b0, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00070006,
+       0x91b8, 0xffffffff, 0x0008000b,
+       0x91bc, 0xffffffff, 0x000a0009,
+       0x91c0, 0xffffffff, 0x000d000c,
+       0x91c4, 0xffffffff, 0x00060005,
+       0x91c8, 0xffffffff, 0x00080007,
+       0x91cc, 0xffffffff, 0x0000000b,
+       0x91d0, 0xffffffff, 0x000a0009,
+       0x91d4, 0xffffffff, 0x000d000c,
+       0x9150, 0xffffffff, 0x96940200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc478, 0xffffffff, 0x00000080,
+       0xc404, 0xffffffff, 0x0020003f,
+       0x30, 0xffffffff, 0x0000001c,
+       0x34, 0x000f0000, 0x000f0000,
+       0x160c, 0xffffffff, 0x00000100,
+       0x1024, 0xffffffff, 0x00000100,
+       0x102c, 0x00000101, 0x00000000,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x264c, 0x000c0000, 0x000c0000,
+       0x2648, 0x000c0000, 0x000c0000,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x55e8, 0x00000001, 0x00000001,
+       0x2f50, 0x00000001, 0x00000001,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd0c0, 0xfffffff0, 0x00000100,
+       0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_pg_init[] =
+{
+       0x353c, 0xffffffff, 0x40000,
+       0x3538, 0xffffffff, 0x200010ff,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x7007,
+       0x3538, 0xffffffff, 0x300010ff,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x400000,
+       0x3538, 0xffffffff, 0x100010ff,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x120200,
+       0x3538, 0xffffffff, 0x500010ff,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x1e1e16,
+       0x3538, 0xffffffff, 0x600010ff,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x171f1e,
+       0x3538, 0xffffffff, 0x700010ff,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x353c, 0xffffffff, 0x0,
+       0x3538, 0xffffffff, 0x9ff,
+       0x3500, 0xffffffff, 0x0,
+       0x3504, 0xffffffff, 0x10000800,
+       0x3504, 0xffffffff, 0xf,
+       0x3504, 0xffffffff, 0xf,
+       0x3500, 0xffffffff, 0x4,
+       0x3504, 0xffffffff, 0x1000051e,
+       0x3504, 0xffffffff, 0xffff,
+       0x3504, 0xffffffff, 0xffff,
+       0x3500, 0xffffffff, 0x8,
+       0x3504, 0xffffffff, 0x80500,
+       0x3500, 0xffffffff, 0x12,
+       0x3504, 0xffffffff, 0x9050c,
+       0x3500, 0xffffffff, 0x1d,
+       0x3504, 0xffffffff, 0xb052c,
+       0x3500, 0xffffffff, 0x2a,
+       0x3504, 0xffffffff, 0x1053e,
+       0x3500, 0xffffffff, 0x2d,
+       0x3504, 0xffffffff, 0x10546,
+       0x3500, 0xffffffff, 0x30,
+       0x3504, 0xffffffff, 0xa054e,
+       0x3500, 0xffffffff, 0x3c,
+       0x3504, 0xffffffff, 0x1055f,
+       0x3500, 0xffffffff, 0x3f,
+       0x3504, 0xffffffff, 0x10567,
+       0x3500, 0xffffffff, 0x42,
+       0x3504, 0xffffffff, 0x1056f,
+       0x3500, 0xffffffff, 0x45,
+       0x3504, 0xffffffff, 0x10572,
+       0x3500, 0xffffffff, 0x48,
+       0x3504, 0xffffffff, 0x20575,
+       0x3500, 0xffffffff, 0x4c,
+       0x3504, 0xffffffff, 0x190801,
+       0x3500, 0xffffffff, 0x67,
+       0x3504, 0xffffffff, 0x1082a,
+       0x3500, 0xffffffff, 0x6a,
+       0x3504, 0xffffffff, 0x1b082d,
+       0x3500, 0xffffffff, 0x87,
+       0x3504, 0xffffffff, 0x310851,
+       0x3500, 0xffffffff, 0xba,
+       0x3504, 0xffffffff, 0x891,
+       0x3500, 0xffffffff, 0xbc,
+       0x3504, 0xffffffff, 0x893,
+       0x3500, 0xffffffff, 0xbe,
+       0x3504, 0xffffffff, 0x20895,
+       0x3500, 0xffffffff, 0xc2,
+       0x3504, 0xffffffff, 0x20899,
+       0x3500, 0xffffffff, 0xc6,
+       0x3504, 0xffffffff, 0x2089d,
+       0x3500, 0xffffffff, 0xca,
+       0x3504, 0xffffffff, 0x8a1,
+       0x3500, 0xffffffff, 0xcc,
+       0x3504, 0xffffffff, 0x8a3,
+       0x3500, 0xffffffff, 0xce,
+       0x3504, 0xffffffff, 0x308a5,
+       0x3500, 0xffffffff, 0xd3,
+       0x3504, 0xffffffff, 0x6d08cd,
+       0x3500, 0xffffffff, 0x142,
+       0x3504, 0xffffffff, 0x2000095a,
+       0x3504, 0xffffffff, 0x1,
+       0x3500, 0xffffffff, 0x144,
+       0x3504, 0xffffffff, 0x301f095b,
+       0x3500, 0xffffffff, 0x165,
+       0x3504, 0xffffffff, 0xc094d,
+       0x3500, 0xffffffff, 0x173,
+       0x3504, 0xffffffff, 0xf096d,
+       0x3500, 0xffffffff, 0x184,
+       0x3504, 0xffffffff, 0x15097f,
+       0x3500, 0xffffffff, 0x19b,
+       0x3504, 0xffffffff, 0xc0998,
+       0x3500, 0xffffffff, 0x1a9,
+       0x3504, 0xffffffff, 0x409a7,
+       0x3500, 0xffffffff, 0x1af,
+       0x3504, 0xffffffff, 0xcdc,
+       0x3500, 0xffffffff, 0x1b1,
+       0x3504, 0xffffffff, 0x800,
+       0x3508, 0xffffffff, 0x6c9b2000,
+       0x3510, 0xfc00, 0x2000,
+       0x3544, 0xffffffff, 0xfc0,
+       0x28d4, 0x00000100, 0x100
+};
+
+static void si_init_golden_registers(struct radeon_device *rdev)
+{
+       switch (rdev->family) {
+       case CHIP_TAHITI:
+               radeon_program_register_sequence(rdev,
+                                                tahiti_golden_registers,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                tahiti_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+               radeon_program_register_sequence(rdev,
+                                                tahiti_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                tahiti_golden_registers2,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+               break;
+       case CHIP_PITCAIRN:
+               radeon_program_register_sequence(rdev,
+                                                pitcairn_golden_registers,
+                                                (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                pitcairn_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+               radeon_program_register_sequence(rdev,
+                                                pitcairn_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+               break;
+       case CHIP_VERDE:
+               radeon_program_register_sequence(rdev,
+                                                verde_golden_registers,
+                                                (const u32)ARRAY_SIZE(verde_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                verde_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+               radeon_program_register_sequence(rdev,
+                                                verde_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                verde_pg_init,
+                                                (const u32)ARRAY_SIZE(verde_pg_init));
+               break;
+       case CHIP_OLAND:
+               radeon_program_register_sequence(rdev,
+                                                oland_golden_registers,
+                                                (const u32)ARRAY_SIZE(oland_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                oland_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+               radeon_program_register_sequence(rdev,
+                                                oland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+               break;
+       default:
+               break;
+       }
+}
+
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
@@ -1211,6 +1999,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if ((rdev->family == CHIP_VERDE) ||
@@ -1451,6 +2240,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else
@@ -1463,7 +2253,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
        u32 data = INSTANCE_BROADCAST_WRITES;
 
        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
-               data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+               data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
        else if (se_num == 0xffffffff)
                data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
        else if (sh_num == 0xffffffff)
@@ -1765,9 +2555,13 @@ static void si_gpu_init(struct radeon_device *rdev)
 
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CALC, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+       WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+       WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
 
        si_tiling_mode_table_init(rdev);
 
@@ -2538,46 +3332,6 @@ static void si_mc_program(struct radeon_device *rdev)
        rv515_vga_render_disable(rdev);
 }
 
-/* SI MC address space is 40 bits */
-static void si_vram_location(struct radeon_device *rdev,
-                            struct radeon_mc *mc, u64 base)
-{
-       mc->vram_start = base;
-       if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
-               dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
-               mc->real_vram_size = mc->aper_size;
-               mc->mc_vram_size = mc->aper_size;
-       }
-       mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
-                       mc->mc_vram_size >> 20, mc->vram_start,
-                       mc->vram_end, mc->real_vram_size >> 20);
-}
-
-static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
-{
-       u64 size_af, size_bf;
-
-       size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
-       size_bf = mc->vram_start & ~mc->gtt_base_align;
-       if (size_bf > size_af) {
-               if (mc->gtt_size > size_bf) {
-                       dev_warn(rdev->dev, "limiting GTT\n");
-                       mc->gtt_size = size_bf;
-               }
-               mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
-       } else {
-               if (mc->gtt_size > size_af) {
-                       dev_warn(rdev->dev, "limiting GTT\n");
-                       mc->gtt_size = size_af;
-               }
-               mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
-       }
-       mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
-       dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-                       mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
-}
-
 static void si_vram_gtt_location(struct radeon_device *rdev,
                                 struct radeon_mc *mc)
 {
@@ -2587,9 +3341,9 @@ static void si_vram_gtt_location(struct radeon_device *rdev,
                mc->real_vram_size = 0xFFC0000000ULL;
                mc->mc_vram_size = 0xFFC0000000ULL;
        }
-       si_vram_location(rdev, &rdev->mc, 0);
+       radeon_vram_location(rdev, &rdev->mc, 0);
        rdev->mc.gtt_base_align = 0;
-       si_gtt_location(rdev, mc);
+       radeon_gtt_location(rdev, mc);
 }
 
 static int si_mc_init(struct radeon_device *rdev)
@@ -4322,14 +5076,6 @@ static int si_startup(struct radeon_device *rdev)
                return r;
        si_gpu_init(rdev);
 
-#if 0
-       r = evergreen_blit_init(rdev);
-       if (r) {
-               r600_blit_fini(rdev);
-               rdev->asic->copy = NULL;
-               dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-       }
-#endif
        /* allocate rlc buffers */
        r = si_rlc_init(rdev);
        if (r) {
@@ -4372,6 +5118,16 @@ static int si_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = rv770_uvd_resume(rdev);
+       if (!r) {
+               r = radeon_fence_driver_start_ring(rdev,
+                                                  R600_RING_TYPE_UVD_INDEX);
+               if (r)
+                       dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+       }
+       if (r)
+               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
        /* Enable IRQ */
        r = si_irq_init(rdev);
        if (r) {
@@ -4429,6 +5185,18 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+       if (ring->ring_size) {
+               r = radeon_ring_init(rdev, ring, ring->ring_size,
+                                    R600_WB_UVD_RPTR_OFFSET,
+                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+                                    0, 0xfffff, RADEON_CP_PACKET2);
+               if (!r)
+                       r = r600_uvd_init(rdev);
+               if (r)
+                       DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+       }
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -4455,6 +5223,9 @@ int si_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       /* init golden registers */
+       si_init_golden_registers(rdev);
+
        rdev->accel_working = true;
        r = si_startup(rdev);
        if (r) {
@@ -4472,6 +5243,8 @@ int si_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        si_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
+       r600_uvd_rbc_stop(rdev);
+       radeon_uvd_suspend(rdev);
        si_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        si_pcie_gart_disable(rdev);
@@ -4512,6 +5285,8 @@ int si_init(struct radeon_device *rdev)
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
+       /* init golden registers */
+       si_init_golden_registers(rdev);
        /* Initialize scratch registers */
        si_scratch_init(rdev);
        /* Initialize surface registers */
@@ -4557,6 +5332,13 @@ int si_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 64 * 1024);
 
+       r = radeon_uvd_init(rdev);
+       if (!r) {
+               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+               ring->ring_obj = NULL;
+               r600_ring_init(rdev, ring, 4096);
+       }
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -4594,9 +5376,6 @@ int si_init(struct radeon_device *rdev)
 
 void si_fini(struct radeon_device *rdev)
 {
-#if 0
-       r600_blit_fini(rdev);
-#endif
        si_cp_fini(rdev);
        cayman_dma_fini(rdev);
        si_irq_fini(rdev);
@@ -4605,6 +5384,7 @@ void si_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
+       radeon_uvd_fini(rdev);
        si_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
@@ -4634,3 +5414,94 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
        mutex_unlock(&rdev->gpu_clock_mutex);
        return clock;
 }
+
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+       unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+       int r;
+
+       /* bypass vclk and dclk with bclk */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+               VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+               ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+       /* put PLL in bypass mode */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+       if (!vclk || !dclk) {
+               /* keep the Bypass mode, put PLL to sleep */
+               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+               return 0;
+       }
+
+       r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+                                         16384, 0x03FFFFFF, 0, 128, 5,
+                                         &fb_div, &vclk_div, &dclk_div);
+       if (r)
+               return r;
+
+       /* set RESET_ANTI_MUX to 0 */
+       WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+       /* set VCO_MODE to 1 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+       /* toggle UPLL_SLEEP to 1 then back to 0 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+       /* deassert UPLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+       mdelay(1);
+
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+
+       /* assert UPLL_RESET again */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+       /* disable spread spectrum. */
+       WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+       /* set feedback divider */
+       WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+       /* set ref divider to 0 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+       if (fb_div < 307200)
+               WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+       else
+               WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+       /* set PDIV_A and PDIV_B */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+               UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+               ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+       /* give the PLL some time to settle */
+       mdelay(15);
+
+       /* deassert PLL_RESET */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+       mdelay(15);
+
+       /* switch from bypass mode to normal mode */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+       r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+       if (r)
+               return r;
+
+       /* switch VCLK and DCLK selection */
+       WREG32_P(CG_UPLL_FUNC_CNTL_2,
+               VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+               ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+       mdelay(100);
+
+       return 0;
+}
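
Editorial note, not part of the patch: each golden-register table added above is a flat list of {offset, mask, value} triples, which the radeon_program_register_sequence() calls in si_init_golden_registers() walk three entries at a time. The mask/value interpretation below is an assumption for illustration only, and mmio_read32()/mmio_write32() are hypothetical stand-ins for the driver's real RREG32()/WREG32() accessors, not the driver's actual implementation.

    #include <stdint.h>
    #include <stddef.h>

    extern uint32_t mmio_read32(uint32_t reg);          /* hypothetical accessor */
    extern void mmio_write32(uint32_t reg, uint32_t v); /* hypothetical accessor */

    /* Sketch: apply a table of {reg, mask, value} triples as masked
     * read-modify-write updates, with a fast path for full overwrites. */
    static void apply_register_triples(const uint32_t *table, size_t count)
    {
            size_t i;

            for (i = 0; i + 2 < count; i += 3) {
                    uint32_t reg  = table[i + 0];
                    uint32_t mask = table[i + 1];
                    uint32_t val  = table[i + 2];
                    uint32_t tmp;

                    if (mask == 0xffffffff) {
                            tmp = val;              /* replace the whole register */
                    } else {
                            tmp = mmio_read32(reg); /* touch only the masked bits */
                            tmp &= ~mask;
                            tmp |= (val & mask);
                    }
                    mmio_write32(reg, tmp);
            }
    }

With that reading, an entry such as the Tahiti triple 0x9508, 0x00010000, 0x00010000 simply sets bit 16 of register 0x9508 and leaves the remaining bits untouched.
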
index 23fc08f..222877b 100644 (file)
 #define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
 #define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
 
+/* discrete uvd clocks */
+#define        CG_UPLL_FUNC_CNTL                               0x634
+#      define UPLL_RESET_MASK                          0x00000001
+#      define UPLL_SLEEP_MASK                          0x00000002
+#      define UPLL_BYPASS_EN_MASK                      0x00000004
+#      define UPLL_CTLREQ_MASK                         0x00000008
+#      define UPLL_VCO_MODE_MASK                       0x00000600
+#      define UPLL_REF_DIV_MASK                        0x003F0000
+#      define UPLL_CTLACK_MASK                         0x40000000
+#      define UPLL_CTLACK2_MASK                        0x80000000
+#define        CG_UPLL_FUNC_CNTL_2                             0x638
+#      define UPLL_PDIV_A(x)                           ((x) << 0)
+#      define UPLL_PDIV_A_MASK                         0x0000007F
+#      define UPLL_PDIV_B(x)                           ((x) << 8)
+#      define UPLL_PDIV_B_MASK                         0x00007F00
+#      define VCLK_SRC_SEL(x)                          ((x) << 20)
+#      define VCLK_SRC_SEL_MASK                        0x01F00000
+#      define DCLK_SRC_SEL(x)                          ((x) << 25)
+#      define DCLK_SRC_SEL_MASK                        0x3E000000
+#define        CG_UPLL_FUNC_CNTL_3                             0x63C
+#      define UPLL_FB_DIV(x)                           ((x) << 0)
+#      define UPLL_FB_DIV_MASK                         0x01FFFFFF
+#define        CG_UPLL_FUNC_CNTL_4                             0x644
+#      define UPLL_SPARE_ISPARE9                       0x00020000
+#define        CG_UPLL_FUNC_CNTL_5                             0x648
+#      define RESET_ANTI_MUX_MASK                      0x00000200
+#define        CG_UPLL_SPREAD_SPECTRUM                         0x650
+#      define SSEN_MASK                                0x00000001
+
 #define        CG_MULT_THERMAL_STATUS                                  0x714
 #define                ASIC_MAX_TEMP(x)                                ((x) << 0)
 #define                ASIC_MAX_TEMP_MASK                              0x000001ff
@@ -65,6 +94,8 @@
 
 #define DMIF_ADDR_CONFIG                               0xBD4
 
+#define DMIF_ADDR_CALC                                 0xC00
+
 #define        SRBM_STATUS                                     0xE50
 #define                GRBM_RQ_PENDING                         (1 << 5)
 #define                VMC_BUSY                                (1 << 8)
 #       define THREAD_TRACE_FLUSH                       (54 << 0)
 #       define THREAD_TRACE_FINISH                      (55 << 0)
 
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG                           0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG                                0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG                       0xEF54
+#define UVD_RBC_RB_RPTR                                        0xF690
+#define UVD_RBC_RB_WPTR                                        0xF694
+
 /*
  * PM4
  */
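
Editorial note, not part of the patch: si_set_uvd_clocks() earlier in this merge programs the UPLL fields defined above through masked writes of the form WREG32_P(reg, FIELD(x), ~FIELD_MASK). The write-with-preserve-mask semantics sketched below are an assumption about that macro, not quoted from this diff, and mmio_read32()/mmio_write32() are hypothetical accessors.

    #include <stdint.h>

    extern uint32_t mmio_read32(uint32_t reg);          /* hypothetical accessor */
    extern void mmio_write32(uint32_t reg, uint32_t v); /* hypothetical accessor */

    /* Sketch: write 'val' into a register while preserving the bits selected
     * by 'keep_mask'.  A call shaped like
     *   WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK)
     * then replaces only the feedback-divider field. */
    static void write_masked(uint32_t reg, uint32_t val, uint32_t keep_mask)
    {
            uint32_t tmp = mmio_read32(reg);

            tmp &= keep_mask;          /* keep the bits we are not touching */
            tmp |= (val & ~keep_mask); /* merge in the new field value */
            mmio_write32(reg, tmp);
    }

Under that reading, the UPLL bring-up in si_set_uvd_clocks() is a sequence of such field updates: select the bypass clock source, toggle reset/sleep, program the dividers, wait for the PLL to settle, then switch VCLK/DCLK back to the PLL outputs.
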
index d917a41..7dff49e 100644 (file)
@@ -494,10 +494,10 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
 
        if (event) {
                event->pipe = 0;
+               drm_vblank_get(dev, 0);
                spin_lock_irqsave(&dev->event_lock, flags);
                scrtc->event = event;
                spin_unlock_irqrestore(&dev->event_lock, flags);
-               drm_vblank_get(dev, 0);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
deleted file mode 100644 (file)
index 80f73d1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-ccflags-y := -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
-
-tegra-drm-y := drm.o fb.o dc.o host1x.o
-tegra-drm-y += output.o rgb.o hdmi.o
-
-obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
deleted file mode 100644 (file)
index 9d452df..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include "drm.h"
-
-#define DRIVER_NAME "tegra"
-#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
-       struct device *dev = drm->dev;
-       struct host1x *host1x;
-       int err;
-
-       host1x = dev_get_drvdata(dev);
-       drm->dev_private = host1x;
-       host1x->drm = drm;
-
-       drm_mode_config_init(drm);
-
-       err = host1x_drm_init(host1x, drm);
-       if (err < 0)
-               return err;
-
-       err = drm_vblank_init(drm, drm->mode_config.num_crtc);
-       if (err < 0)
-               return err;
-
-       err = tegra_drm_fb_init(drm);
-       if (err < 0)
-               return err;
-
-       drm_kms_helper_poll_init(drm);
-
-       return 0;
-}
-
-static int tegra_drm_unload(struct drm_device *drm)
-{
-       drm_kms_helper_poll_fini(drm);
-       tegra_drm_fb_exit(drm);
-
-       drm_mode_config_cleanup(drm);
-
-       return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
-       return 0;
-}
-
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
-       struct host1x *host1x = drm->dev_private;
-
-       drm_fbdev_cma_restore_mode(host1x->fbdev);
-}
-
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
-};
-
-static const struct file_operations tegra_drm_fops = {
-       .owner = THIS_MODULE,
-       .open = drm_open,
-       .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
-       .mmap = drm_gem_cma_mmap,
-       .poll = drm_poll,
-       .fasync = drm_fasync,
-       .read = drm_read,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = drm_compat_ioctl,
-#endif
-       .llseek = noop_llseek,
-};
-
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
-{
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
-               struct tegra_dc *dc = to_tegra_dc(crtc);
-
-               if (dc->pipe == pipe)
-                       return crtc;
-       }
-
-       return NULL;
-}
-
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
-{
-       /* TODO: implement real hardware counter using syncpoints */
-       return drm_vblank_count(dev, crtc);
-}
-
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
-{
-       struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-
-       if (!crtc)
-               return -ENODEV;
-
-       tegra_dc_enable_vblank(dc);
-
-       return 0;
-}
-
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
-{
-       struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-
-       if (crtc)
-               tegra_dc_disable_vblank(dc);
-}
-
-static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
-{
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-               tegra_dc_cancel_page_flip(crtc, file);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *)s->private;
-       struct drm_device *drm = node->minor->dev;
-       struct drm_framebuffer *fb;
-
-       mutex_lock(&drm->mode_config.fb_lock);
-
-       list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
-               seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
-                          fb->base.id, fb->width, fb->height, fb->depth,
-                          fb->bits_per_pixel,
-                          atomic_read(&fb->refcount.refcount));
-       }
-
-       mutex_unlock(&drm->mode_config.fb_lock);
-
-       return 0;
-}
-
-static struct drm_info_list tegra_debugfs_list[] = {
-       { "framebuffers", tegra_debugfs_framebuffers, 0 },
-};
-
-static int tegra_debugfs_init(struct drm_minor *minor)
-{
-       return drm_debugfs_create_files(tegra_debugfs_list,
-                                       ARRAY_SIZE(tegra_debugfs_list),
-                                       minor->debugfs_root, minor);
-}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
-       drm_debugfs_remove_files(tegra_debugfs_list,
-                                ARRAY_SIZE(tegra_debugfs_list), minor);
-}
-#endif
-
-struct drm_driver tegra_drm_driver = {
-       .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
-       .load = tegra_drm_load,
-       .unload = tegra_drm_unload,
-       .open = tegra_drm_open,
-       .preclose = tegra_drm_preclose,
-       .lastclose = tegra_drm_lastclose,
-
-       .get_vblank_counter = tegra_drm_get_vblank_counter,
-       .enable_vblank = tegra_drm_enable_vblank,
-       .disable_vblank = tegra_drm_disable_vblank,
-
-#if defined(CONFIG_DEBUG_FS)
-       .debugfs_init = tegra_debugfs_init,
-       .debugfs_cleanup = tegra_debugfs_cleanup,
-#endif
-
-       .gem_free_object = drm_gem_cma_free_object,
-       .gem_vm_ops = &drm_gem_cma_vm_ops,
-       .dumb_create = drm_gem_cma_dumb_create,
-       .dumb_map_offset = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy = drm_gem_cma_dumb_destroy,
-
-       .ioctls = tegra_drm_ioctls,
-       .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
-       .fops = &tegra_drm_fops,
-
-       .name = DRIVER_NAME,
-       .desc = DRIVER_DESC,
-       .date = DRIVER_DATE,
-       .major = DRIVER_MAJOR,
-       .minor = DRIVER_MINOR,
-       .patchlevel = DRIVER_PATCHLEVEL,
-};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
deleted file mode 100644 (file)
index 0391495..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "drm.h"
-
-static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
-{
-       struct host1x *host1x = drm->dev_private;
-
-       drm_fbdev_cma_hotplug_event(host1x->fbdev);
-}
-
-static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
-       .fb_create = drm_fb_cma_create,
-       .output_poll_changed = tegra_drm_fb_output_poll_changed,
-};
-
-int tegra_drm_fb_init(struct drm_device *drm)
-{
-       struct host1x *host1x = drm->dev_private;
-       struct drm_fbdev_cma *fbdev;
-
-       drm->mode_config.min_width = 0;
-       drm->mode_config.min_height = 0;
-
-       drm->mode_config.max_width = 4096;
-       drm->mode_config.max_height = 4096;
-
-       drm->mode_config.funcs = &tegra_drm_mode_funcs;
-
-       fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
-                                  drm->mode_config.num_connector);
-       if (IS_ERR(fbdev))
-               return PTR_ERR(fbdev);
-
-       host1x->fbdev = fbdev;
-
-       return 0;
-}
-
-void tegra_drm_fb_exit(struct drm_device *drm)
-{
-       struct host1x *host1x = drm->dev_private;
-
-       drm_fbdev_cma_fini(host1x->fbdev);
-}
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
deleted file mode 100644 (file)
index 92e25a7..0000000
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-#include "drm.h"
-
-struct host1x_drm_client {
-       struct host1x_client *client;
-       struct device_node *np;
-       struct list_head list;
-};
-
-static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
-{
-       struct host1x_drm_client *client;
-
-       client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (!client)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&client->list);
-       client->np = of_node_get(np);
-
-       list_add_tail(&client->list, &host1x->drm_clients);
-
-       return 0;
-}
-
-static int host1x_activate_drm_client(struct host1x *host1x,
-                                     struct host1x_drm_client *drm,
-                                     struct host1x_client *client)
-{
-       mutex_lock(&host1x->drm_clients_lock);
-       list_del_init(&drm->list);
-       list_add_tail(&drm->list, &host1x->drm_active);
-       drm->client = client;
-       mutex_unlock(&host1x->drm_clients_lock);
-
-       return 0;
-}
-
-static int host1x_remove_drm_client(struct host1x *host1x,
-                                   struct host1x_drm_client *client)
-{
-       mutex_lock(&host1x->drm_clients_lock);
-       list_del_init(&client->list);
-       mutex_unlock(&host1x->drm_clients_lock);
-
-       of_node_put(client->np);
-       kfree(client);
-
-       return 0;
-}
-
-static int host1x_parse_dt(struct host1x *host1x)
-{
-       static const char * const compat[] = {
-               "nvidia,tegra20-dc",
-               "nvidia,tegra20-hdmi",
-               "nvidia,tegra30-dc",
-               "nvidia,tegra30-hdmi",
-       };
-       unsigned int i;
-       int err;
-
-       for (i = 0; i < ARRAY_SIZE(compat); i++) {
-               struct device_node *np;
-
-               for_each_child_of_node(host1x->dev->of_node, np) {
-                       if (of_device_is_compatible(np, compat[i]) &&
-                           of_device_is_available(np)) {
-                               err = host1x_add_drm_client(host1x, np);
-                               if (err < 0)
-                                       return err;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int tegra_host1x_probe(struct platform_device *pdev)
-{
-       struct host1x *host1x;
-       struct resource *regs;
-       int err;
-
-       host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
-       if (!host1x)
-               return -ENOMEM;
-
-       mutex_init(&host1x->drm_clients_lock);
-       INIT_LIST_HEAD(&host1x->drm_clients);
-       INIT_LIST_HEAD(&host1x->drm_active);
-       mutex_init(&host1x->clients_lock);
-       INIT_LIST_HEAD(&host1x->clients);
-       host1x->dev = &pdev->dev;
-
-       err = host1x_parse_dt(host1x);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
-               return err;
-       }
-
-       host1x->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(host1x->clk))
-               return PTR_ERR(host1x->clk);
-
-       err = clk_prepare_enable(host1x->clk);
-       if (err < 0)
-               return err;
-
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               err = -ENXIO;
-               goto err;
-       }
-
-       err = platform_get_irq(pdev, 0);
-       if (err < 0)
-               goto err;
-
-       host1x->syncpt = err;
-
-       err = platform_get_irq(pdev, 1);
-       if (err < 0)
-               goto err;
-
-       host1x->irq = err;
-
-       host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
-       if (IS_ERR(host1x->regs)) {
-               err = PTR_ERR(host1x->regs);
-               goto err;
-       }
-
-       platform_set_drvdata(pdev, host1x);
-
-       return 0;
-
-err:
-       clk_disable_unprepare(host1x->clk);
-       return err;
-}
-
-static int tegra_host1x_remove(struct platform_device *pdev)
-{
-       struct host1x *host1x = platform_get_drvdata(pdev);
-
-       clk_disable_unprepare(host1x->clk);
-
-       return 0;
-}
-
-int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
-{
-       struct host1x_client *client;
-
-       mutex_lock(&host1x->clients_lock);
-
-       list_for_each_entry(client, &host1x->clients, list) {
-               if (client->ops && client->ops->drm_init) {
-                       int err = client->ops->drm_init(client, drm);
-                       if (err < 0) {
-                               dev_err(host1x->dev,
-                                       "DRM setup failed for %s: %d\n",
-                                       dev_name(client->dev), err);
-                               return err;
-                       }
-               }
-       }
-
-       mutex_unlock(&host1x->clients_lock);
-
-       return 0;
-}
-
-int host1x_drm_exit(struct host1x *host1x)
-{
-       struct platform_device *pdev = to_platform_device(host1x->dev);
-       struct host1x_client *client;
-
-       if (!host1x->drm)
-               return 0;
-
-       mutex_lock(&host1x->clients_lock);
-
-       list_for_each_entry_reverse(client, &host1x->clients, list) {
-               if (client->ops && client->ops->drm_exit) {
-                       int err = client->ops->drm_exit(client);
-                       if (err < 0) {
-                               dev_err(host1x->dev,
-                                       "DRM cleanup failed for %s: %d\n",
-                                       dev_name(client->dev), err);
-                               return err;
-                       }
-               }
-       }
-
-       mutex_unlock(&host1x->clients_lock);
-
-       drm_platform_exit(&tegra_drm_driver, pdev);
-       host1x->drm = NULL;
-
-       return 0;
-}
-
-int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
-{
-       struct host1x_drm_client *drm, *tmp;
-       int err;
-
-       mutex_lock(&host1x->clients_lock);
-       list_add_tail(&client->list, &host1x->clients);
-       mutex_unlock(&host1x->clients_lock);
-
-       list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
-               if (drm->np == client->dev->of_node)
-                       host1x_activate_drm_client(host1x, drm, client);
-
-       if (list_empty(&host1x->drm_clients)) {
-               struct platform_device *pdev = to_platform_device(host1x->dev);
-
-               err = drm_platform_init(&tegra_drm_driver, pdev);
-               if (err < 0) {
-                       dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
-                       return err;
-               }
-       }
-
-       client->host1x = host1x;
-
-       return 0;
-}
-
-int host1x_unregister_client(struct host1x *host1x,
-                            struct host1x_client *client)
-{
-       struct host1x_drm_client *drm, *tmp;
-       int err;
-
-       list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
-               if (drm->client == client) {
-                       err = host1x_drm_exit(host1x);
-                       if (err < 0) {
-                               dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
-                                       err);
-                               return err;
-                       }
-
-                       host1x_remove_drm_client(host1x, drm);
-                       break;
-               }
-       }
-
-       mutex_lock(&host1x->clients_lock);
-       list_del_init(&client->list);
-       mutex_unlock(&host1x->clients_lock);
-
-       return 0;
-}
-
-static struct of_device_id tegra_host1x_of_match[] = {
-       { .compatible = "nvidia,tegra30-host1x", },
-       { .compatible = "nvidia,tegra20-host1x", },
-       { },
-};
-MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
-
-struct platform_driver tegra_host1x_driver = {
-       .driver = {
-               .name = "tegra-host1x",
-               .owner = THIS_MODULE,
-               .of_match_table = tegra_host1x_of_match,
-       },
-       .probe = tegra_host1x_probe,
-       .remove = tegra_host1x_remove,
-};
-
-static int __init tegra_host1x_init(void)
-{
-       int err;
-
-       err = platform_driver_register(&tegra_host1x_driver);
-       if (err < 0)
-               return err;
-
-       err = platform_driver_register(&tegra_dc_driver);
-       if (err < 0)
-               goto unregister_host1x;
-
-       err = platform_driver_register(&tegra_hdmi_driver);
-       if (err < 0)
-               goto unregister_dc;
-
-       return 0;
-
-unregister_dc:
-       platform_driver_unregister(&tegra_dc_driver);
-unregister_host1x:
-       platform_driver_unregister(&tegra_host1x_driver);
-       return err;
-}
-module_init(tegra_host1x_init);
-
-static void __exit tegra_host1x_exit(void)
-{
-       platform_driver_unregister(&tegra_hdmi_driver);
-       platform_driver_unregister(&tegra_dc_driver);
-       platform_driver_unregister(&tegra_host1x_driver);
-}
-module_exit(tegra_host1x_exit);
-
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
-MODULE_LICENSE("GPL");
index deda656..7d2eefe 100644 (file)
@@ -1,4 +1,7 @@
-ccflags-y := -Iinclude/drm -Werror
+ccflags-y := -Iinclude/drm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+       ccflags-y += -Werror
+endif
 
 tilcdc-y := \
        tilcdc_crtc.o \
index c5b592d..2b5461b 100644 (file)
@@ -75,7 +75,7 @@ static int modeset_init(struct drm_device *dev)
                mod->funcs->modeset_init(mod, dev);
        }
 
-       if ((priv->num_encoders = 0) || (priv->num_connectors == 0)) {
+       if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
                /* oh nos! */
                dev_err(dev->dev, "no encoders/connectors found\n");
                return -ENXIO;
@@ -299,11 +299,10 @@ static int tilcdc_irq_postinstall(struct drm_device *dev)
        struct tilcdc_drm_private *priv = dev->dev_private;
 
        /* enable FIFO underflow irq: */
-       if (priv->rev == 1) {
+       if (priv->rev == 1)
                tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
-       } else {
+       else
                tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
-       }
 
        return 0;
 }
@@ -363,7 +362,7 @@ static const struct {
        uint8_t  rev;
        uint8_t  save;
        uint32_t reg;
-} registers[] =        {
+} registers[] =                {
 #define REG(rev, save, reg) { #reg, rev, save, reg }
                /* exists in revision 1: */
                REG(1, false, LCDC_PID_REG),
index 580b74e..ea963f9 100644 (file)
@@ -305,7 +305,7 @@ static const struct tilcdc_module_ops panel_module_ops = {
  */
 
 /* maybe move this somewhere common if it is needed by other outputs? */
-static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np)
+static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
 {
        struct device_node *info_np;
        struct tilcdc_panel_info *info;
@@ -413,7 +413,6 @@ static struct of_device_id panel_of_match[] = {
                { .compatible = "ti,tilcdc,panel", },
                { },
 };
-MODULE_DEVICE_TABLE(of, panel_of_match);
 
 struct platform_driver panel_driver = {
        .probe = panel_probe,
index 568dc1c..db1d2fc 100644 (file)
@@ -353,7 +353,6 @@ static struct of_device_id slave_of_match[] = {
                { .compatible = "ti,tilcdc,slave", },
                { },
 };
-MODULE_DEVICE_TABLE(of, slave_of_match);
 
 struct platform_driver slave_driver = {
        .probe = slave_probe,
index 58d487b..a36788f 100644 (file)
@@ -396,7 +396,6 @@ static struct of_device_id tfp410_of_match[] = {
                { .compatible = "ti,tilcdc,tfp410", },
                { },
 };
-MODULE_DEVICE_TABLE(of, tfp410_of_match);
 
 struct platform_driver tfp410_driver = {
        .probe = tfp410_probe,
index 8be35c8..af89458 100644 (file)
@@ -86,6 +86,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
        mutex_lock(&man->io_reserve_mutex);
        return 0;
 }
+EXPORT_SYMBOL(ttm_mem_io_lock);
 
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 {
@@ -94,6 +95,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 
        mutex_unlock(&man->io_reserve_mutex);
 }
+EXPORT_SYMBOL(ttm_mem_io_unlock);
 
 static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 {
@@ -111,8 +113,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
        return 0;
 }
 
-static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-                             struct ttm_mem_reg *mem)
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                      struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;
@@ -134,9 +137,10 @@ retry:
        }
        return ret;
 }
+EXPORT_SYMBOL(ttm_mem_io_reserve);
 
-static void ttm_mem_io_free(struct ttm_bo_device *bdev,
-                           struct ttm_mem_reg *mem)
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                    struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
@@ -149,6 +153,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
                bdev->driver->io_mem_free(bdev, mem);
 
 }
+EXPORT_SYMBOL(ttm_mem_io_free);
 
 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 {
index 74705f3..3df9f16 100644 (file)
@@ -147,7 +147,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
-       page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+       page_last = vma_pages(vma) +
            bo->vm_node->start - vma->vm_pgoff;
 
        if (unlikely(page_offset >= bo->num_pages)) {
@@ -258,7 +258,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 
        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-                                (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+                                vma_pages(vma));
        if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
                bo = NULL;
        read_unlock(&bdev->vm_lock);
index 9f4be3d..dc0c065 100644 (file)
@@ -482,7 +482,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
        struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
        struct drm_device *dev = ufbdev->helper.dev;
        struct fb_info *info;
-       struct device *device = &dev->usbdev->dev;
+       struct device *device = dev->dev;
        struct drm_framebuffer *fb;
        struct drm_mode_fb_cmd2 mode_cmd;
        struct udl_gem_object *obj;
index 3816270..ef034fa 100644 (file)
@@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
        if (IS_ERR(attach))
                return ERR_CAST(attach);
 
+       get_dma_buf(dma_buf);
+
        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
@@ -322,5 +324,7 @@ fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
 fail_detach:
        dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+
        return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
new file mode 100644 (file)
index 0000000..ccfd42b
--- /dev/null
@@ -0,0 +1,24 @@
+config TEGRA_HOST1X
+       tristate "NVIDIA Tegra host1x driver"
+       depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+       help
+         Driver for the NVIDIA Tegra host1x hardware.
+
+         The Tegra host1x module is the DMA engine for register access to
+         Tegra's graphics- and multimedia-related modules. The modules served
+         by host1x are referred to as clients. host1x includes some other
+         functionality, such as synchronization.
+
+if TEGRA_HOST1X
+
+config TEGRA_HOST1X_FIREWALL
+       bool "Enable HOST1X security firewall"
+       default y
+       help
+         Say yes if the kernel should protect command streams from tampering.
+
+         If unsure, choose Y.
+
+source "drivers/gpu/host1x/drm/Kconfig"
+
+endif
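
As a rough illustration of how these options are selected (symbols as introduced above; the values are only an example), a configuration fragment building the driver as a module with the firewall left at its default would be:

    CONFIG_TEGRA_HOST1X=m
    CONFIG_TEGRA_HOST1X_FIREWALL=y
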
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
new file mode 100644 (file)
index 0000000..3b037b6
--- /dev/null
@@ -0,0 +1,20 @@
+ccflags-y = -Idrivers/gpu/host1x
+
+host1x-y = \
+       syncpt.o \
+       dev.o \
+       intr.o \
+       cdma.o \
+       channel.o \
+       job.o \
+       debug.o \
+       hw/host1x01.o
+
+ccflags-y += -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
+obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
new file mode 100644 (file)
index 0000000..de72172
--- /dev/null
@@ -0,0 +1,491 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <asm/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <trace/events/host1x.h>
+
+#include "cdma.h"
+#include "channel.h"
+#include "dev.h"
+#include "debug.h"
+#include "host1x_bo.h"
+#include "job.h"
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == pos
+ * means that the push buffer is full, not empty.
+ */
+
+#define HOST1X_PUSHBUFFER_SLOTS        512
+
+/*
+ * Clean up push buffer resources
+ */
+static void host1x_pushbuffer_destroy(struct push_buffer *pb)
+{
+       struct host1x_cdma *cdma = pb_to_cdma(pb);
+       struct host1x *host1x = cdma_to_host1x(cdma);
+
+       if (pb->phys != 0)
+               dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
+                                     pb->mapped, pb->phys);
+
+       pb->mapped = NULL;
+       pb->phys = 0;
+}
+
+/*
+ * Init push buffer resources
+ */
+static int host1x_pushbuffer_init(struct push_buffer *pb)
+{
+       struct host1x_cdma *cdma = pb_to_cdma(pb);
+       struct host1x *host1x = cdma_to_host1x(cdma);
+
+       pb->mapped = NULL;
+       pb->phys = 0;
+       pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
+
+       /* initialize buffer pointers */
+       pb->fence = pb->size_bytes - 8;
+       pb->pos = 0;
+
+       /* allocate and map pushbuffer memory */
+       pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
+                                           &pb->phys, GFP_KERNEL);
+       if (!pb->mapped)
+               goto fail;
+
+       host1x_hw_pushbuffer_init(host1x, pb);
+
+       return 0;
+
+fail:
+       host1x_pushbuffer_destroy(pb);
+       return -ENOMEM;
+}
+
+/*
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
+{
+       u32 pos = pb->pos;
+       u32 *p = (u32 *)((u32)pb->mapped + pos);
+       WARN_ON(pos == pb->fence);
+       *(p++) = op1;
+       *(p++) = op2;
+       pb->pos = (pos + 8) & (pb->size_bytes - 1);
+}
+
+/*
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
+{
+       /* Advance the next write position */
+       pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
+}
+
+/*
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 host1x_pushbuffer_space(struct push_buffer *pb)
+{
+       return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
+}
+
+/*
+ * Sleep (if necessary) until the requested event happens
+ *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ *     - Returns 1
+ *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ *     - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
+                                    enum cdma_event event)
+{
+       for (;;) {
+               unsigned int space;
+
+               if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+                       space = list_empty(&cdma->sync_queue) ? 1 : 0;
+               else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
+                       struct push_buffer *pb = &cdma->push_buffer;
+                       space = host1x_pushbuffer_space(pb);
+               } else {
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               if (space)
+                       return space;
+
+               trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
+                                      event);
+
+               /* If somebody has managed to already start waiting, yield */
+               if (cdma->event != CDMA_EVENT_NONE) {
+                       mutex_unlock(&cdma->lock);
+                       schedule();
+                       mutex_lock(&cdma->lock);
+                       continue;
+               }
+               cdma->event = event;
+
+               mutex_unlock(&cdma->lock);
+               down(&cdma->sem);
+               mutex_lock(&cdma->lock);
+       }
+       return 0;
+}
+
+/*
+ * Start timer that tracks the time spent by the job.
+ * Must be called with the cdma lock held.
+ */
+static void cdma_start_timer_locked(struct host1x_cdma *cdma,
+                                   struct host1x_job *job)
+{
+       struct host1x *host = cdma_to_host1x(cdma);
+
+       if (cdma->timeout.client) {
+               /* timer already started */
+               return;
+       }
+
+       cdma->timeout.client = job->client;
+       cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+       cdma->timeout.syncpt_val = job->syncpt_end;
+       cdma->timeout.start_ktime = ktime_get();
+
+       schedule_delayed_work(&cdma->timeout.wq,
+                             msecs_to_jiffies(job->timeout));
+}
+
+/*
+ * Stop timer when a buffer submission completes.
+ * Must be called with the cdma lock held.
+ */
+static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
+{
+       cancel_delayed_work(&cdma->timeout.wq);
+       cdma->timeout.client = 0;
+}
+
+/*
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ *  - unpin & unref their mems
+ *  - pop their push buffer slots
+ *  - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma_locked(struct host1x_cdma *cdma)
+{
+       bool signal = false;
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       struct host1x_job *job, *n;
+
+       /* If CDMA is stopped, queue is cleared and we can return */
+       if (!cdma->running)
+               return;
+
+       /*
+        * Walk the sync queue, reading the sync point registers as necessary,
+        * to consume as many sync queue entries as possible without blocking
+        */
+       list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
+               struct host1x_syncpt *sp =
+                       host1x_syncpt_get(host1x, job->syncpt_id);
+
+               /* Check whether this syncpt has completed, and bail if not */
+               if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
+                       /* Start timer on next pending syncpt */
+                       if (job->timeout)
+                               cdma_start_timer_locked(cdma, job);
+                       break;
+               }
+
+               /* Cancel timeout, when a buffer completes */
+               if (cdma->timeout.client)
+                       stop_cdma_timer_locked(cdma);
+
+               /* Unpin the memory */
+               host1x_job_unpin(job);
+
+               /* Pop push buffer slots */
+               if (job->num_slots) {
+                       struct push_buffer *pb = &cdma->push_buffer;
+                       host1x_pushbuffer_pop(pb, job->num_slots);
+                       if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+                               signal = true;
+               }
+
+               list_del(&job->list);
+               host1x_job_put(job);
+       }
+
+       if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
+           list_empty(&cdma->sync_queue))
+               signal = true;
+
+       if (signal) {
+               cdma->event = CDMA_EVENT_NONE;
+               up(&cdma->sem);
+       }
+}
+
+void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
+                                  struct device *dev)
+{
+       u32 restart_addr;
+       u32 syncpt_incrs;
+       struct host1x_job *job = NULL;
+       u32 syncpt_val;
+       struct host1x *host1x = cdma_to_host1x(cdma);
+
+       syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
+
+       dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
+               __func__, syncpt_val);
+
+       /*
+        * Move the sync_queue read pointer to the first entry that hasn't
+        * completed based on the current HW syncpt value. It's likely there
+        * won't be any (i.e. we're still at the head), but covers the case
+        * where a syncpt incr happens just prior/during the teardown.
+        */
+
+       dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
+               __func__);
+
+       list_for_each_entry(job, &cdma->sync_queue, list) {
+               if (syncpt_val < job->syncpt_end)
+                       break;
+
+               host1x_job_dump(dev, job);
+       }
+
+       /*
+        * Walk the sync_queue, first incrementing with the CPU syncpts that
+        * are partially executed (the first buffer) or fully skipped while
+        * still in the current context (slots are also NOP-ed).
+        *
+        * At the point contexts are interleaved, syncpt increments must be
+        * done inline with the pushbuffer from a GATHER buffer to maintain
+        * the order (slots are modified to be a GATHER of syncpt incrs).
+        *
+        * Note: save in restart_addr the location where the timed out buffer
+        * started in the PB, so we can start the refetch from there (with the
+        * modified NOP-ed PB slots). This lets things appear to have completed
+        * properly for this buffer and resources are freed.
+        */
+
+       dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
+               __func__);
+
+       if (!list_empty(&cdma->sync_queue))
+               restart_addr = job->first_get;
+       else
+               restart_addr = cdma->last_pos;
+
+       /* do CPU increments as long as this context continues */
+       list_for_each_entry_from(job, &cdma->sync_queue, list) {
+               /* different context, gets us out of this loop */
+               if (job->client != cdma->timeout.client)
+                       break;
+
+               /* won't need a timeout when replayed */
+               job->timeout = 0;
+
+               syncpt_incrs = job->syncpt_end - syncpt_val;
+               dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
+
+               host1x_job_dump(dev, job);
+
+               /* safe to use CPU to incr syncpts */
+               host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
+                                               syncpt_incrs, job->syncpt_end,
+                                               job->num_slots);
+
+               syncpt_val += syncpt_incrs;
+       }
+
+       /* The following submits from the same client may be dependent on the
+        * failed submit and therefore they may fail. Force a small timeout
+        * to make the queue cleanup faster */
+
+       list_for_each_entry_from(job, &cdma->sync_queue, list)
+               if (job->client == cdma->timeout.client)
+                       job->timeout = min_t(unsigned int, job->timeout, 500);
+
+       dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);
+
+       /* roll back DMAGET and start up channel again */
+       host1x_hw_cdma_resume(host1x, cdma, restart_addr);
+}
+
+/*
+ * Create a cdma
+ */
+int host1x_cdma_init(struct host1x_cdma *cdma)
+{
+       int err;
+
+       mutex_init(&cdma->lock);
+       sema_init(&cdma->sem, 0);
+
+       INIT_LIST_HEAD(&cdma->sync_queue);
+
+       cdma->event = CDMA_EVENT_NONE;
+       cdma->running = false;
+       cdma->torndown = false;
+
+       err = host1x_pushbuffer_init(&cdma->push_buffer);
+       if (err)
+               return err;
+       return 0;
+}
+
+/*
+ * Destroy a cdma
+ */
+int host1x_cdma_deinit(struct host1x_cdma *cdma)
+{
+       struct push_buffer *pb = &cdma->push_buffer;
+       struct host1x *host1x = cdma_to_host1x(cdma);
+
+       if (cdma->running) {
+               pr_warn("%s: CDMA still running\n", __func__);
+               return -EBUSY;
+       }
+
+       host1x_pushbuffer_destroy(pb);
+       host1x_hw_cdma_timeout_destroy(host1x, cdma);
+
+       return 0;
+}
+
+/*
+ * Begin a cdma submit
+ */
+int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
+{
+       struct host1x *host1x = cdma_to_host1x(cdma);
+
+       mutex_lock(&cdma->lock);
+
+       if (job->timeout) {
+               /* init state on first submit with timeout value */
+               if (!cdma->timeout.initialized) {
+                       int err;
+                       err = host1x_hw_cdma_timeout_init(host1x, cdma,
+                                                         job->syncpt_id);
+                       if (err) {
+                               mutex_unlock(&cdma->lock);
+                               return err;
+                       }
+               }
+       }
+       if (!cdma->running)
+               host1x_hw_cdma_start(host1x, cdma);
+
+       cdma->slots_free = 0;
+       cdma->slots_used = 0;
+       cdma->first_get = cdma->push_buffer.pos;
+
+       trace_host1x_cdma_begin(dev_name(job->channel->dev));
+       return 0;
+}
+
+/*
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
+{
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       struct push_buffer *pb = &cdma->push_buffer;
+       u32 slots_free = cdma->slots_free;
+
+       if (host1x_debug_trace_cmdbuf)
+               trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
+                                      op1, op2);
+
+       if (slots_free == 0) {
+               host1x_hw_cdma_flush(host1x, cdma);
+               slots_free = host1x_cdma_wait_locked(cdma,
+                                               CDMA_EVENT_PUSH_BUFFER_SPACE);
+       }
+       cdma->slots_free = slots_free - 1;
+       cdma->slots_used++;
+       host1x_pushbuffer_push(pb, op1, op2);
+}
+
+/*
+ * End a cdma submit
+ * Kick off DMA, add the job to the sync queue, and record the number of push
+ * buffer slots to be freed once it completes. The handles for a submit must
+ * all be pinned at the same time, but they can be unpinned in smaller chunks.
+ */
+void host1x_cdma_end(struct host1x_cdma *cdma,
+                    struct host1x_job *job)
+{
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       bool idle = list_empty(&cdma->sync_queue);
+
+       host1x_hw_cdma_flush(host1x, cdma);
+
+       job->first_get = cdma->first_get;
+       job->num_slots = cdma->slots_used;
+       host1x_job_get(job);
+       list_add_tail(&job->list, &cdma->sync_queue);
+
+       /* start timer on idle -> active transitions */
+       if (job->timeout && idle)
+               cdma_start_timer_locked(cdma, job);
+
+       trace_host1x_cdma_end(dev_name(job->channel->dev));
+       mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Update cdma state according to current sync point values
+ */
+void host1x_cdma_update(struct host1x_cdma *cdma)
+{
+       mutex_lock(&cdma->lock);
+       update_cdma_locked(cdma);
+       mutex_unlock(&cdma->lock);
+}
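
The push buffer above is a circular array in which fence == pos means "full" rather than "empty"; the standalone sketch below models just that index arithmetic outside the kernel (sizes and names are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 512
#define SIZE_BYTES (SLOTS * 8)          /* one slot is two 32-bit words */

struct pb {
        uint32_t pos;                   /* next write offset, in bytes */
        uint32_t fence;                 /* offset of the last free slot */
};

static void pb_init(struct pb *pb)
{
        pb->pos = 0;
        pb->fence = SIZE_BYTES - 8;     /* reserve one slot so full != empty */
}

/* number of free two-word slots */
static uint32_t pb_space(const struct pb *pb)
{
        return ((pb->fence - pb->pos) & (SIZE_BYTES - 1)) / 8;
}

/* producer: consume one slot */
static void pb_push(struct pb *pb)
{
        assert(pb->pos != pb->fence);   /* caller must check space first */
        pb->pos = (pb->pos + 8) & (SIZE_BYTES - 1);
}

/* consumer: release a number of completed slots */
static void pb_pop(struct pb *pb, unsigned int slots)
{
        pb->fence = (pb->fence + slots * 8) & (SIZE_BYTES - 1);
}

int main(void)
{
        struct pb pb;

        pb_init(&pb);
        printf("free slots: %u\n", pb_space(&pb));      /* 511 */
        pb_push(&pb);
        pb_pop(&pb, 1);
        printf("free slots: %u\n", pb_space(&pb));      /* 511 again */
        return 0;
}
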
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
new file mode 100644 (file)
index 0000000..313c4b7
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_CDMA_H
+#define __HOST1X_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+
+struct host1x_syncpt;
+struct host1x_userctx_timeout;
+struct host1x_job;
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ *     begin
+ *             push - send ops to the push buffer
+ *     end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ *     update - call to update sync queue and push buffer, unpin memory
+ */
+
+struct push_buffer {
+       u32 *mapped;                    /* mapped pushbuffer memory */
+       dma_addr_t phys;                /* physical address of pushbuffer */
+       u32 fence;                      /* index we've written */
+       u32 pos;                        /* index to write to */
+       u32 size_bytes;
+};
+
+struct buffer_timeout {
+       struct delayed_work wq;         /* work queue */
+       bool initialized;               /* timer one-time setup flag */
+       struct host1x_syncpt *syncpt;   /* buffer completion syncpt */
+       u32 syncpt_val;                 /* syncpt value when completed */
+       ktime_t start_ktime;            /* starting time */
+       /* context timeout information */
+       int client;
+};
+
+enum cdma_event {
+       CDMA_EVENT_NONE,                /* not waiting for any event */
+       CDMA_EVENT_SYNC_QUEUE_EMPTY,    /* wait for empty sync queue */
+       CDMA_EVENT_PUSH_BUFFER_SPACE    /* wait for space in push buffer */
+};
+
+struct host1x_cdma {
+       struct mutex lock;              /* controls access to shared state */
+       struct semaphore sem;           /* signalled when event occurs */
+       enum cdma_event event;          /* event that sem is waiting for */
+       unsigned int slots_used;        /* pb slots used in current submit */
+       unsigned int slots_free;        /* pb slots free in current submit */
+       unsigned int first_get;         /* DMAGET value, where submit begins */
+       unsigned int last_pos;          /* last value written to DMAPUT */
+       struct push_buffer push_buffer; /* channel's push buffer */
+       struct list_head sync_queue;    /* job queue */
+       struct buffer_timeout timeout;  /* channel's timeout state/wq */
+       bool running;
+       bool torndown;
+};
+
+#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
+#define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
+#define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)
+
+int host1x_cdma_init(struct host1x_cdma *cdma);
+int host1x_cdma_deinit(struct host1x_cdma *cdma);
+void host1x_cdma_stop(struct host1x_cdma *cdma);
+int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
+void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
+void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
+void host1x_cdma_update(struct host1x_cdma *cdma);
+void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
+                     u32 *out);
+unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
+                                    enum cdma_event event);
+void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
+                                  struct device *dev);
+#endif
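
The producer/consumer split described in the header comment (begin/push/end on one side, update on the other) can be modelled in isolation; the sketch below simulates a sync queue whose jobs are retired once a counter standing in for the syncpoint passes their syncpt_end value (the queue layout and all names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct job {
        unsigned int syncpt_end;        /* syncpt value once this job is done */
        struct job *next;
};

struct chan {
        struct job *head, *tail;        /* FIFO sync queue */
        unsigned int syncpt;            /* value the "hardware" has reached */
        unsigned int syncpt_max;        /* value it will reach when idle */
};

/* producer: queue a job that will perform 'incrs' syncpt increments */
static void chan_end(struct chan *c, unsigned int incrs)
{
        struct job *job = calloc(1, sizeof(*job));

        if (!job)
                return;

        c->syncpt_max += incrs;
        job->syncpt_end = c->syncpt_max;

        if (c->tail)
                c->tail->next = job;
        else
                c->head = job;
        c->tail = job;
}

/* "hardware": work completes, the syncpt advances */
static void hw_incr(struct chan *c, unsigned int n)
{
        c->syncpt += n;
}

/* consumer: retire every job whose threshold has been passed */
static void chan_update(struct chan *c)
{
        while (c->head && c->syncpt >= c->head->syncpt_end) {
                struct job *done = c->head;

                c->head = done->next;
                if (!c->head)
                        c->tail = NULL;
                printf("retired job with syncpt_end=%u\n", done->syncpt_end);
                free(done);
        }
}

int main(void)
{
        struct chan c = { 0 };

        chan_end(&c, 3);        /* job A: done at syncpt == 3 */
        chan_end(&c, 2);        /* job B: done at syncpt == 5 */

        hw_incr(&c, 3);
        chan_update(&c);        /* retires A only */

        hw_incr(&c, 2);
        chan_update(&c);        /* retires B */
        return 0;
}
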
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
new file mode 100644 (file)
index 0000000..83ea51b
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "channel.h"
+#include "dev.h"
+#include "job.h"
+
+/* Constructor for the host1x device list */
+int host1x_channel_list_init(struct host1x *host)
+{
+       INIT_LIST_HEAD(&host->chlist.list);
+       mutex_init(&host->chlist_mutex);
+
+       if (host->info->nb_channels > BITS_PER_LONG) {
+               WARN(1, "host1x hardware has more channels than supported by the driver\n");
+               return -ENOSYS;
+       }
+
+       return 0;
+}
+
+int host1x_job_submit(struct host1x_job *job)
+{
+       struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+
+       return host1x_hw_channel_submit(host, job);
+}
+
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
+{
+       int err = 0;
+
+       mutex_lock(&channel->reflock);
+
+       if (channel->refcount == 0)
+               err = host1x_cdma_init(&channel->cdma);
+
+       if (!err)
+               channel->refcount++;
+
+       mutex_unlock(&channel->reflock);
+
+       return err ? NULL : channel;
+}
+
+void host1x_channel_put(struct host1x_channel *channel)
+{
+       mutex_lock(&channel->reflock);
+
+       if (channel->refcount == 1) {
+               struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+               host1x_hw_cdma_stop(host, &channel->cdma);
+               host1x_cdma_deinit(&channel->cdma);
+       }
+
+       channel->refcount--;
+
+       mutex_unlock(&channel->reflock);
+}
+
+struct host1x_channel *host1x_channel_request(struct device *dev)
+{
+       struct host1x *host = dev_get_drvdata(dev->parent);
+       int max_channels = host->info->nb_channels;
+       struct host1x_channel *channel = NULL;
+       int index, err;
+
+       mutex_lock(&host->chlist_mutex);
+
+       index = find_first_zero_bit(&host->allocated_channels, max_channels);
+       if (index >= max_channels)
+               goto fail;
+
+       channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+       if (!channel)
+               goto fail;
+
+       err = host1x_hw_channel_init(host, channel, index);
+       if (err < 0)
+               goto fail;
+
+       /* Link device to host1x_channel */
+       channel->dev = dev;
+
+       /* Add to channel list */
+       list_add_tail(&channel->list, &host->chlist.list);
+
+       host->allocated_channels |= BIT(index);
+
+       mutex_unlock(&host->chlist_mutex);
+       return channel;
+
+fail:
+       dev_err(dev, "failed to init channel\n");
+       kfree(channel);
+       mutex_unlock(&host->chlist_mutex);
+       return NULL;
+}
+
+void host1x_channel_free(struct host1x_channel *channel)
+{
+       struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+       host->allocated_channels &= ~BIT(channel->id);
+       list_del(&channel->list);
+       kfree(channel);
+}
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
new file mode 100644 (file)
index 0000000..48723b8
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_CHANNEL_H
+#define __HOST1X_CHANNEL_H
+
+#include <linux/io.h>
+
+#include "cdma.h"
+
+struct host1x;
+
+struct host1x_channel {
+       struct list_head list;
+
+       unsigned int refcount;
+       unsigned int id;
+       struct mutex reflock;
+       struct mutex submitlock;
+       void __iomem *regs;
+       struct device *dev;
+       struct host1x_cdma cdma;
+};
+
+/* channel list operations */
+int host1x_channel_list_init(struct host1x *host);
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+#define host1x_for_each_channel(host, channel)                         \
+       list_for_each_entry(channel, &host->chlist.list, list)
+
+#endif
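
Channel bookkeeping in host1x_channel_request()/host1x_channel_free() amounts to a bitmap of in-use channel indices; the standalone sketch below models that allocation scheme with a plain loop in place of find_first_zero_bit() (MAX_CHANNELS and the helper names are illustrative):

#include <stdio.h>

#define MAX_CHANNELS 8

static unsigned long allocated;         /* bit n set => channel n in use */

/* return a free channel index, or -1 if all are busy */
static int channel_request(void)
{
        int index;

        for (index = 0; index < MAX_CHANNELS; index++)
                if (!(allocated & (1UL << index)))
                        break;
        if (index >= MAX_CHANNELS)
                return -1;

        allocated |= 1UL << index;
        return index;
}

static void channel_free(int index)
{
        allocated &= ~(1UL << index);
}

int main(void)
{
        int a = channel_request();      /* 0 */
        int b = channel_request();      /* 1 */

        channel_free(a);
        printf("next free channel: %d\n", channel_request());  /* 0 again */
        (void)b;
        return 0;
}
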
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
new file mode 100644 (file)
index 0000000..3ec7d77
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "channel.h"
+
+unsigned int host1x_debug_trace_cmdbuf;
+
+static pid_t host1x_debug_force_timeout_pid;
+static u32 host1x_debug_force_timeout_val;
+static u32 host1x_debug_force_timeout_channel;
+
+void host1x_debug_output(struct output *o, const char *fmt, ...)
+{
+       va_list args;
+       int len;
+
+       va_start(args, fmt);
+       len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+       va_end(args);
+       o->fn(o->ctx, o->buf, len);
+}
+
+static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
+{
+       struct host1x *m = dev_get_drvdata(ch->dev->parent);
+       struct output *o = data;
+
+       mutex_lock(&ch->reflock);
+       if (ch->refcount) {
+               mutex_lock(&ch->cdma.lock);
+               if (show_fifo)
+                       host1x_hw_show_channel_fifo(m, ch, o);
+               host1x_hw_show_channel_cdma(m, ch, o);
+               mutex_unlock(&ch->cdma.lock);
+       }
+       mutex_unlock(&ch->reflock);
+
+       return 0;
+}
+
+static void show_syncpts(struct host1x *m, struct output *o)
+{
+       int i;
+       host1x_debug_output(o, "---- syncpts ----\n");
+       for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
+               u32 max = host1x_syncpt_read_max(m->syncpt + i);
+               u32 min = host1x_syncpt_load(m->syncpt + i);
+               if (!min && !max)
+                       continue;
+               host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+                                   i, m->syncpt[i].name, min, max);
+       }
+
+       for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
+               u32 base_val;
+               base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
+               if (base_val)
+                       host1x_debug_output(o, "waitbase id %d val %d\n", i,
+                                           base_val);
+       }
+
+       host1x_debug_output(o, "\n");
+}
+
+static void show_all(struct host1x *m, struct output *o)
+{
+       struct host1x_channel *ch;
+
+       host1x_hw_show_mlocks(m, o);
+       show_syncpts(m, o);
+       host1x_debug_output(o, "---- channels ----\n");
+
+       host1x_for_each_channel(m, ch)
+               show_channels(ch, o, true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void show_all_no_fifo(struct host1x *host1x, struct output *o)
+{
+       struct host1x_channel *ch;
+
+       host1x_hw_show_mlocks(host1x, o);
+       show_syncpts(host1x, o);
+       host1x_debug_output(o, "---- channels ----\n");
+
+       host1x_for_each_channel(host1x, ch)
+               show_channels(ch, o, false);
+}
+
+static int host1x_debug_show_all(struct seq_file *s, void *unused)
+{
+       struct output o = {
+               .fn = write_to_seqfile,
+               .ctx = s
+       };
+       show_all(s->private, &o);
+       return 0;
+}
+
+static int host1x_debug_show(struct seq_file *s, void *unused)
+{
+       struct output o = {
+               .fn = write_to_seqfile,
+               .ctx = s
+       };
+       show_all_no_fifo(s->private, &o);
+       return 0;
+}
+
+static int host1x_debug_open_all(struct inode *inode, struct file *file)
+{
+       return single_open(file, host1x_debug_show_all, inode->i_private);
+}
+
+static const struct file_operations host1x_debug_all_fops = {
+       .open           = host1x_debug_open_all,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int host1x_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, host1x_debug_show, inode->i_private);
+}
+
+static const struct file_operations host1x_debug_fops = {
+       .open           = host1x_debug_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+void host1x_debug_init(struct host1x *host1x)
+{
+       struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
+
+       if (!de)
+               return;
+
+       /* Store the created entry */
+       host1x->debugfs = de;
+
+       debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
+       debugfs_create_file("status_all", S_IRUGO, de, host1x,
+                           &host1x_debug_all_fops);
+
+       debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
+                          &host1x_debug_trace_cmdbuf);
+
+       host1x_hw_debug_init(host1x, de);
+
+       debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
+                          &host1x_debug_force_timeout_pid);
+       debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
+                          &host1x_debug_force_timeout_val);
+       debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
+                          &host1x_debug_force_timeout_channel);
+}
+
+void host1x_debug_deinit(struct host1x *host1x)
+{
+       debugfs_remove_recursive(host1x->debugfs);
+}
+#else
+void host1x_debug_init(struct host1x *host1x)
+{
+}
+void host1x_debug_deinit(struct host1x *host1x)
+{
+}
+#endif
+
+void host1x_debug_dump(struct host1x *host1x)
+{
+       struct output o = {
+               .fn = write_to_printk
+       };
+       show_all(host1x, &o);
+}
+
+void host1x_debug_dump_syncpts(struct host1x *host1x)
+{
+       struct output o = {
+               .fn = write_to_printk
+       };
+       show_syncpts(host1x, &o);
+}
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
new file mode 100644 (file)
index 0000000..4595b2e
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Tegra host1x Debug
+ *
+ * Copyright (c) 2011-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __HOST1X_DEBUG_H
+#define __HOST1X_DEBUG_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct host1x;
+
+struct output {
+       void (*fn)(void *ctx, const char *str, size_t len);
+       void *ctx;
+       char buf[256];
+};
+
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+{
+       seq_write((struct seq_file *)ctx, str, len);
+}
+
+static inline void write_to_printk(void *ctx, const char *str, size_t len)
+{
+       pr_info("%s", str);
+}
+
+void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
+
+extern unsigned int host1x_debug_trace_cmdbuf;
+
+void host1x_debug_init(struct host1x *host1x);
+void host1x_debug_deinit(struct host1x *host1x);
+void host1x_debug_dump(struct host1x *host1x);
+void host1x_debug_dump_syncpts(struct host1x *host1x);
+
+#endif
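
The struct output declared above decouples formatting from the sink that consumes it (seq_file, printk, ...); the standalone sketch below shows the same pattern with an illustrative stdout sink, plus a truncation clamp added for safety that the header does not have:

#include <stdarg.h>
#include <stdio.h>

struct output {
        void (*fn)(void *ctx, const char *str, size_t len);
        void *ctx;
        char buf[256];
};

/* illustrative sink: write the formatted chunk to stdout */
static void write_to_stdout(void *ctx, const char *str, size_t len)
{
        fwrite(str, 1, len, stdout);
}

static void debug_output(struct output *o, const char *fmt, ...)
{
        va_list args;
        int len;

        va_start(args, fmt);
        len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
        va_end(args);

        if (len < 0)
                return;
        if ((size_t)len >= sizeof(o->buf))      /* clamp on truncation */
                len = sizeof(o->buf) - 1;

        o->fn(o->ctx, o->buf, (size_t)len);
}

int main(void)
{
        struct output o = { .fn = write_to_stdout };

        debug_output(&o, "---- syncpts ----\n");
        debug_output(&o, "id %d min %d max %d\n", 0, 1, 2);
        return 0;
}
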
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
new file mode 100644 (file)
index 0000000..28e28a2
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * Tegra host1x driver
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/host1x.h>
+
+#include "dev.h"
+#include "intr.h"
+#include "channel.h"
+#include "debug.h"
+#include "hw/host1x01.h"
+#include "host1x_client.h"
+
+void host1x_set_drm_data(struct device *dev, void *data)
+{
+       struct host1x *host1x = dev_get_drvdata(dev);
+       host1x->drm_data = data;
+}
+
+void *host1x_get_drm_data(struct device *dev)
+{
+       struct host1x *host1x = dev_get_drvdata(dev);
+       return host1x->drm_data;
+}
+
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
+{
+       void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+       writel(v, sync_regs + r);
+}
+
+u32 host1x_sync_readl(struct host1x *host1x, u32 r)
+{
+       void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+       return readl(sync_regs + r);
+}
+
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
+{
+       writel(v, ch->regs + r);
+}
+
+u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
+{
+       return readl(ch->regs + r);
+}
+
+static const struct host1x_info host1x01_info = {
+       .nb_channels    = 8,
+       .nb_pts         = 32,
+       .nb_mlocks      = 16,
+       .nb_bases       = 8,
+       .init           = host1x01_init,
+       .sync_offset    = 0x3000,
+};
+
+static struct of_device_id host1x_of_match[] = {
+       { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
+       { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
+       { },
+};
+MODULE_DEVICE_TABLE(of, host1x_of_match);
+
+static int host1x_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *id;
+       struct host1x *host;
+       struct resource *regs;
+       int syncpt_irq;
+       int err;
+
+       id = of_match_device(host1x_of_match, &pdev->dev);
+       if (!id)
+               return -EINVAL;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs) {
+               dev_err(&pdev->dev, "failed to get registers\n");
+               return -ENXIO;
+       }
+
+       syncpt_irq = platform_get_irq(pdev, 0);
+       if (syncpt_irq < 0) {
+               dev_err(&pdev->dev, "failed to get IRQ\n");
+               return -ENXIO;
+       }
+
+       host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+       if (!host)
+               return -ENOMEM;
+
+       host->dev = &pdev->dev;
+       host->info = id->data;
+
+       /* set common host1x device data */
+       platform_set_drvdata(pdev, host);
+
+       host->regs = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(host->regs))
+               return PTR_ERR(host->regs);
+
+       if (host->info->init) {
+               err = host->info->init(host);
+               if (err)
+                       return err;
+       }
+
+       host->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(host->clk)) {
+               dev_err(&pdev->dev, "failed to get clock\n");
+               err = PTR_ERR(host->clk);
+               return err;
+       }
+
+       err = host1x_channel_list_init(host);
+       if (err) {
+               dev_err(&pdev->dev, "failed to initialize channel list\n");
+               return err;
+       }
+
+       err = clk_prepare_enable(host->clk);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to enable clock\n");
+               return err;
+       }
+
+       err = host1x_syncpt_init(host);
+       if (err) {
+               dev_err(&pdev->dev, "failed to initialize syncpts\n");
+               return err;
+       }
+
+       err = host1x_intr_init(host, syncpt_irq);
+       if (err) {
+               dev_err(&pdev->dev, "failed to initialize interrupts\n");
+               goto fail_deinit_syncpt;
+       }
+
+       host1x_debug_init(host);
+
+       host1x_drm_alloc(pdev);
+
+       return 0;
+
+fail_deinit_syncpt:
+       host1x_syncpt_deinit(host);
+       return err;
+}
+
+static int __exit host1x_remove(struct platform_device *pdev)
+{
+       struct host1x *host = platform_get_drvdata(pdev);
+
+       host1x_intr_deinit(host);
+       host1x_syncpt_deinit(host);
+       clk_disable_unprepare(host->clk);
+
+       return 0;
+}
+
+static struct platform_driver tegra_host1x_driver = {
+       .probe = host1x_probe,
+       .remove = __exit_p(host1x_remove),
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "tegra-host1x",
+               .of_match_table = host1x_of_match,
+       },
+};
+
+static int __init tegra_host1x_init(void)
+{
+       int err;
+
+       err = platform_driver_register(&tegra_host1x_driver);
+       if (err < 0)
+               return err;
+
+#ifdef CONFIG_DRM_TEGRA
+       err = platform_driver_register(&tegra_dc_driver);
+       if (err < 0)
+               goto unregister_host1x;
+
+       err = platform_driver_register(&tegra_hdmi_driver);
+       if (err < 0)
+               goto unregister_dc;
+
+       err = platform_driver_register(&tegra_gr2d_driver);
+       if (err < 0)
+               goto unregister_hdmi;
+#endif
+
+       return 0;
+
+#ifdef CONFIG_DRM_TEGRA
+unregister_hdmi:
+       platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+       platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+       platform_driver_unregister(&tegra_host1x_driver);
+       return err;
+#endif
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+#ifdef CONFIG_DRM_TEGRA
+       platform_driver_unregister(&tegra_gr2d_driver);
+       platform_driver_unregister(&tegra_hdmi_driver);
+       platform_driver_unregister(&tegra_dc_driver);
+#endif
+       platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
+MODULE_DESCRIPTION("Host1x driver for Tegra products");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
new file mode 100644 (file)
index 0000000..a1607d6
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_DEV_H
+#define HOST1X_DEV_H
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+#include "channel.h"
+#include "syncpt.h"
+#include "intr.h"
+#include "cdma.h"
+#include "job.h"
+
+struct host1x_syncpt;
+struct host1x_channel;
+struct host1x_cdma;
+struct host1x_job;
+struct push_buffer;
+struct output;
+struct dentry;
+
+struct host1x_channel_ops {
+       int (*init)(struct host1x_channel *channel, struct host1x *host,
+                   unsigned int id);
+       int (*submit)(struct host1x_job *job);
+};
+
+struct host1x_cdma_ops {
+       void (*start)(struct host1x_cdma *cdma);
+       void (*stop)(struct host1x_cdma *cdma);
+       void (*flush)(struct  host1x_cdma *cdma);
+       int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+       void (*timeout_destroy)(struct host1x_cdma *cdma);
+       void (*freeze)(struct host1x_cdma *cdma);
+       void (*resume)(struct host1x_cdma *cdma, u32 getptr);
+       void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
+                                u32 syncpt_incrs, u32 syncval, u32 nr_slots);
+};
+
+struct host1x_pushbuffer_ops {
+       void (*init)(struct push_buffer *pb);
+};
+
+struct host1x_debug_ops {
+       void (*debug_init)(struct dentry *de);
+       void (*show_channel_cdma)(struct host1x *host,
+                                 struct host1x_channel *ch,
+                                 struct output *o);
+       void (*show_channel_fifo)(struct host1x *host,
+                                 struct host1x_channel *ch,
+                                 struct output *o);
+       void (*show_mlocks)(struct host1x *host, struct output *output);
+
+};
+
+struct host1x_syncpt_ops {
+       void (*restore)(struct host1x_syncpt *syncpt);
+       void (*restore_wait_base)(struct host1x_syncpt *syncpt);
+       void (*load_wait_base)(struct host1x_syncpt *syncpt);
+       u32 (*load)(struct host1x_syncpt *syncpt);
+       void (*cpu_incr)(struct host1x_syncpt *syncpt);
+       int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
+};
+
+struct host1x_intr_ops {
+       int (*init_host_sync)(struct host1x *host, u32 cpm,
+               void (*syncpt_thresh_work)(struct work_struct *work));
+       void (*set_syncpt_threshold)(
+               struct host1x *host, u32 id, u32 thresh);
+       void (*enable_syncpt_intr)(struct host1x *host, u32 id);
+       void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+       void (*disable_all_syncpt_intrs)(struct host1x *host);
+       int (*free_syncpt_irq)(struct host1x *host);
+};
+
+struct host1x_info {
+       int     nb_channels;            /* host1x: num channels supported */
+       int     nb_pts;                 /* host1x: num syncpoints supported */
+       int     nb_bases;               /* host1x: num syncpoint wait bases supported */
+       int     nb_mlocks;              /* host1x: number of mlocks */
+       int     (*init)(struct host1x *); /* initialize per SoC ops */
+       int     sync_offset;
+};
+
+struct host1x {
+       const struct host1x_info *info;
+
+       void __iomem *regs;
+       struct host1x_syncpt *syncpt;
+       struct device *dev;
+       struct clk *clk;
+
+       struct mutex intr_mutex;
+       struct workqueue_struct *intr_wq;
+       int intr_syncpt_irq;
+
+       const struct host1x_syncpt_ops *syncpt_op;
+       const struct host1x_intr_ops *intr_op;
+       const struct host1x_channel_ops *channel_op;
+       const struct host1x_cdma_ops *cdma_op;
+       const struct host1x_pushbuffer_ops *cdma_pb_op;
+       const struct host1x_debug_ops *debug_op;
+
+       struct host1x_syncpt *nop_sp;
+
+       struct mutex chlist_mutex;
+       struct host1x_channel chlist;
+       unsigned long allocated_channels;
+       unsigned int num_allocated_channels;
+
+       struct dentry *debugfs;
+
+       void *drm_data;
+};
+
+void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
+u32 host1x_sync_readl(struct host1x *host1x, u32 r);
+void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
+u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
+
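+/* Inline helpers that forward to the per-SoC op tables declared above. */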
+static inline void host1x_hw_syncpt_restore(struct host1x *host,
+                                           struct host1x_syncpt *sp)
+{
+       host->syncpt_op->restore(sp);
+}
+
+static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
+                                                     struct host1x_syncpt *sp)
+{
+       host->syncpt_op->restore_wait_base(sp);
+}
+
+static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
+                                                  struct host1x_syncpt *sp)
+{
+       host->syncpt_op->load_wait_base(sp);
+}
+
+static inline u32 host1x_hw_syncpt_load(struct host1x *host,
+                                       struct host1x_syncpt *sp)
+{
+       return host->syncpt_op->load(sp);
+}
+
+static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
+                                            struct host1x_syncpt *sp)
+{
+       host->syncpt_op->cpu_incr(sp);
+}
+
+static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
+                                             struct host1x_syncpt *sp,
+                                             void *patch_addr)
+{
+       return host->syncpt_op->patch_wait(sp, patch_addr);
+}
+
+static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
+                       void (*syncpt_thresh_work)(struct work_struct *))
+{
+       return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
+}
+
+static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
+                                                      u32 id, u32 thresh)
+{
+       host->intr_op->set_syncpt_threshold(host, id, thresh);
+}
+
+static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
+                                                    u32 id)
+{
+       host->intr_op->enable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
+                                                     u32 id)
+{
+       host->intr_op->disable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+       host->intr_op->disable_all_syncpt_intrs(host);
+}
+
+static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
+{
+       return host->intr_op->free_syncpt_irq(host);
+}
+
+static inline int host1x_hw_channel_init(struct host1x *host,
+                                        struct host1x_channel *channel,
+                                        int chid)
+{
+       return host->channel_op->init(channel, host, chid);
+}
+
+static inline int host1x_hw_channel_submit(struct host1x *host,
+                                          struct host1x_job *job)
+{
+       return host->channel_op->submit(job);
+}
+
+static inline void host1x_hw_cdma_start(struct host1x *host,
+                                       struct host1x_cdma *cdma)
+{
+       host->cdma_op->start(cdma);
+}
+
+static inline void host1x_hw_cdma_stop(struct host1x *host,
+                                      struct host1x_cdma *cdma)
+{
+       host->cdma_op->stop(cdma);
+}
+
+static inline void host1x_hw_cdma_flush(struct host1x *host,
+                                       struct host1x_cdma *cdma)
+{
+       host->cdma_op->flush(cdma);
+}
+
+static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
+                                             struct host1x_cdma *cdma,
+                                             u32 syncpt_id)
+{
+       return host->cdma_op->timeout_init(cdma, syncpt_id);
+}
+
+static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
+                                                 struct host1x_cdma *cdma)
+{
+       host->cdma_op->timeout_destroy(cdma);
+}
+
+static inline void host1x_hw_cdma_freeze(struct host1x *host,
+                                        struct host1x_cdma *cdma)
+{
+       host->cdma_op->freeze(cdma);
+}
+
+static inline void host1x_hw_cdma_resume(struct host1x *host,
+                                        struct host1x_cdma *cdma, u32 getptr)
+{
+       host->cdma_op->resume(cdma, getptr);
+}
+
+static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
+                                                  struct host1x_cdma *cdma,
+                                                  u32 getptr,
+                                                  u32 syncpt_incrs,
+                                                  u32 syncval, u32 nr_slots)
+{
+       host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
+                                       nr_slots);
+}
+
+static inline void host1x_hw_pushbuffer_init(struct host1x *host,
+                                            struct push_buffer *pb)
+{
+       host->cdma_pb_op->init(pb);
+}
+
+static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
+{
+       if (host->debug_op && host->debug_op->debug_init)
+               host->debug_op->debug_init(de);
+}
+
+static inline void host1x_hw_show_channel_cdma(struct host1x *host,
+                                              struct host1x_channel *channel,
+                                              struct output *o)
+{
+       host->debug_op->show_channel_cdma(host, channel, o);
+}
+
+static inline void host1x_hw_show_channel_fifo(struct host1x *host,
+                                              struct host1x_channel *channel,
+                                              struct output *o)
+{
+       host->debug_op->show_channel_fifo(host, channel, o);
+}
+
+static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
+{
+       host->debug_op->show_mlocks(host, o);
+}
+
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_gr2d_driver;
+
+#endif
similarity index 53%
rename from drivers/gpu/drm/tegra/Kconfig
rename to drivers/gpu/host1x/drm/Kconfig
index be1daf7..69853a4 100644 (file)
@@ -1,12 +1,10 @@
 config DRM_TEGRA
-       tristate "NVIDIA Tegra DRM"
-       depends on DRM && OF && ARCH_TEGRA
+       bool "NVIDIA Tegra DRM"
+       depends on DRM
        select DRM_KMS_HELPER
-       select DRM_GEM_CMA_HELPER
-       select DRM_KMS_CMA_HELPER
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
        help
          Choose this option if you have an NVIDIA Tegra SoC.
 
@@ -15,6 +13,14 @@ config DRM_TEGRA
 
 if DRM_TEGRA
 
+config DRM_TEGRA_STAGING
+       bool "Enable HOST1X interface"
+       depends on STAGING
+       help
+         Say yes if HOST1X should be available for userspace DRM users.
+
+         If unsure, choose N.
+
 config DRM_TEGRA_DEBUG
        bool "NVIDIA Tegra DRM debug support"
        help
similarity index 97%
rename from drivers/gpu/drm/tegra/dc.c
rename to drivers/gpu/host1x/drm/dc.c
index de94707..1e20603 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/clk/tegra.h>
 
-#include "drm.h"
+#include "host1x_client.h"
 #include "dc.h"
+#include "drm.h"
+#include "gem.h"
 
 struct tegra_plane {
        struct drm_plane base;
@@ -51,9 +53,9 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        window.bits_per_pixel = fb->bits_per_pixel;
 
        for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
-               struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+               struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
 
-               window.base[i] = gem->paddr + fb->offsets[i];
+               window.base[i] = bo->paddr + fb->offsets[i];
 
                /*
                 * Tegra doesn't support different strides for U and V planes
@@ -103,7 +105,9 @@ static const struct drm_plane_funcs tegra_plane_funcs = {
 };
 
 static const uint32_t plane_formats[] = {
+       DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_RGB565,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_YUV420,
        DRM_FORMAT_YUV422,
@@ -136,7 +140,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
 static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
                             struct drm_framebuffer *fb)
 {
-       struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+       struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
        unsigned long value;
 
        tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -144,7 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
        value = fb->offsets[0] + y * fb->pitches[0] +
                x * fb->bits_per_pixel / 8;
 
-       tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
+       tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
        tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
 
        value = GENERAL_UPDATE | WIN_A_UPDATE;
@@ -186,20 +190,20 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
 {
        struct drm_device *drm = dc->base.dev;
        struct drm_crtc *crtc = &dc->base;
-       struct drm_gem_cma_object *gem;
        unsigned long flags, base;
+       struct tegra_bo *bo;
 
        if (!dc->event)
                return;
 
-       gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+       bo = tegra_fb_get_plane(crtc->fb, 0);
 
        /* check if new start address has been latched */
        tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
        base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
        tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
 
-       if (base == gem->paddr + crtc->fb->offsets[0]) {
+       if (base == bo->paddr + crtc->fb->offsets[0]) {
                spin_lock_irqsave(&drm->event_lock, flags);
                drm_send_vblank_event(drm, dc->pipe, dc->event);
                drm_vblank_put(drm, dc->pipe);
@@ -541,6 +545,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 unsigned int tegra_dc_format(uint32_t format)
 {
        switch (format) {
+       case DRM_FORMAT_XBGR8888:
+               return WIN_COLOR_DEPTH_R8G8B8A8;
+
        case DRM_FORMAT_XRGB8888:
                return WIN_COLOR_DEPTH_B8G8R8A8;
 
@@ -569,7 +576,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
                               struct drm_display_mode *adjusted,
                               int x, int y, struct drm_framebuffer *old_fb)
 {
-       struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+       struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
        struct tegra_dc *dc = to_tegra_dc(crtc);
        struct tegra_dc_window window;
        unsigned long div, value;
@@ -616,7 +623,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
        window.format = tegra_dc_format(crtc->fb->pixel_format);
        window.bits_per_pixel = crtc->fb->bits_per_pixel;
        window.stride[0] = crtc->fb->pitches[0];
-       window.base[0] = gem->paddr;
+       window.base[0] = bo->paddr;
 
        err = tegra_dc_setup_window(dc, 0, &window);
        if (err < 0)
@@ -1097,7 +1104,7 @@ static const struct host1x_client_ops dc_client_ops = {
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
-       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct resource *regs;
        struct tegra_dc *dc;
        int err;
@@ -1160,7 +1167,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
 
 static int tegra_dc_remove(struct platform_device *pdev)
 {
-       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct tegra_dc *dc = platform_get_drvdata(pdev);
        int err;
 
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
new file mode 100644 (file)
index 0000000..2b561c9
--- /dev/null
@@ -0,0 +1,640 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+
+#include "host1x_client.h"
+#include "dev.h"
+#include "drm.h"
+#include "gem.h"
+#include "syncpt.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct host1x_drm_client {
+       struct host1x_client *client;
+       struct device_node *np;
+       struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x_drm *host1x,
+                                struct device_node *np)
+{
+       struct host1x_drm_client *client;
+
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (!client)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&client->list);
+       client->np = of_node_get(np);
+
+       list_add_tail(&client->list, &host1x->drm_clients);
+
+       return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x_drm *host1x,
+                                     struct host1x_drm_client *drm,
+                                     struct host1x_client *client)
+{
+       mutex_lock(&host1x->drm_clients_lock);
+       list_del_init(&drm->list);
+       list_add_tail(&drm->list, &host1x->drm_active);
+       drm->client = client;
+       mutex_unlock(&host1x->drm_clients_lock);
+
+       return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x_drm *host1x,
+                                   struct host1x_drm_client *client)
+{
+       mutex_lock(&host1x->drm_clients_lock);
+       list_del_init(&client->list);
+       mutex_unlock(&host1x->drm_clients_lock);
+
+       of_node_put(client->np);
+       kfree(client);
+
+       return 0;
+}
+
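+/*
+ * Walk the host1x device tree node and queue every available display
+ * controller, HDMI and 2D engine child as a pending DRM client; the DRM
+ * device itself is only registered once all of them have been activated
+ * (see host1x_register_client()).
+ */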
+static int host1x_parse_dt(struct host1x_drm *host1x)
+{
+       static const char * const compat[] = {
+               "nvidia,tegra20-dc",
+               "nvidia,tegra20-hdmi",
+               "nvidia,tegra20-gr2d",
+               "nvidia,tegra30-dc",
+               "nvidia,tegra30-hdmi",
+               "nvidia,tegra30-gr2d",
+       };
+       unsigned int i;
+       int err;
+
+       for (i = 0; i < ARRAY_SIZE(compat); i++) {
+               struct device_node *np;
+
+               for_each_child_of_node(host1x->dev->of_node, np) {
+                       if (of_device_is_compatible(np, compat[i]) &&
+                           of_device_is_available(np)) {
+                               err = host1x_add_drm_client(host1x, np);
+                               if (err < 0)
+                                       return err;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int host1x_drm_alloc(struct platform_device *pdev)
+{
+       struct host1x_drm *host1x;
+       int err;
+
+       host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+       if (!host1x)
+               return -ENOMEM;
+
+       mutex_init(&host1x->drm_clients_lock);
+       INIT_LIST_HEAD(&host1x->drm_clients);
+       INIT_LIST_HEAD(&host1x->drm_active);
+       mutex_init(&host1x->clients_lock);
+       INIT_LIST_HEAD(&host1x->clients);
+       host1x->dev = &pdev->dev;
+
+       err = host1x_parse_dt(host1x);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+               return err;
+       }
+
+       host1x_set_drm_data(&pdev->dev, host1x);
+
+       return 0;
+}
+
+int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
+{
+       struct host1x_client *client;
+
+       mutex_lock(&host1x->clients_lock);
+
+       list_for_each_entry(client, &host1x->clients, list) {
+               if (client->ops && client->ops->drm_init) {
+                       int err = client->ops->drm_init(client, drm);
+                       if (err < 0) {
+                               dev_err(host1x->dev,
+                                       "DRM setup failed for %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&host1x->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
+       mutex_unlock(&host1x->clients_lock);
+
+       return 0;
+}
+
+int host1x_drm_exit(struct host1x_drm *host1x)
+{
+       struct platform_device *pdev = to_platform_device(host1x->dev);
+       struct host1x_client *client;
+
+       if (!host1x->drm)
+               return 0;
+
+       mutex_lock(&host1x->clients_lock);
+
+       list_for_each_entry_reverse(client, &host1x->clients, list) {
+               if (client->ops && client->ops->drm_exit) {
+                       int err = client->ops->drm_exit(client);
+                       if (err < 0) {
+                               dev_err(host1x->dev,
+                                       "DRM cleanup failed for %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&host1x->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
+       mutex_unlock(&host1x->clients_lock);
+
+       drm_platform_exit(&tegra_drm_driver, pdev);
+       host1x->drm = NULL;
+
+       return 0;
+}
+
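+/*
+ * Register a host1x client with the DRM frontend. Once the last pending
+ * client from the device tree scan has been activated, drm_clients is empty
+ * and the DRM device is brought up via drm_platform_init().
+ */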
+int host1x_register_client(struct host1x_drm *host1x,
+                          struct host1x_client *client)
+{
+       struct host1x_drm_client *drm, *tmp;
+       int err;
+
+       mutex_lock(&host1x->clients_lock);
+       list_add_tail(&client->list, &host1x->clients);
+       mutex_unlock(&host1x->clients_lock);
+
+       list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+               if (drm->np == client->dev->of_node)
+                       host1x_activate_drm_client(host1x, drm, client);
+
+       if (list_empty(&host1x->drm_clients)) {
+               struct platform_device *pdev = to_platform_device(host1x->dev);
+
+               err = drm_platform_init(&tegra_drm_driver, pdev);
+               if (err < 0) {
+                       dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+int host1x_unregister_client(struct host1x_drm *host1x,
+                            struct host1x_client *client)
+{
+       struct host1x_drm_client *drm, *tmp;
+       int err;
+
+       list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+               if (drm->client == client) {
+                       err = host1x_drm_exit(host1x);
+                       if (err < 0) {
+                               dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+                                       err);
+                               return err;
+                       }
+
+                       host1x_remove_drm_client(host1x, drm);
+                       break;
+               }
+       }
+
+       mutex_lock(&host1x->clients_lock);
+       list_del_init(&client->list);
+       mutex_unlock(&host1x->clients_lock);
+
+       return 0;
+}
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+       struct host1x_drm *host1x;
+       int err;
+
+       host1x = host1x_get_drm_data(drm->dev);
+       drm->dev_private = host1x;
+       host1x->drm = drm;
+
+       drm_mode_config_init(drm);
+
+       err = host1x_drm_init(host1x, drm);
+       if (err < 0)
+               return err;
+
+       err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       if (err < 0)
+               return err;
+
+       err = tegra_drm_fb_init(drm);
+       if (err < 0)
+               return err;
+
+       drm_kms_helper_poll_init(drm);
+
+       return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+       drm_kms_helper_poll_fini(drm);
+       tegra_drm_fb_exit(drm);
+
+       drm_mode_config_cleanup(drm);
+
+       return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+       struct host1x_drm_file *fpriv;
+
+       fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+       if (!fpriv)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&fpriv->contexts);
+       filp->driver_priv = fpriv;
+
+       return 0;
+}
+
+static void host1x_drm_context_free(struct host1x_drm_context *context)
+{
+       context->client->ops->close_channel(context);
+       kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+       struct host1x_drm *host1x = drm->dev_private;
+
+       tegra_fbdev_restore_mode(host1x->fbdev);
+}
+
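+/*
+ * Staging IOCTL interface: userspace opens a channel for a given client
+ * class and gets back an opaque context handle that the GET_SYNCPT, SUBMIT
+ * and CLOSE_CHANNEL IOCTLs refer back to.
+ */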
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
+                                        struct host1x_drm_context *context)
+{
+       struct host1x_drm_context *ctx;
+
+       list_for_each_entry(ctx, &file->contexts, list)
+               if (ctx == context)
+                       return true;
+
+       return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+                           struct drm_file *file)
+{
+       struct drm_tegra_gem_create *args = data;
+       struct tegra_bo *bo;
+
+       bo = tegra_bo_create_with_handle(file, drm, args->size,
+                                        &args->handle);
+       if (IS_ERR(bo))
+               return PTR_ERR(bo);
+
+       return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+                         struct drm_file *file)
+{
+       struct drm_tegra_gem_mmap *args = data;
+       struct drm_gem_object *gem;
+       struct tegra_bo *bo;
+
+       gem = drm_gem_object_lookup(drm, file, args->handle);
+       if (!gem)
+               return -EINVAL;
+
+       bo = to_tegra_bo(gem);
+
+       args->offset = tegra_bo_get_mmap_offset(bo);
+
+       drm_gem_object_unreference(gem);
+
+       return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+                            struct drm_file *file)
+{
+       struct drm_tegra_syncpt_read *args = data;
+       struct host1x *host = dev_get_drvdata(drm->dev);
+       struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+       if (!sp)
+               return -EINVAL;
+
+       args->value = host1x_syncpt_read_min(sp);
+       return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+                            struct drm_file *file)
+{
+       struct drm_tegra_syncpt_incr *args = data;
+       struct host1x *host = dev_get_drvdata(drm->dev);
+       struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+       if (!sp)
+               return -EINVAL;
+
+       host1x_syncpt_incr(sp);
+       return 0;
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+                            struct drm_file *file)
+{
+       struct drm_tegra_syncpt_wait *args = data;
+       struct host1x *host = dev_get_drvdata(drm->dev);
+       struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+       if (!sp)
+               return -EINVAL;
+
+       return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+                                 &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+                             struct drm_file *file)
+{
+       struct drm_tegra_open_channel *args = data;
+       struct host1x_client *client;
+       struct host1x_drm_context *context;
+       struct host1x_drm_file *fpriv = file->driver_priv;
+       struct host1x_drm *host1x = drm->dev_private;
+       int err = -ENODEV;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return -ENOMEM;
+
+       list_for_each_entry(client, &host1x->clients, list)
+               if (client->class == args->client) {
+                       err = client->ops->open_channel(client, context);
+                       if (err)
+                               break;
+
+                       context->client = client;
+                       list_add(&context->list, &fpriv->contexts);
+                       args->context = (uintptr_t)context;
+                       return 0;
+               }
+
+       kfree(context);
+       return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+                              struct drm_file *file)
+{
+       struct drm_tegra_close_channel *args = data;
+       struct host1x_drm_file *fpriv = file->driver_priv;
+       struct host1x_drm_context *context =
+               (struct host1x_drm_context *)(uintptr_t)args->context;
+
+       if (!host1x_drm_file_owns_context(fpriv, context))
+               return -EINVAL;
+
+       list_del(&context->list);
+       host1x_drm_context_free(context);
+
+       return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+                           struct drm_file *file)
+{
+       struct drm_tegra_get_syncpt *args = data;
+       struct host1x_drm_file *fpriv = file->driver_priv;
+       struct host1x_drm_context *context =
+               (struct host1x_drm_context *)(uintptr_t)args->context;
+       struct host1x_syncpt *syncpt;
+
+       if (!host1x_drm_file_owns_context(fpriv, context))
+               return -ENODEV;
+
+       if (args->index >= context->client->num_syncpts)
+               return -EINVAL;
+
+       syncpt = context->client->syncpts[args->index];
+       args->id = host1x_syncpt_id(syncpt);
+
+       return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+                       struct drm_file *file)
+{
+       struct drm_tegra_submit *args = data;
+       struct host1x_drm_file *fpriv = file->driver_priv;
+       struct host1x_drm_context *context =
+               (struct host1x_drm_context *)(uintptr_t)args->context;
+
+       if (!host1x_drm_file_owns_context(fpriv, context))
+               return -ENODEV;
+
+       return context->client->ops->submit(context, args, drm, file);
+}
+#endif
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+       DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = tegra_drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+               struct tegra_dc *dc = to_tegra_dc(crtc);
+
+               if (dc->pipe == pipe)
+                       return crtc;
+       }
+
+       return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+       /* TODO: implement real hardware counter using syncpoints */
+       return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+       struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+
+       if (!crtc)
+               return -ENODEV;
+
+       tegra_dc_enable_vblank(dc);
+
+       return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+       struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+
+       if (crtc)
+               tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+       struct host1x_drm_file *fpriv = file->driver_priv;
+       struct host1x_drm_context *context, *tmp;
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+               tegra_dc_cancel_page_flip(crtc, file);
+
+       list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+               host1x_drm_context_free(context);
+
+       kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct drm_device *drm = node->minor->dev;
+       struct drm_framebuffer *fb;
+
+       mutex_lock(&drm->mode_config.fb_lock);
+
+       list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+               seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+                          fb->base.id, fb->width, fb->height, fb->depth,
+                          fb->bits_per_pixel,
+                          atomic_read(&fb->refcount.refcount));
+       }
+
+       mutex_unlock(&drm->mode_config.fb_lock);
+
+       return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+       { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+       return drm_debugfs_create_files(tegra_debugfs_list,
+                                       ARRAY_SIZE(tegra_debugfs_list),
+                                       minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+       drm_debugfs_remove_files(tegra_debugfs_list,
+                                ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+       .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+       .load = tegra_drm_load,
+       .unload = tegra_drm_unload,
+       .open = tegra_drm_open,
+       .preclose = tegra_drm_preclose,
+       .lastclose = tegra_drm_lastclose,
+
+       .get_vblank_counter = tegra_drm_get_vblank_counter,
+       .enable_vblank = tegra_drm_enable_vblank,
+       .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = tegra_debugfs_init,
+       .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+       .gem_free_object = tegra_bo_free_object,
+       .gem_vm_ops = &tegra_bo_vm_ops,
+       .dumb_create = tegra_bo_dumb_create,
+       .dumb_map_offset = tegra_bo_dumb_map_offset,
+       .dumb_destroy = tegra_bo_dumb_destroy,
+
+       .ioctls = tegra_drm_ioctls,
+       .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+       .fops = &tegra_drm_fops,
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+};
similarity index 77%
rename from drivers/gpu/drm/tegra/drm.h
rename to drivers/gpu/host1x/drm/drm.h
index 6dd75a2..02ce020 100644 (file)
@@ -1,24 +1,36 @@
 /*
  * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-#ifndef TEGRA_DRM_H
-#define TEGRA_DRM_H 1
+#ifndef HOST1X_DRM_H
+#define HOST1X_DRM_H 1
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fixed.h>
+#include <uapi/drm/tegra_drm.h>
 
-struct host1x {
+#include "host1x.h"
+
+struct tegra_fb {
+       struct drm_framebuffer base;
+       struct tegra_bo **planes;
+       unsigned int num_planes;
+};
+
+struct tegra_fbdev {
+       struct drm_fb_helper base;
+       struct tegra_fb *fb;
+};
+
+struct host1x_drm {
        struct drm_device *drm;
        struct device *dev;
        void __iomem *regs;
@@ -33,31 +45,53 @@ struct host1x {
        struct mutex clients_lock;
        struct list_head clients;
 
-       struct drm_fbdev_cma *fbdev;
+       struct tegra_fbdev *fbdev;
 };
 
 struct host1x_client;
 
+struct host1x_drm_context {
+       struct host1x_client *client;
+       struct host1x_channel *channel;
+       struct list_head list;
+};
+
 struct host1x_client_ops {
        int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
        int (*drm_exit)(struct host1x_client *client);
+       int (*open_channel)(struct host1x_client *client,
+                           struct host1x_drm_context *context);
+       void (*close_channel)(struct host1x_drm_context *context);
+       int (*submit)(struct host1x_drm_context *context,
+                     struct drm_tegra_submit *args, struct drm_device *drm,
+                     struct drm_file *file);
+};
+
+struct host1x_drm_file {
+       struct list_head contexts;
 };
 
 struct host1x_client {
-       struct host1x *host1x;
+       struct host1x_drm *host1x;
        struct device *dev;
 
        const struct host1x_client_ops *ops;
 
+       enum host1x_class class;
+       struct host1x_channel *channel;
+
+       struct host1x_syncpt **syncpts;
+       unsigned int num_syncpts;
+
        struct list_head list;
 };
 
-extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x *host1x);
+extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x_drm *host1x);
 
-extern int host1x_register_client(struct host1x *host1x,
+extern int host1x_register_client(struct host1x_drm *host1x,
                                  struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x *host1x,
+extern int host1x_unregister_client(struct host1x_drm *host1x,
                                    struct host1x_client *client);
 
 struct tegra_output;
@@ -66,7 +100,7 @@ struct tegra_dc {
        struct host1x_client client;
        spinlock_t lock;
 
-       struct host1x *host1x;
+       struct host1x_drm *host1x;
        struct device *dev;
 
        struct drm_crtc base;
@@ -226,12 +260,12 @@ extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output
 extern int tegra_output_exit(struct tegra_output *output);
 
 /* from fb.c */
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+                                   unsigned int index);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
+extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
-extern struct platform_driver tegra_host1x_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_dc_driver;
 extern struct drm_driver tegra_drm_driver;
 
-#endif /* TEGRA_DRM_H */
+#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/host1x/drm/fb.c
new file mode 100644 (file)
index 0000000..979a3e3
--- /dev/null
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2012-2013 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Based on the KMS/FB CMA helpers
+ *   Copyright (C) 2012 Analog Device Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include "drm.h"
+#include "gem.h"
+
+static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
+{
+       return container_of(fb, struct tegra_fb, base);
+}
+
+static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
+{
+       return container_of(helper, struct tegra_fbdev, base);
+}
+
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+                                   unsigned int index)
+{
+       struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+       if (index >= drm_format_num_planes(framebuffer->pixel_format))
+               return NULL;
+
+       return fb->planes[index];
+}
+
+static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
+{
+       struct tegra_fb *fb = to_tegra_fb(framebuffer);
+       unsigned int i;
+
+       for (i = 0; i < fb->num_planes; i++) {
+               struct tegra_bo *bo = fb->planes[i];
+
+               if (bo)
+                       drm_gem_object_unreference_unlocked(&bo->gem);
+       }
+
+       drm_framebuffer_cleanup(framebuffer);
+       kfree(fb->planes);
+       kfree(fb);
+}
+
+static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
+                                 struct drm_file *file, unsigned int *handle)
+{
+       struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+       return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
+}
+
+static struct drm_framebuffer_funcs tegra_fb_funcs = {
+       .destroy = tegra_fb_destroy,
+       .create_handle = tegra_fb_create_handle,
+};
+
+static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
+                                      struct drm_mode_fb_cmd2 *mode_cmd,
+                                      struct tegra_bo **planes,
+                                      unsigned int num_planes)
+{
+       struct tegra_fb *fb;
+       unsigned int i;
+       int err;
+
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
+               return ERR_PTR(-ENOMEM);
+
+       fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
+       if (!fb->planes) {
+               kfree(fb);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       fb->num_planes = num_planes;
+
+       drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+       for (i = 0; i < fb->num_planes; i++)
+               fb->planes[i] = planes[i];
+
+       err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
+       if (err < 0) {
+               dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
+                       err);
+               kfree(fb->planes);
+               kfree(fb);
+               return ERR_PTR(err);
+       }
+
+       return fb;
+}
+
+static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+                                              struct drm_file *file,
+                                              struct drm_mode_fb_cmd2 *cmd)
+{
+       unsigned int hsub, vsub, i;
+       struct tegra_bo *planes[4];
+       struct drm_gem_object *gem;
+       struct tegra_fb *fb;
+       int err;
+
+       hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
+       vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);
+
+       for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
+               unsigned int width = cmd->width / (i ? hsub : 1);
+               unsigned int height = cmd->height / (i ? vsub : 1);
+               unsigned int size, bpp;
+
+               gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
+               if (!gem) {
+                       err = -ENXIO;
+                       goto unreference;
+               }
+
+               bpp = drm_format_plane_cpp(cmd->pixel_format, i);
+
+               size = (height - 1) * cmd->pitches[i] +
+                      width * bpp + cmd->offsets[i];
+
+               if (gem->size < size) {
+                       err = -EINVAL;
+                       goto unreference;
+               }
+
+               planes[i] = to_tegra_bo(gem);
+       }
+
+       fb = tegra_fb_alloc(drm, cmd, planes, i);
+       if (IS_ERR(fb)) {
+               err = PTR_ERR(fb);
+               goto unreference;
+       }
+
+       return &fb->base;
+
+unreference:
+       while (i--)
+               drm_gem_object_unreference_unlocked(&planes[i]->gem);
+
+       return ERR_PTR(err);
+}
+
+static struct fb_ops tegra_fb_ops = {
+       .owner = THIS_MODULE,
+       .fb_fillrect = sys_fillrect,
+       .fb_copyarea = sys_copyarea,
+       .fb_imageblit = sys_imageblit,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int tegra_fbdev_probe(struct drm_fb_helper *helper,
+                            struct drm_fb_helper_surface_size *sizes)
+{
+       struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+       struct drm_device *drm = helper->dev;
+       struct drm_mode_fb_cmd2 cmd = { 0 };
+       unsigned int bytes_per_pixel;
+       struct drm_framebuffer *fb;
+       unsigned long offset;
+       struct fb_info *info;
+       struct tegra_bo *bo;
+       size_t size;
+       int err;
+
+       bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+       cmd.width = sizes->surface_width;
+       cmd.height = sizes->surface_height;
+       cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+       cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                    sizes->surface_depth);
+
+       size = cmd.pitches[0] * cmd.height;
+
+       bo = tegra_bo_create(drm, size);
+       if (IS_ERR(bo))
+               return PTR_ERR(bo);
+
+       info = framebuffer_alloc(0, drm->dev);
+       if (!info) {
+               dev_err(drm->dev, "failed to allocate framebuffer info\n");
+               tegra_bo_free_object(&bo->gem);
+               return -ENOMEM;
+       }
+
+       fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+       if (IS_ERR(fbdev->fb)) {
+               dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
+               err = PTR_ERR(fbdev->fb);
+               goto release;
+       }
+
+       fb = &fbdev->fb->base;
+       helper->fb = fb;
+       helper->fbdev = info;
+
+       info->par = helper;
+       info->flags = FBINFO_FLAG_DEFAULT;
+       info->fbops = &tegra_fb_ops;
+
+       err = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (err < 0) {
+               dev_err(drm->dev, "failed to allocate color map: %d\n", err);
+               goto destroy;
+       }
+
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
+
+       offset = info->var.xoffset * bytes_per_pixel +
+                info->var.yoffset * fb->pitches[0];
+
+       drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+       info->screen_base = bo->vaddr + offset;
+       info->screen_size = size;
+       info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+       info->fix.smem_len = size;
+
+       return 0;
+
+destroy:
+       drm_framebuffer_unregister_private(fb);
+       tegra_fb_destroy(fb);
+release:
+       framebuffer_release(info);
+       return err;
+}
+
+static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+       .fb_probe = tegra_fbdev_probe,
+};
+
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
+                                             unsigned int preferred_bpp,
+                                             unsigned int num_crtc,
+                                             unsigned int max_connectors)
+{
+       struct drm_fb_helper *helper;
+       struct tegra_fbdev *fbdev;
+       int err;
+
+       fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+       if (!fbdev) {
+               dev_err(drm->dev, "failed to allocate DRM fbdev\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       fbdev->base.funcs = &tegra_fb_helper_funcs;
+       helper = &fbdev->base;
+
+       err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
+       if (err < 0) {
+               dev_err(drm->dev, "failed to initialize DRM FB helper\n");
+               goto free;
+       }
+
+       err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
+       if (err < 0) {
+               dev_err(drm->dev, "failed to add connectors\n");
+               goto fini;
+       }
+
+       drm_helper_disable_unused_functions(drm);
+
+       err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
+       if (err < 0) {
+               dev_err(drm->dev, "failed to set initial configuration\n");
+               goto fini;
+       }
+
+       return fbdev;
+
+fini:
+       drm_fb_helper_fini(&fbdev->base);
+free:
+       kfree(fbdev);
+       return ERR_PTR(err);
+}
+
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+       struct fb_info *info = fbdev->base.fbdev;
+
+       if (info) {
+               int err;
+
+               err = unregister_framebuffer(info);
+               if (err < 0)
+                       DRM_DEBUG_KMS("failed to unregister framebuffer\n");
+
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+
+               framebuffer_release(info);
+       }
+
+       if (fbdev->fb) {
+               drm_framebuffer_unregister_private(&fbdev->fb->base);
+               tegra_fb_destroy(&fbdev->fb->base);
+       }
+
+       drm_fb_helper_fini(&fbdev->base);
+       kfree(fbdev);
+}
+
+static void tegra_fb_output_poll_changed(struct drm_device *drm)
+{
+       struct host1x_drm *host1x = drm->dev_private;
+
+       if (host1x->fbdev)
+               drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+       .fb_create = tegra_fb_create,
+       .output_poll_changed = tegra_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+       struct host1x_drm *host1x = drm->dev_private;
+       struct tegra_fbdev *fbdev;
+
+       drm->mode_config.min_width = 0;
+       drm->mode_config.min_height = 0;
+
+       drm->mode_config.max_width = 4096;
+       drm->mode_config.max_height = 4096;
+
+       drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+       fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+                                  drm->mode_config.num_connector);
+       if (IS_ERR(fbdev))
+               return PTR_ERR(fbdev);
+
+       host1x->fbdev = fbdev;
+
+       return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+       struct host1x_drm *host1x = drm->dev_private;
+
+       tegra_fbdev_free(host1x->fbdev);
+}
+
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+       if (fbdev) {
+               drm_modeset_lock_all(fbdev->base.dev);
+               drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+               drm_modeset_unlock_all(fbdev->base.dev);
+       }
+}
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
new file mode 100644 (file)
index 0000000..c5e9a9b
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * NVIDIA Tegra DRM GEM helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
+ *
+ * Based on the GEM/CMA helpers
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "gem.h"
+
+static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+{
+       return container_of(bo, struct tegra_bo, base);
+}
+
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct drm_device *drm = obj->gem.dev;
+
+       mutex_lock(&drm->struct_mutex);
+       drm_gem_object_unreference(&obj->gem);
+       mutex_unlock(&drm->struct_mutex);
+}
+
+static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+{
+       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+       return obj->paddr;
+}
+
+static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+}
+
+static void *tegra_bo_mmap(struct host1x_bo *bo)
+{
+       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+       return obj->vaddr;
+}
+
+static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+}
+
+static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
+{
+       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+       return obj->vaddr + page * PAGE_SIZE;
+}
+
+static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
+                           void *addr)
+{
+}
+
+static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
+{
+       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct drm_device *drm = obj->gem.dev;
+
+       mutex_lock(&drm->struct_mutex);
+       drm_gem_object_reference(&obj->gem);
+       mutex_unlock(&drm->struct_mutex);
+
+       return bo;
+}
+
+const struct host1x_bo_ops tegra_bo_ops = {
+       .get = tegra_bo_get,
+       .put = tegra_bo_put,
+       .pin = tegra_bo_pin,
+       .unpin = tegra_bo_unpin,
+       .mmap = tegra_bo_mmap,
+       .munmap = tegra_bo_munmap,
+       .kmap = tegra_bo_kmap,
+       .kunmap = tegra_bo_kunmap,
+};
+
+static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+{
+       dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+}
+
+unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
+{
+       return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+{
+       struct tegra_bo *bo;
+       int err;
+
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+       if (!bo)
+               return ERR_PTR(-ENOMEM);
+
+       host1x_bo_init(&bo->base, &tegra_bo_ops);
+       size = round_up(size, PAGE_SIZE);
+
+       bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+                                          GFP_KERNEL | __GFP_NOWARN);
+       if (!bo->vaddr) {
+               dev_err(drm->dev, "failed to allocate buffer with size %u\n",
+                       size);
+               err = -ENOMEM;
+               goto err_dma;
+       }
+
+       err = drm_gem_object_init(drm, &bo->gem, size);
+       if (err)
+               goto err_init;
+
+       err = drm_gem_create_mmap_offset(&bo->gem);
+       if (err)
+               goto err_mmap;
+
+       return bo;
+
+err_mmap:
+       drm_gem_object_release(&bo->gem);
+err_init:
+       tegra_bo_destroy(drm, bo);
+err_dma:
+       kfree(bo);
+
+       return ERR_PTR(err);
+}
+
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+                                           struct drm_device *drm,
+                                           unsigned int size,
+                                           unsigned int *handle)
+{
+       struct tegra_bo *bo;
+       int ret;
+
+       bo = tegra_bo_create(drm, size);
+       if (IS_ERR(bo))
+               return bo;
+
+       ret = drm_gem_handle_create(file, &bo->gem, handle);
+       if (ret)
+               goto err;
+
+       drm_gem_object_unreference_unlocked(&bo->gem);
+
+       return bo;
+
+err:
+       tegra_bo_free_object(&bo->gem);
+       return ERR_PTR(ret);
+}
+
+void tegra_bo_free_object(struct drm_gem_object *gem)
+{
+       struct tegra_bo *bo = to_tegra_bo(gem);
+
+       if (gem->map_list.map)
+               drm_gem_free_mmap_offset(gem);
+
+       drm_gem_object_release(gem);
+       tegra_bo_destroy(gem->dev, bo);
+
+       kfree(bo);
+}
+
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+                        struct drm_mode_create_dumb *args)
+{
+       int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       struct tegra_bo *bo;
+
+       if (args->pitch < min_pitch)
+               args->pitch = min_pitch;
+
+       if (args->size < args->pitch * args->height)
+               args->size = args->pitch * args->height;
+
+       bo = tegra_bo_create_with_handle(file, drm, args->size,
+                                        &args->handle);
+       if (IS_ERR(bo))
+               return PTR_ERR(bo);
+
+       return 0;
+}
+
+int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
+                            uint32_t handle, uint64_t *offset)
+{
+       struct drm_gem_object *gem;
+       struct tegra_bo *bo;
+
+       mutex_lock(&drm->struct_mutex);
+
+       gem = drm_gem_object_lookup(drm, file, handle);
+       if (!gem) {
+               dev_err(drm->dev, "failed to lookup GEM object\n");
+               mutex_unlock(&drm->struct_mutex);
+               return -EINVAL;
+       }
+
+       bo = to_tegra_bo(gem);
+
+       *offset = tegra_bo_get_mmap_offset(bo);
+
+       drm_gem_object_unreference(gem);
+
+       mutex_unlock(&drm->struct_mutex);
+
+       return 0;
+}
+
+const struct vm_operations_struct tegra_bo_vm_ops = {
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
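+/*
+ * Buffers are backed by contiguous write-combined memory allocated in
+ * tegra_bo_create(), so a userspace mapping can simply remap the underlying
+ * physical range.
+ */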
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct drm_gem_object *gem;
+       struct tegra_bo *bo;
+       int ret;
+
+       ret = drm_gem_mmap(file, vma);
+       if (ret)
+               return ret;
+
+       gem = vma->vm_private_data;
+       bo = to_tegra_bo(gem);
+
+       ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
+                             vma->vm_end - vma->vm_start, vma->vm_page_prot);
+       if (ret)
+               drm_gem_vm_close(vma);
+
+       return ret;
+}
+
+int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
+                         unsigned int handle)
+{
+       return drm_gem_handle_delete(file, handle);
+}
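The dumb-buffer helpers above do no format handling of their own: tegra_bo_dumb_create() merely rounds the caller's pitch up to width * bpp / 8 bytes and the size up to pitch * height before allocating the backing object. A minimal standalone sketch of that arithmetic follows; the helper name and the 64-bit size type are illustrative choices, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors the pitch/size rounding performed in tegra_bo_dumb_create(). */
static void dumb_args_fixup(uint32_t width, uint32_t height, uint32_t bpp,
                            uint32_t *pitch, uint64_t *size)
{
        uint32_t min_pitch = DIV_ROUND_UP(width * bpp, 8);

        if (*pitch < min_pitch)
                *pitch = min_pitch;

        if (*size < (uint64_t)*pitch * height)
                *size = (uint64_t)*pitch * height;
}

int main(void)
{
        uint32_t pitch = 0;
        uint64_t size = 0;

        /* 1024x768 at 32 bpp: pitch becomes 4096, size 3145728 bytes. */
        dumb_args_fixup(1024, 768, 32, &pitch, &size);
        printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
        return 0;
}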
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
new file mode 100644 (file)
index 0000000..34de2b4
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Tegra host1x GEM implementation
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_GEM_H
+#define __HOST1X_GEM_H
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+
+#include "host1x_bo.h"
+
+struct tegra_bo {
+       struct drm_gem_object gem;
+       struct host1x_bo base;
+       dma_addr_t paddr;
+       void *vaddr;
+};
+
+static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
+{
+       return container_of(gem, struct tegra_bo, gem);
+}
+
+extern const struct host1x_bo_ops tegra_bo_ops;
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+                                           struct drm_device *drm,
+                                           unsigned int size,
+                                           unsigned int *handle);
+void tegra_bo_free_object(struct drm_gem_object *gem);
+unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+                        struct drm_mode_create_dumb *args);
+int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
+                            uint32_t handle, uint64_t *offset);
+int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
+                         unsigned int handle);
+
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct tegra_bo_vm_ops;
+
+#endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
new file mode 100644 (file)
index 0000000..6a45ae0
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * drivers/video/tegra/host/gr2d/gr2d.c
+ *
+ * Tegra Graphics 2D
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+
+#include "channel.h"
+#include "drm.h"
+#include "gem.h"
+#include "job.h"
+#include "host1x.h"
+#include "host1x_bo.h"
+#include "host1x_client.h"
+#include "syncpt.h"
+
+struct gr2d {
+       struct host1x_client client;
+       struct clk *clk;
+       struct host1x_channel *channel;
+       unsigned long *addr_regs;
+};
+
+static inline struct gr2d *to_gr2d(struct host1x_client *client)
+{
+       return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
+
+static int gr2d_client_init(struct host1x_client *client,
+                           struct drm_device *drm)
+{
+       return 0;
+}
+
+static int gr2d_client_exit(struct host1x_client *client)
+{
+       return 0;
+}
+
+static int gr2d_open_channel(struct host1x_client *client,
+                            struct host1x_drm_context *context)
+{
+       struct gr2d *gr2d = to_gr2d(client);
+
+       context->channel = host1x_channel_get(gr2d->channel);
+
+       if (!context->channel)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void gr2d_close_channel(struct host1x_drm_context *context)
+{
+       host1x_channel_put(context->channel);
+}
+
+static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
+                                         struct drm_file *file,
+                                         u32 handle)
+{
+       struct drm_gem_object *gem;
+       struct tegra_bo *bo;
+
+       gem = drm_gem_object_lookup(drm, file, handle);
+       if (!gem)
+               return NULL;
+
+       mutex_lock(&drm->struct_mutex);
+       drm_gem_object_unreference(gem);
+       mutex_unlock(&drm->struct_mutex);
+
+       bo = to_tegra_bo(gem);
+       return &bo->base;
+}
+
+static int gr2d_submit(struct host1x_drm_context *context,
+                      struct drm_tegra_submit *args, struct drm_device *drm,
+                      struct drm_file *file)
+{
+       struct host1x_job *job;
+       unsigned int num_cmdbufs = args->num_cmdbufs;
+       unsigned int num_relocs = args->num_relocs;
+       unsigned int num_waitchks = args->num_waitchks;
+       struct drm_tegra_cmdbuf __user *cmdbufs =
+               (void __user *)(uintptr_t)args->cmdbufs;
+       struct drm_tegra_reloc __user *relocs =
+               (void __user *)(uintptr_t)args->relocs;
+       struct drm_tegra_waitchk __user *waitchks =
+               (void __user *)(uintptr_t)args->waitchks;
+       struct drm_tegra_syncpt syncpt;
+       int err;
+
+       /* We don't yet support more than one syncpt_incr struct per submit */
+       if (args->num_syncpts != 1)
+               return -EINVAL;
+
+       job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+                              args->num_relocs, args->num_waitchks);
+       if (!job)
+               return -ENOMEM;
+
+       job->num_relocs = args->num_relocs;
+       job->num_waitchk = args->num_waitchks;
+       job->client = (u32)args->context;
+       job->class = context->client->class;
+       job->serialize = true;
+
+       while (num_cmdbufs) {
+               struct drm_tegra_cmdbuf cmdbuf;
+               struct host1x_bo *bo;
+
+               err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+               if (err)
+                       goto fail;
+
+               bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+               if (!bo) {
+                       err = -ENOENT;
+                       goto fail;
+               }
+
+               host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+               num_cmdbufs--;
+               cmdbufs++;
+       }
+
+       err = copy_from_user(job->relocarray, relocs,
+                            sizeof(*relocs) * num_relocs);
+       if (err)
+               goto fail;
+
+       while (num_relocs--) {
+               struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+               struct host1x_bo *cmdbuf, *target;
+
+               cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+               target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+               reloc->cmdbuf = cmdbuf;
+               reloc->target = target;
+
+               if (!reloc->target || !reloc->cmdbuf) {
+                       err = -ENOENT;
+                       goto fail;
+               }
+       }
+
+       err = copy_from_user(job->waitchk, waitchks,
+                            sizeof(*waitchks) * num_waitchks);
+       if (err)
+               goto fail;
+
+       err = copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
+                            sizeof(syncpt));
+       if (err)
+               goto fail;
+
+       job->syncpt_id = syncpt.id;
+       job->syncpt_incrs = syncpt.incrs;
+       job->timeout = 10000;
+       job->is_addr_reg = gr2d_is_addr_reg;
+
+       if (args->timeout && args->timeout < 10000)
+               job->timeout = args->timeout;
+
+       err = host1x_job_pin(job, context->client->dev);
+       if (err)
+               goto fail;
+
+       err = host1x_job_submit(job);
+       if (err)
+               goto fail_submit;
+
+       args->fence = job->syncpt_end;
+
+       host1x_job_put(job);
+       return 0;
+
+fail_submit:
+       host1x_job_unpin(job);
+fail:
+       host1x_job_put(job);
+       return err;
+}
+
+static struct host1x_client_ops gr2d_client_ops = {
+       .drm_init = gr2d_client_init,
+       .drm_exit = gr2d_client_exit,
+       .open_channel = gr2d_open_channel,
+       .close_channel = gr2d_close_channel,
+       .submit = gr2d_submit,
+};
+
+static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
+{
+       const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
+                                     0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
+       unsigned long *bitmap;
+       int i;
+
+       bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
+                             GFP_KERNEL);
+
+       for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
+               u32 reg = gr2d_addr_regs[i];
+               bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
+       }
+
+       gr2d->addr_regs = bitmap;
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
+{
+       struct gr2d *gr2d = dev_get_drvdata(dev);
+
+       switch (class) {
+       case HOST1X_CLASS_HOST1X:
+               return reg == 0x2b;
+       case HOST1X_CLASS_GR2D:
+       case HOST1X_CLASS_GR2D_SB:
+               reg &= 0xff;
+               if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
+                       return 1;
+       default:
+               return 0;
+       }
+}
+
+static const struct of_device_id gr2d_match[] = {
+       { .compatible = "nvidia,tegra30-gr2d" },
+       { .compatible = "nvidia,tegra20-gr2d" },
+       { },
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
+       int err;
+       struct gr2d *gr2d = NULL;
+       struct host1x_syncpt **syncpts;
+
+       gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+       if (!gr2d)
+               return -ENOMEM;
+
+       syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+       if (!syncpts)
+               return -ENOMEM;
+
+       gr2d->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(gr2d->clk)) {
+               dev_err(dev, "cannot get clock\n");
+               return PTR_ERR(gr2d->clk);
+       }
+
+       err = clk_prepare_enable(gr2d->clk);
+       if (err) {
+               dev_err(dev, "cannot turn on clock\n");
+               return err;
+       }
+
+       gr2d->channel = host1x_channel_request(dev);
+       if (!gr2d->channel)
+               return -ENOMEM;
+
+       *syncpts = host1x_syncpt_request(dev, 0);
+       if (!(*syncpts)) {
+               host1x_channel_free(gr2d->channel);
+               return -ENOMEM;
+       }
+
+       gr2d->client.ops = &gr2d_client_ops;
+       gr2d->client.dev = dev;
+       gr2d->client.class = HOST1X_CLASS_GR2D;
+       gr2d->client.syncpts = syncpts;
+       gr2d->client.num_syncpts = 1;
+
+       err = host1x_register_client(host1x, &gr2d->client);
+       if (err < 0) {
+               dev_err(dev, "failed to register host1x client: %d\n", err);
+               return err;
+       }
+
+       gr2d_init_addr_reg_map(dev, gr2d);
+
+       platform_set_drvdata(pdev, gr2d);
+
+       return 0;
+}
+
+static int __exit gr2d_remove(struct platform_device *pdev)
+{
+       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+       struct gr2d *gr2d = platform_get_drvdata(pdev);
+       unsigned int i;
+       int err;
+
+       err = host1x_unregister_client(host1x, &gr2d->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
+               return err;
+       }
+
+       for (i = 0; i < gr2d->client.num_syncpts; i++)
+               host1x_syncpt_free(gr2d->client.syncpts[i]);
+
+       host1x_channel_free(gr2d->channel);
+       clk_disable_unprepare(gr2d->clk);
+
+       return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+       .probe = gr2d_probe,
+       .remove = __exit_p(gr2d_remove),
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "gr2d",
+               .of_match_table = gr2d_match,
+       }
+};
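gr2d_init_addr_reg_map() above packs the 2D unit's address-carrying registers into a 256-bit bitmap so that gr2d_is_addr_reg() can answer a membership query with a single word load and mask. Below is a standalone sketch of the same BIT_WORD()/BIT_MASK() bookkeeping; the macros are userspace stand-ins for the kernel ones, and the register list is copied from the table in gr2d.c above.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
        /* Registers that carry buffer addresses, as listed in gr2d.c. */
        const unsigned int addr_regs[] = { 0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d,
                                           0x31, 0x32, 0x48, 0x49, 0x4a, 0x4b,
                                           0x4c };
        unsigned long bitmap[256 / BITS_PER_LONG] = { 0 };
        unsigned int i;

        for (i = 0; i < sizeof(addr_regs) / sizeof(addr_regs[0]); i++)
                bitmap[BIT_WORD(addr_regs[i])] |= BIT_MASK(addr_regs[i]);

        /* Membership test, as done in gr2d_is_addr_reg(). */
        for (i = 0x18; i <= 0x1c; i++)
                printf("reg 0x%02x: %s\n", i,
                       (bitmap[BIT_WORD(i)] & BIT_MASK(i)) ? "address" : "data");
        return 0;
}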
similarity index 99%
rename from drivers/gpu/drm/tegra/hdmi.c
rename to drivers/gpu/host1x/drm/hdmi.c
index bb747f6..01097da 100644 (file)
@@ -22,6 +22,7 @@
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
+#include "host1x_client.h"
 
 struct tegra_hdmi {
        struct host1x_client client;
@@ -1189,7 +1190,7 @@ static const struct host1x_client_ops hdmi_client_ops = {
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
-       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct tegra_hdmi *hdmi;
        struct resource *regs;
        int err;
@@ -1278,7 +1279,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
 static int tegra_hdmi_remove(struct platform_device *pdev)
 {
-       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
        int err;
 
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
new file mode 100644 (file)
index 0000000..a2bc1e6
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Tegra host1x driver
+ *
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+enum host1x_class {
+       HOST1X_CLASS_HOST1X     = 0x1,
+       HOST1X_CLASS_GR2D       = 0x51,
+       HOST1X_CLASS_GR2D_SB    = 0x52
+};
+
+#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
new file mode 100644 (file)
index 0000000..4c1f10b
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Tegra host1x Memory Management Abstraction header
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HOST1X_BO_H
+#define _HOST1X_BO_H
+
+struct host1x_bo;
+
+struct host1x_bo_ops {
+       struct host1x_bo *(*get)(struct host1x_bo *bo);
+       void (*put)(struct host1x_bo *bo);
+       dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+       void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+       void *(*mmap)(struct host1x_bo *bo);
+       void (*munmap)(struct host1x_bo *bo, void *addr);
+       void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+       void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+       const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+                                 const struct host1x_bo_ops *ops)
+{
+       bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+       return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+       bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+                                      struct sg_table **sgt)
+{
+       return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+       bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+       return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+       bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+       return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+                                   unsigned int pagenum, void *addr)
+{
+       bo->ops->kunmap(bo, pagenum, addr);
+}
+
+#endif
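host1x_bo above is a small vtable abstraction: a concrete buffer object embeds struct host1x_bo, fills in a host1x_bo_ops table, and core code only ever calls through the inline wrappers. The toy below illustrates the pattern in isolation; the toy_bo/ram_bo names and the trivial refcounting are simplifications for the sketch, not the real driver structures.

#include <stdio.h>
#include <stddef.h>

struct toy_bo;

struct toy_bo_ops {
        struct toy_bo *(*get)(struct toy_bo *bo);
        void (*put)(struct toy_bo *bo);
        void *(*mmap)(struct toy_bo *bo);
};

struct toy_bo {
        const struct toy_bo_ops *ops;
};

/* A concrete implementation embeds the abstract object, as tegra_bo does. */
struct ram_bo {
        struct toy_bo base;
        unsigned int refcount;
        char payload[64];
};

#define to_ram_bo(bo) \
        ((struct ram_bo *)((char *)(bo) - offsetof(struct ram_bo, base)))

static struct toy_bo *ram_bo_get(struct toy_bo *bo)
{
        to_ram_bo(bo)->refcount++;
        return bo;
}

static void ram_bo_put(struct toy_bo *bo)
{
        to_ram_bo(bo)->refcount--;
}

static void *ram_bo_mmap(struct toy_bo *bo)
{
        return to_ram_bo(bo)->payload;
}

static const struct toy_bo_ops ram_bo_ops = {
        .get = ram_bo_get,
        .put = ram_bo_put,
        .mmap = ram_bo_mmap,
};

int main(void)
{
        struct ram_bo ram = { .base.ops = &ram_bo_ops, .refcount = 1 };
        struct toy_bo *bo = &ram.base;

        /* Core code sees only 'bo' and dispatches through the ops table. */
        bo->ops->get(bo);
        sprintf(bo->ops->mmap(bo), "hello");
        printf("%s (refcount %u)\n", (char *)bo->ops->mmap(bo), ram.refcount);
        bo->ops->put(bo);
        return 0;
}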
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/host1x_client.h
new file mode 100644 (file)
index 0000000..9b85f10
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_CLIENT_H
+#define HOST1X_CLIENT_H
+
+struct device;
+struct platform_device;
+
+#ifdef CONFIG_DRM_TEGRA
+int host1x_drm_alloc(struct platform_device *pdev);
+#else
+static inline int host1x_drm_alloc(struct platform_device *pdev)
+{
+       return 0;
+}
+#endif
+
+void host1x_set_drm_data(struct device *dev, void *data);
+void *host1x_get_drm_data(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
new file mode 100644 (file)
index 0000000..9b50863
--- /dev/null
@@ -0,0 +1,6 @@
+ccflags-y = -Idrivers/gpu/host1x
+
+host1x-hw-objs  = \
+       host1x01.o
+
+obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
new file mode 100644 (file)
index 0000000..590b69d
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#include "cdma.h"
+#include "channel.h"
+#include "dev.h"
+#include "debug.h"
+
+/*
+ * Put a RESTART opcode at the end of the push buffer memory.
+ */
+static void push_buffer_init(struct push_buffer *pb)
+{
+       *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
+}
+
+/*
+ * Increment a timed-out buffer's syncpt via the CPU.
+ */
+static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
+                               u32 syncpt_incrs, u32 syncval, u32 nr_slots)
+{
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       struct push_buffer *pb = &cdma->push_buffer;
+       u32 i;
+
+       for (i = 0; i < syncpt_incrs; i++)
+               host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
+
+       /* after CPU incr, ensure shadow is up to date */
+       host1x_syncpt_load(cdma->timeout.syncpt);
+
+       /* NOP all the PB slots */
+       while (nr_slots--) {
+               u32 *p = (u32 *)((u32)pb->mapped + getptr);
+               *(p++) = HOST1X_OPCODE_NOP;
+               *(p++) = HOST1X_OPCODE_NOP;
+               dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
+                       pb->phys + getptr);
+               getptr = (getptr + 8) & (pb->size_bytes - 1);
+       }
+       wmb();
+}
+
+/*
+ * Start channel DMA
+ */
+static void cdma_start(struct host1x_cdma *cdma)
+{
+       struct host1x_channel *ch = cdma_to_channel(cdma);
+
+       if (cdma->running)
+               return;
+
+       cdma->last_pos = cdma->push_buffer.pos;
+
+       host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+                        HOST1X_CHANNEL_DMACTRL);
+
+       /* set base, put and end pointer */
+       host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+       host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
+       host1x_ch_writel(ch, cdma->push_buffer.phys +
+                        cdma->push_buffer.size_bytes + 4,
+                        HOST1X_CHANNEL_DMAEND);
+
+       /* reset GET */
+       host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
+                        HOST1X_CHANNEL_DMACTRL_DMAGETRST |
+                        HOST1X_CHANNEL_DMACTRL_DMAINITGET,
+                        HOST1X_CHANNEL_DMACTRL);
+
+       /* start the command DMA */
+       host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
+
+       cdma->running = true;
+}
+
+/*
+ * Similar to cdma_start(), but rather than starting from an idle
+ * state (where DMA GET is set to DMA PUT), on a timeout we restore
+ * DMA GET from an explicit value (so DMA may again be pending).
+ */
+static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
+{
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       struct host1x_channel *ch = cdma_to_channel(cdma);
+
+       if (cdma->running)
+               return;
+
+       cdma->last_pos = cdma->push_buffer.pos;
+
+       host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+                        HOST1X_CHANNEL_DMACTRL);
+
+       /* set base, end pointer (all of memory) */
+       host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+       host1x_ch_writel(ch, cdma->push_buffer.phys +
+                        cdma->push_buffer.size_bytes,
+                        HOST1X_CHANNEL_DMAEND);
+
+       /* set GET, by loading the value in PUT (then reset GET) */
+       host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
+       host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
+                        HOST1X_CHANNEL_DMACTRL_DMAGETRST |
+                        HOST1X_CHANNEL_DMACTRL_DMAINITGET,
+                        HOST1X_CHANNEL_DMACTRL);
+
+       dev_dbg(host1x->dev,
+               "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
+               host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
+               host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
+               cdma->last_pos);
+
+       /* deassert GET reset and set PUT */
+       host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+                        HOST1X_CHANNEL_DMACTRL);
+       host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
+
+       /* start the command DMA */
+       host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
+
+       cdma->running = true;
+}
+
+/*
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void cdma_flush(struct host1x_cdma *cdma)
+{
+       struct host1x_channel *ch = cdma_to_channel(cdma);
+
+       if (cdma->push_buffer.pos != cdma->last_pos) {
+               host1x_ch_writel(ch, cdma->push_buffer.pos,
+                                HOST1X_CHANNEL_DMAPUT);
+               cdma->last_pos = cdma->push_buffer.pos;
+       }
+}
+
+static void cdma_stop(struct host1x_cdma *cdma)
+{
+       struct host1x_channel *ch = cdma_to_channel(cdma);
+
+       mutex_lock(&cdma->lock);
+       if (cdma->running) {
+               host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+               host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+                                HOST1X_CHANNEL_DMACTRL);
+               cdma->running = false;
+       }
+       mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Stop the channel's command processor and CDMA immediately, then tear
+ * down the channel and reset the corresponding module.
+ */
+static void cdma_freeze(struct host1x_cdma *cdma)
+{
+       struct host1x *host = cdma_to_host1x(cdma);
+       struct host1x_channel *ch = cdma_to_channel(cdma);
+       u32 cmdproc_stop;
+
+       if (cdma->torndown && !cdma->running) {
+               dev_warn(host->dev, "Already torn down\n");
+               return;
+       }
+
+       dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
+
+       cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
+       cmdproc_stop |= BIT(ch->id);
+       host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+       dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+               __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
+               host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
+               cdma->last_pos);
+
+       host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+                        HOST1X_CHANNEL_DMACTRL);
+
+       host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+
+       cdma->running = false;
+       cdma->torndown = true;
+}
+
+static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
+{
+       struct host1x *host1x = cdma_to_host1x(cdma);
+       struct host1x_channel *ch = cdma_to_channel(cdma);
+       u32 cmdproc_stop;
+
+       dev_dbg(host1x->dev,
+               "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+               ch->id, getptr);
+
+       cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
+       cmdproc_stop &= ~(BIT(ch->id));
+       host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+       cdma->torndown = false;
+       cdma_timeout_restart(cdma, getptr);
+}
+
+/*
+ * If this timeout fires, the current sync_queue entry has exceeded its
+ * TTL: the user context is timed out, already-issued submits are cleaned
+ * up, and future submits return an error.
+ */
+static void cdma_timeout_handler(struct work_struct *work)
+{
+       struct host1x_cdma *cdma;
+       struct host1x *host1x;
+       struct host1x_channel *ch;
+       u32 syncpt_val;
+       u32 prev_cmdproc, cmdproc_stop;
+
+       cdma = container_of(to_delayed_work(work), struct host1x_cdma,
+                           timeout.wq);
+       host1x = cdma_to_host1x(cdma);
+       ch = cdma_to_channel(cdma);
+
+       host1x_debug_dump(cdma_to_host1x(cdma));
+
+       mutex_lock(&cdma->lock);
+
+       if (!cdma->timeout.client) {
+               dev_dbg(host1x->dev,
+                       "cdma_timeout: expired, but has no clientid\n");
+               mutex_unlock(&cdma->lock);
+               return;
+       }
+
+       /* stop processing to get a clean snapshot */
+       prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
+       cmdproc_stop = prev_cmdproc | BIT(ch->id);
+       host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+       dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
+               prev_cmdproc, cmdproc_stop);
+
+       syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
+
+       /* has buffer actually completed? */
+       if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
+               dev_dbg(host1x->dev,
+                       "cdma_timeout: expired, but buffer had completed\n");
+               /* restore */
+               cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
+               host1x_sync_writel(host1x, cmdproc_stop,
+                                  HOST1X_SYNC_CMDPROC_STOP);
+               mutex_unlock(&cdma->lock);
+               return;
+       }
+
+       dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
+               __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+               syncpt_val, cdma->timeout.syncpt_val);
+
+       /* stop HW, resetting channel/module */
+       host1x_hw_cdma_freeze(host1x, cdma);
+
+       host1x_cdma_update_sync_queue(cdma, ch->dev);
+       mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Init timeout resources
+ */
+static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+{
+       INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
+       cdma->timeout.initialized = true;
+
+       return 0;
+}
+
+/*
+ * Clean up timeout resources
+ */
+static void cdma_timeout_destroy(struct host1x_cdma *cdma)
+{
+       if (cdma->timeout.initialized)
+               cancel_delayed_work(&cdma->timeout.wq);
+       cdma->timeout.initialized = false;
+}
+
+static const struct host1x_cdma_ops host1x_cdma_ops = {
+       .start = cdma_start,
+       .stop = cdma_stop,
+       .flush = cdma_flush,
+
+       .timeout_init = cdma_timeout_init,
+       .timeout_destroy = cdma_timeout_destroy,
+       .freeze = cdma_freeze,
+       .resume = cdma_resume,
+       .timeout_cpu_incr = cdma_timeout_cpu_incr,
+};
+
+static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
+       .init = push_buffer_init,
+};
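cdma_timeout_cpu_incr() above NOPs out the stale push buffer slots and advances its get pointer with (getptr + 8) & (pb->size_bytes - 1): eight-byte slots (two opcode words each) stepping through a power-of-two ring. A tiny sketch of that wrap arithmetic with made-up sizes:

#include <stdio.h>

int main(void)
{
        /* Push buffer size must be a power of two for the mask to work. */
        const unsigned int size_bytes = 4096;
        unsigned int getptr = 4080;     /* start in the last two slots */
        int i;

        for (i = 0; i < 4; i++) {
                printf("slot at offset %u\n", getptr);
                getptr = (getptr + 8) & (size_bytes - 1);
        }
        return 0;
}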
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
new file mode 100644 (file)
index 0000000..ee19962
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <trace/events/host1x.h>
+
+#include "host1x.h"
+#include "host1x_bo.h"
+#include "channel.h"
+#include "dev.h"
+#include "intr.h"
+#include "job.h"
+
+#define HOST1X_CHANNEL_SIZE 16384
+#define TRACE_MAX_LENGTH 128U
+
+static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
+                              u32 offset, u32 words)
+{
+       void *mem = NULL;
+
+       if (host1x_debug_trace_cmdbuf)
+               mem = host1x_bo_mmap(bo);
+
+       if (mem) {
+               u32 i;
+               /*
+                * Write in batches of 128 words, as there seems to be a
+                * limit on how much can be output to ftrace at once.
+                */
+               for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
+                       trace_host1x_cdma_push_gather(
+                               dev_name(cdma_to_channel(cdma)->dev),
+                               (u32)bo, min(words - i, TRACE_MAX_LENGTH),
+                               offset + i * sizeof(u32), mem);
+               }
+               host1x_bo_munmap(bo, mem);
+       }
+}
+
+static void submit_gathers(struct host1x_job *job)
+{
+       struct host1x_cdma *cdma = &job->channel->cdma;
+       unsigned int i;
+
+       for (i = 0; i < job->num_gathers; i++) {
+               struct host1x_job_gather *g = &job->gathers[i];
+               u32 op1 = host1x_opcode_gather(g->words);
+               u32 op2 = g->base + g->offset;
+               trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
+               host1x_cdma_push(cdma, op1, op2);
+       }
+}
+
+static int channel_submit(struct host1x_job *job)
+{
+       struct host1x_channel *ch = job->channel;
+       struct host1x_syncpt *sp;
+       u32 user_syncpt_incrs = job->syncpt_incrs;
+       u32 prev_max = 0;
+       u32 syncval;
+       int err;
+       struct host1x_waitlist *completed_waiter = NULL;
+       struct host1x *host = dev_get_drvdata(ch->dev->parent);
+
+       sp = host->syncpt + job->syncpt_id;
+       trace_host1x_channel_submit(dev_name(ch->dev),
+                                   job->num_gathers, job->num_relocs,
+                                   job->num_waitchk, job->syncpt_id,
+                                   job->syncpt_incrs);
+
+       /* before error checks, record the current syncpt max in the job */
+       prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
+
+       /* get submit lock */
+       err = mutex_lock_interruptible(&ch->submitlock);
+       if (err)
+               goto error;
+
+       completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
+       if (!completed_waiter) {
+               mutex_unlock(&ch->submitlock);
+               err = -ENOMEM;
+               goto error;
+       }
+
+       /* begin a CDMA submit */
+       err = host1x_cdma_begin(&ch->cdma, job);
+       if (err) {
+               mutex_unlock(&ch->submitlock);
+               goto error;
+       }
+
+       if (job->serialize) {
+               /*
+                * Force serialization by inserting a host wait for the
+                * previous job to finish before this one can commence.
+                */
+               host1x_cdma_push(&ch->cdma,
+                                host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+                                       host1x_uclass_wait_syncpt_r(), 1),
+                                host1x_class_host_wait_syncpt(job->syncpt_id,
+                                       host1x_syncpt_read_max(sp)));
+       }
+
+       syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
+
+       job->syncpt_end = syncval;
+
+       /* add a setclass for modules that require it */
+       if (job->class)
+               host1x_cdma_push(&ch->cdma,
+                                host1x_opcode_setclass(job->class, 0, 0),
+                                HOST1X_OPCODE_NOP);
+
+       submit_gathers(job);
+
+       /* end CDMA submit & stash pinned buffers into the sync queue */
+       host1x_cdma_end(&ch->cdma, job);
+
+       trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
+
+       /* schedule a submit complete interrupt */
+       err = host1x_intr_add_action(host, job->syncpt_id, syncval,
+                                    HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
+                                    completed_waiter, NULL);
+       completed_waiter = NULL;
+       WARN(err, "Failed to set submit complete interrupt");
+
+       mutex_unlock(&ch->submitlock);
+
+       return 0;
+
+error:
+       kfree(completed_waiter);
+       return err;
+}
+
+static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
+                              unsigned int index)
+{
+       ch->id = index;
+       mutex_init(&ch->reflock);
+       mutex_init(&ch->submitlock);
+
+       ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+       return 0;
+}
+
+static const struct host1x_channel_ops host1x_channel_ops = {
+       .init = host1x_channel_init,
+       .submit = channel_submit,
+};
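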
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
new file mode 100644 (file)
index 0000000..334c038
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "cdma.h"
+#include "channel.h"
+#include "host1x_bo.h"
+
+#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
+
+enum {
+       HOST1X_OPCODE_SETCLASS  = 0x00,
+       HOST1X_OPCODE_INCR      = 0x01,
+       HOST1X_OPCODE_NONINCR   = 0x02,
+       HOST1X_OPCODE_MASK      = 0x03,
+       HOST1X_OPCODE_IMM       = 0x04,
+       HOST1X_OPCODE_RESTART   = 0x05,
+       HOST1X_OPCODE_GATHER    = 0x06,
+       HOST1X_OPCODE_EXTEND    = 0x0e,
+};
+
+enum {
+       HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK      = 0x00,
+       HOST1X_OPCODE_EXTEND_RELEASE_MLOCK      = 0x01,
+};
+
+static unsigned int show_channel_command(struct output *o, u32 val)
+{
+       unsigned mask;
+       unsigned subop;
+
+       switch (val >> 28) {
+       case HOST1X_OPCODE_SETCLASS:
+               mask = val & 0x3f;
+               if (mask) {
+                       host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+                                           val >> 6 & 0x3ff,
+                                           val >> 16 & 0xfff, mask);
+                       return hweight8(mask);
+               } else {
+                       host1x_debug_output(o, "SETCL(class=%03x)\n",
+                                           val >> 6 & 0x3ff);
+                       return 0;
+               }
+
+       case HOST1X_OPCODE_INCR:
+               host1x_debug_output(o, "INCR(offset=%03x, [",
+                                   val >> 16 & 0xfff);
+               return val & 0xffff;
+
+       case HOST1X_OPCODE_NONINCR:
+               host1x_debug_output(o, "NONINCR(offset=%03x, [",
+                                   val >> 16 & 0xfff);
+               return val & 0xffff;
+
+       case HOST1X_OPCODE_MASK:
+               mask = val & 0xffff;
+               host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+                                   val >> 16 & 0xfff, mask);
+               return hweight16(mask);
+
+       case HOST1X_OPCODE_IMM:
+               host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+                                   val >> 16 & 0xfff, val & 0xffff);
+               return 0;
+
+       case HOST1X_OPCODE_RESTART:
+               host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+               return 0;
+
+       case HOST1X_OPCODE_GATHER:
+               host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+                                   val >> 16 & 0xfff, val >> 15 & 0x1,
+                                   val >> 14 & 0x1, val & 0x3fff);
+               return 1;
+
+       case HOST1X_OPCODE_EXTEND:
+               subop = val >> 24 & 0xf;
+               if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
+                       host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+                                           val & 0xff);
+               else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
+                       host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+                                           val & 0xff);
+               else
+                       host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+               return 0;
+
+       default:
+               return 0;
+       }
+}
+
+static void show_gather(struct output *o, phys_addr_t phys_addr,
+                       unsigned int words, struct host1x_cdma *cdma,
+                       phys_addr_t pin_addr, u32 *map_addr)
+{
+       /* Map dmaget cursor to corresponding mem handle */
+       u32 offset = phys_addr - pin_addr;
+       unsigned int data_count = 0, i;
+
+       /*
+        * Sometimes we're given a different hardware address for the same
+        * page - in that case the computed offset is invalid and we just
+        * have to bail out.
+        */
+       if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
+               host1x_debug_output(o, "[address mismatch]\n");
+               return;
+       }
+
+       for (i = 0; i < words; i++) {
+               u32 addr = phys_addr + i * 4;
+               u32 val = *(map_addr + offset / 4 + i);
+
+               if (!data_count) {
+                       host1x_debug_output(o, "%08x: %08x:", addr, val);
+                       data_count = show_channel_command(o, val);
+               } else {
+                       host1x_debug_output(o, "%08x%s", val,
+                                           data_count > 0 ? ", " : "])\n");
+                       data_count--;
+               }
+       }
+}
+
+static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
+{
+       struct host1x_job *job;
+
+       list_for_each_entry(job, &cdma->sync_queue, list) {
+               int i;
+               host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
+                                   job, job->syncpt_id, job->syncpt_end,
+                                   job->first_get, job->timeout,
+                                   job->num_slots, job->num_unpins);
+
+               for (i = 0; i < job->num_gathers; i++) {
+                       struct host1x_job_gather *g = &job->gathers[i];
+                       u32 *mapped;
+
+                       if (job->gather_copy_mapped)
+                               mapped = (u32 *)job->gather_copy_mapped;
+                       else
+                               mapped = host1x_bo_mmap(g->bo);
+
+                       if (!mapped) {
+                               host1x_debug_output(o, "[could not mmap]\n");
+                               continue;
+                       }
+
+                       host1x_debug_output(o, "    GATHER at %08x+%04x, %d words\n",
+                                           g->base, g->offset, g->words);
+
+                       show_gather(o, g->base + g->offset, g->words, cdma,
+                                   g->base, mapped);
+
+                       if (!job->gather_copy_mapped)
+                               host1x_bo_munmap(g->bo, mapped);
+               }
+       }
+}
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+                                          struct host1x_channel *ch,
+                                          struct output *o)
+{
+       struct host1x_cdma *cdma = &ch->cdma;
+       u32 dmaput, dmaget, dmactrl;
+       u32 cbstat, cbread;
+       u32 val, base, baseval;
+
+       dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+       dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+       dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+       cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
+       cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
+
+       host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+
+       if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
+           !ch->cdma.push_buffer.mapped) {
+               host1x_debug_output(o, "inactive\n\n");
+               return;
+       }
+
+       if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
+           HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+           HOST1X_UCLASS_WAIT_SYNCPT)
+               host1x_debug_output(o, "waiting on syncpt %d val %d\n",
+                                   cbread >> 24, cbread & 0xffffff);
+       else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
+          HOST1X_CLASS_HOST1X &&
+          HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+          HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
+
+               base = (cbread >> 16) & 0xff;
+               baseval =
+                       host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
+               val = cbread & 0xffff;
+               host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
+                                   cbread >> 24, baseval + val, base,
+                                   baseval, val);
+       } else
+               host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
+                                   HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
+                                   HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
+                                   cbread);
+
+       host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+                           dmaput, dmaget, dmactrl);
+       host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+       show_channel_gathers(o, cdma);
+       host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+                                          struct host1x_channel *ch,
+                                          struct output *o)
+{
+       u32 val, rd_ptr, wr_ptr, start, end;
+       unsigned int data_count = 0;
+
+       host1x_debug_output(o, "%d: fifo:\n", ch->id);
+
+       val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
+       host1x_debug_output(o, "FIFOSTAT %08x\n", val);
+       if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
+               host1x_debug_output(o, "[empty]\n");
+               return;
+       }
+
+       host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+       host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+                          HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
+                          HOST1X_SYNC_CFPEEK_CTRL);
+
+       val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
+       rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
+       wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
+
+       val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
+       start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
+       end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
+
+       do {
+               host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+               host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+                                  HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
+                                  HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
+                                  HOST1X_SYNC_CFPEEK_CTRL);
+               val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
+
+               if (!data_count) {
+                       host1x_debug_output(o, "%08x:", val);
+                       data_count = show_channel_command(o, val);
+               } else {
+                       host1x_debug_output(o, "%08x%s", val,
+                                           data_count > 0 ? ", " : "])\n");
+                       data_count--;
+               }
+
+               if (rd_ptr == end)
+                       rd_ptr = start;
+               else
+                       rd_ptr++;
+       } while (rd_ptr != wr_ptr);
+
+       if (data_count)
+               host1x_debug_output(o, ", ...])\n");
+       host1x_debug_output(o, "\n");
+
+       host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+       int i;
+
+       host1x_debug_output(o, "---- mlocks ----\n");
+       for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
+               u32 owner =
+                       host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
+               if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
+                       host1x_debug_output(o, "%d: locked by channel %d\n",
+                               i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner));
+               else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
+                       host1x_debug_output(o, "%d: locked by cpu\n", i);
+               else
+                       host1x_debug_output(o, "%d: unlocked\n", i);
+       }
+       host1x_debug_output(o, "\n");
+}
+
+static const struct host1x_debug_ops host1x_debug_ops = {
+       .show_channel_cdma = host1x_debug_show_channel_cdma,
+       .show_channel_fifo = host1x_debug_show_channel_fifo,
+       .show_mlocks = host1x_debug_show_mlocks,
+};
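show_channel_command() above decodes raw command words by switching on the top nibble and, for data-carrying opcodes, returning how many payload words follow so the caller can print them inline. The standalone decoder below covers only the opcodes that show up most often in such dumps (SETCL, INCR, NONINCR, IMM), assuming the same bit layout; SETCL's mask-dependent payload count is left out for brevity.

#include <stdio.h>
#include <stdint.h>

/* Decode one command word; return the number of data words that follow. */
static unsigned int decode(uint32_t val)
{
        switch (val >> 28) {
        case 0x00:
                printf("SETCL class=%03x offset=%03x mask=%02x\n",
                       (val >> 6) & 0x3ff, (val >> 16) & 0xfff, val & 0x3f);
                return 0; /* the real decoder returns hweight8(mask) */
        case 0x01:
                printf("INCR offset=%03x count=%04x\n",
                       (val >> 16) & 0xfff, val & 0xffff);
                return val & 0xffff;
        case 0x02:
                printf("NONINCR offset=%03x count=%04x\n",
                       (val >> 16) & 0xfff, val & 0xffff);
                return val & 0xffff;
        case 0x04:
                printf("IMM offset=%03x data=%04x\n",
                       (val >> 16) & 0xfff, val & 0xffff);
                return 0;
        default:
                printf("opcode %x (not decoded here)\n", val >> 28);
                return 0;
        }
}

int main(void)
{
        /* INCR at offset 0x100 carrying two data words. */
        uint32_t words[] = { 0x11000002, 0xdeadbeef, 0xcafef00d };
        unsigned int i, data = 0;

        for (i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
                if (data) {
                        printf("  data %08x\n", words[i]);
                        data--;
                } else {
                        data = decode(words[i]);
                }
        }
        return 0;
}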
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
new file mode 100644 (file)
index 0000000..a14e91c
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for T20 and T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "hw/host1x01.h"
+#include "hw/host1x01_hardware.h"
+
+/* include code */
+#include "hw/cdma_hw.c"
+#include "hw/channel_hw.c"
+#include "hw/debug_hw.c"
+#include "hw/intr_hw.c"
+#include "hw/syncpt_hw.c"
+
+#include "dev.h"
+
+int host1x01_init(struct host1x *host)
+{
+       host->channel_op = &host1x_channel_ops;
+       host->cdma_op = &host1x_cdma_ops;
+       host->cdma_pb_op = &host1x_pushbuffer_ops;
+       host->syncpt_op = &host1x_syncpt_ops;
+       host->intr_op = &host1x_intr_ops;
+       host->debug_op = &host1x_debug_ops;
+
+       return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x01.h b/drivers/gpu/host1x/hw/host1x01.h
new file mode 100644 (file)
index 0000000..2706b67
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Host1x init for T20 and T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HOST1X_HOST1X01_H
+#define HOST1X_HOST1X01_H
+
+struct host1x;
+
+int host1x01_init(struct host1x *host);
+
+#endif /* HOST1X_HOST1X01_H_ */
diff --git a/drivers/gpu/host1x/hw/host1x01_hardware.h b/drivers/gpu/host1x/hw/host1x01_hardware.h
new file mode 100644 (file)
index 0000000..5f0fb86
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Register Offsets for Tegra20 and Tegra30
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X01_HARDWARE_H
+#define __HOST1X_HOST1X01_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x01_channel.h"
+#include "hw_host1x01_sync.h"
+#include "hw_host1x01_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_wait_syncpt_indx_f(indx)
+               | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+               | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+       unsigned indx, unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_wait_syncpt_base_indx_f(indx)
+               | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+       unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+       unsigned cond, unsigned indx)
+{
+       return host1x_uclass_incr_syncpt_cond_f(cond)
+               | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indbe_f(0xf)
+               | host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset);
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset)
+               | host1x_uclass_indoff_rwn_read_v();
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+       unsigned class_id, unsigned offset, unsigned mask)
+{
+       return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+       return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+       return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+       return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+       return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+       return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+               host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+       return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+       return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset,
+                                               unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
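The opcode helpers above pack the opcode into bits 31:28 and the operands below it, so a class switch followed by an incrementing register write is just two 32-bit words in the push buffer. The quick standalone check below reproduces the packing from the header; the class and register numbers in main() are only examples.

#include <stdio.h>
#include <stdint.h>

/* Same packing as host1x_opcode_setclass() and host1x_opcode_incr() above. */
static uint32_t opcode_setclass(unsigned int class_id, unsigned int offset,
                                unsigned int mask)
{
        return (0u << 28) | (offset << 16) | (class_id << 6) | mask;
}

static uint32_t opcode_incr(unsigned int offset, unsigned int count)
{
        return (1u << 28) | (offset << 16) | count;
}

int main(void)
{
        /* Switch to class 0x51 (GR2D), then write 3 registers from 0x2b. */
        printf("0x%08x\n", opcode_setclass(0x51, 0, 0)); /* 0x00001440 */
        printf("0x%08x\n", opcode_incr(0x2b, 3));        /* 0x102b0003 */
        return 0;
}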
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_channel.h b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
new file mode 100644 (file)
index 0000000..b4bc7ca
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef __hw_host1x_channel_host1x_h__
+#define __hw_host1x_channel_host1x_h__
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+       host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+       return (r >> 10) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+       host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+       return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+       host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+       return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+       host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+       return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+       host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+       return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+       host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+       return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+       host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+       return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+       host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+       return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+       host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+       return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+       host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+       return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+       host1x_channel_dmactrl_dmainitget()
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
new file mode 100644 (file)
index 0000000..ac704e5
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef __hw_host1x01_sync_h__
+#define __hw_host1x01_sync_h__
+
+#define REGISTER_STRIDE        4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+       return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+       host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+       return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+       host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+       return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+       host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+       return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+       host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+       return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+       host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+       return (r >> 0) & 0x1ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+       host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+       return (r >> 16) & 0x1ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+       host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+       return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+       host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+       return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+       host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+       return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+       host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+       return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+       host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+       return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+       host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+       return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+       host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+       return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+       host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+       return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+       host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+       return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+       host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+       return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+       host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+       return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+       host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+       return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+       host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+       return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+       host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+       return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+       host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+       return (v & 0x1ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+       host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+       return (v & 0x7) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+       host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+       return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+       host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+       return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+       host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+       return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+       host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+       return (r >> 0) & 0x1ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+       host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+       return (r >> 16) & 0x1ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+       host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+       return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+       host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+       return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+       host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+       host1x_sync_cbstat_cbclass_v(r)
+
+#endif /* __hw_host1x01_sync_h__ */
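Per the naming convention documented at the top of these generated headers, raw register values are decoded with the _v() accessors and fields are packed with the _f() helpers. A self-contained sketch (plain C, not part of the patch; the raw register value is invented) of decoding the CF_SETUP base and limit fields with the shift/mask values defined above:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors host1x_sync_cf_setup_base_v()/_limit_v(): 9-bit fields at bits 0 and 16. */
    static uint32_t cf_setup_base(uint32_t r)  { return (r >> 0) & 0x1ff; }
    static uint32_t cf_setup_limit(uint32_t r) { return (r >> 16) & 0x1ff; }

    int main(void)
    {
            uint32_t r = 0x00400020; /* example raw CF_SETUP value */

            printf("base=%u limit=%u depth=%u\n",
                   cf_setup_base(r), cf_setup_limit(r),
                   cf_setup_limit(r) - cf_setup_base(r));
            return 0;
    }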
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
new file mode 100644 (file)
index 0000000..42f3ce1
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef __hw_host1x_uclass_host1x_h__
+#define __hw_host1x_uclass_host1x_h__
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+       host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+       return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+       host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+       host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+       return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+       host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+       host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+       host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+       return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+       host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+       host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+       return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+       host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+       return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+       host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+       return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+       host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+       return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+       host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+       return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+       host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+       return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+       host1x_uclass_indoff_rwn_read_v()
+#endif
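Combining the uclass helpers, a WAIT_SYNCPT method payload carries the syncpoint index in bits 31:24 and a 24-bit threshold in bits 23:0, which is what host1x_class_host_wait_syncpt() builds and what syncpt_patch_wait() later overwrites. A standalone sketch of that packing (the index and threshold are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors host1x_uclass_wait_syncpt_indx_f()/_thresh_f(). */
    static uint32_t wait_syncpt(unsigned indx, uint32_t thresh)
    {
            return ((indx & 0xff) << 24) | (thresh & 0xffffff);
    }

    int main(void)
    {
            /* wait for syncpoint 22 to reach threshold 0x1234 (illustrative) */
            printf("WAIT_SYNCPT payload: 0x%08x\n", wait_syncpt(22, 0x1234));
            return 0;
    }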
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644 (file)
index 0000000..b592eef
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/irq.h>
+
+#include "intr.h"
+#include "dev.h"
+
+/*
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
+{
+       unsigned int id = syncpt->id;
+       struct host1x *host = syncpt->host;
+
+       host1x_sync_writel(host, BIT_MASK(id),
+               HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+       host1x_sync_writel(host, BIT_MASK(id),
+               HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+
+       queue_work(host->intr_wq, &syncpt->intr.work);
+}
+
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+       struct host1x *host = dev_id;
+       unsigned long reg;
+       int i, id;
+
+       for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
+               reg = host1x_sync_readl(host,
+                       HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+               for_each_set_bit(id, &reg, BITS_PER_LONG) {
+                       struct host1x_syncpt *syncpt =
+                               host->syncpt + (i * BITS_PER_LONG + id);
+                       host1x_intr_syncpt_handle(syncpt);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+       u32 i;
+
+       for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
+               host1x_sync_writel(host, 0xffffffffu,
+                       HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
+               host1x_sync_writel(host, 0xffffffffu,
+                       HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+       }
+}
+
+static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+       void (*syncpt_thresh_work)(struct work_struct *))
+{
+       int i, err;
+
+       host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+       for (i = 0; i < host->info->nb_pts; i++)
+               INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
+
+       err = devm_request_irq(host->dev, host->intr_syncpt_irq,
+                              syncpt_thresh_isr, IRQF_SHARED,
+                              "host1x_syncpt", host);
+       if (IS_ERR_VALUE(err)) {
+               WARN_ON(1);
+               return err;
+       }
+
+       /* disable the ip_busy_timeout. this prevents write drops */
+       host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+       /*
+        * increase the auto-ack timeout to the maximum value. 2D will hang
+        * otherwise on Tegra2.
+        */
+       host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+       /* update host clocks per usec */
+       host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+
+       return 0;
+}
+
+static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
+       u32 id, u32 thresh)
+{
+       host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
+}
+
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+{
+       host1x_sync_writel(host, BIT_MASK(id),
+               HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
+}
+
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+{
+       host1x_sync_writel(host, BIT_MASK(id),
+               HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+       host1x_sync_writel(host, BIT_MASK(id),
+               HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+}
+
+static int _host1x_free_syncpt_irq(struct host1x *host)
+{
+       devm_free_irq(host->dev, host->intr_syncpt_irq, host);
+       flush_workqueue(host->intr_wq);
+       return 0;
+}
+
+static const struct host1x_intr_ops host1x_intr_ops = {
+       .init_host_sync = _host1x_intr_init_host_sync,
+       .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
+       .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
+       .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
+       .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
+       .free_syncpt_irq = _host1x_free_syncpt_irq,
+};
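The threshold interrupt registers are banked: the ISR and the enable/disable helpers above pick the register word with BIT_WORD(id) and the bit within it with BIT_MASK(id), which on the 32-bit Tegra platforms this driver targets (BITS_PER_LONG == 32) maps each syncpoint id to one bit of a 32-bit register. A standalone illustration of that mapping (plain C, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_WORD   32
    #define WORD(id)        ((id) / BITS_PER_WORD)          /* like BIT_WORD() */
    #define MASK(id)        (1u << ((id) % BITS_PER_WORD))  /* like BIT_MASK() */

    int main(void)
    {
            unsigned int ids[] = { 0, 5, 31, 32, 37 };

            for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    printf("syncpt %2u -> register word %u, bit mask 0x%08x\n",
                           ids[i], WORD(ids[i]), MASK(ids[i]));
            return 0;
    }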
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
new file mode 100644 (file)
index 0000000..6117499
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "syncpt.h"
+
+/*
+ * Write the current syncpoint value back to hw.
+ */
+static void syncpt_restore(struct host1x_syncpt *sp)
+{
+       struct host1x *host = sp->host;
+       int min = host1x_syncpt_read_min(sp);
+       host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
+}
+
+/*
+ * Write the current waitbase value back to hw.
+ */
+static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
+{
+       struct host1x *host = sp->host;
+       host1x_sync_writel(host, sp->base_val,
+                          HOST1X_SYNC_SYNCPT_BASE(sp->id));
+}
+
+/*
+ * Read waitbase value from hw.
+ */
+static void syncpt_read_wait_base(struct host1x_syncpt *sp)
+{
+       struct host1x *host = sp->host;
+       sp->base_val =
+               host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+}
+
+/*
+ * Updates the last value read from hardware.
+ */
+static u32 syncpt_load(struct host1x_syncpt *sp)
+{
+       struct host1x *host = sp->host;
+       u32 old, live;
+
+       /* Loop in case there's a race writing to min_val */
+       do {
+               old = host1x_syncpt_read_min(sp);
+               live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
+       } while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);
+
+       if (!host1x_syncpt_check_max(sp, live))
+               dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
+                       __func__, sp->id, host1x_syncpt_read_min(sp),
+                       host1x_syncpt_read_max(sp));
+
+       return live;
+}
+
+/*
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache.
+ */
+static void syncpt_cpu_incr(struct host1x_syncpt *sp)
+{
+       struct host1x *host = sp->host;
+       u32 reg_offset = sp->id / 32;
+
+       if (!host1x_syncpt_client_managed(sp) &&
+           host1x_syncpt_idle(sp)) {
+               dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
+                       sp->id);
+               host1x_debug_dump(sp->host);
+               return;
+       }
+       host1x_sync_writel(host, BIT_MASK(sp->id),
+                          HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
+       wmb();
+}
+
+/* remove a wait pointed to by patch_addr */
+static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
+{
+       u32 override = host1x_class_host_wait_syncpt(
+               HOST1X_SYNCPT_RESERVED, 0);
+
+       *((u32 *)patch_addr) = override;
+       return 0;
+}
+
+static const struct host1x_syncpt_ops host1x_syncpt_ops = {
+       .restore = syncpt_restore,
+       .restore_wait_base = syncpt_restore_wait_base,
+       .load_wait_base = syncpt_read_wait_base,
+       .load = syncpt_load,
+       .cpu_incr = syncpt_cpu_incr,
+       .patch_wait = syncpt_patch_wait,
+};
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
new file mode 100644 (file)
index 0000000..2491bf8
--- /dev/null
@@ -0,0 +1,354 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <trace/events/host1x.h>
+#include "channel.h"
+#include "dev.h"
+#include "intr.h"
+
+/* Wait list management */
+
+enum waitlist_state {
+       WLS_PENDING,
+       WLS_REMOVED,
+       WLS_CANCELLED,
+       WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+       kfree(container_of(kref, struct host1x_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
+                               struct list_head *queue)
+{
+       struct host1x_waitlist *pos;
+       u32 thresh = waiter->thresh;
+
+       list_for_each_entry_reverse(pos, queue, list)
+               if ((s32)(pos->thresh - thresh) <= 0) {
+                       list_add(&waiter->list, &pos->list);
+                       return false;
+               }
+
+       list_add(&waiter->list, queue);
+       return true;
+}
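add_waiter_to_queue() keeps each syncpoint's wait list sorted by threshold, and both it and remove_completed_waiters() compare thresholds through a signed 32-bit difference so the ordering stays correct across counter wraparound. A standalone sketch of why that comparison works (values chosen near the wrap point):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns nonzero if threshold 'a' is at or before 'b', wrap-safe. */
    static int thresh_before_eq(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) <= 0;
    }

    int main(void)
    {
            /* Near the 32-bit wrap point, 0xfffffff0 still sorts before 0x10. */
            printf("%d\n", thresh_before_eq(0xfffffff0u, 0x00000010u)); /* 1 */
            printf("%d\n", thresh_before_eq(0x00000010u, 0xfffffff0u)); /* 0 */
            printf("%d\n", thresh_before_eq(5u, 5u));                   /* 1 */
            return 0;
    }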
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+                       struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+       struct list_head *dest;
+       struct host1x_waitlist *waiter, *next, *prev;
+
+       list_for_each_entry_safe(waiter, next, head, list) {
+               if ((s32)(waiter->thresh - sync) > 0)
+                       break;
+
+               dest = completed + waiter->action;
+
+               /* consolidate submit cleanups */
+               if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
+                   !list_empty(dest)) {
+                       prev = list_entry(dest->prev,
+                                         struct host1x_waitlist, list);
+                       if (prev->data == waiter->data) {
+                               prev->count++;
+                               dest = NULL;
+                       }
+               }
+
+               /* PENDING->REMOVED or CANCELLED->HANDLED */
+               if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+                       list_del(&waiter->list);
+                       kref_put(&waiter->refcount, waiter_release);
+               } else
+                       list_move_tail(&waiter->list, dest);
+       }
+}
+
+static void reset_threshold_interrupt(struct host1x *host,
+                                     struct list_head *head,
+                                     unsigned int id)
+{
+       u32 thresh =
+               list_first_entry(head, struct host1x_waitlist, list)->thresh;
+
+       host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+       host1x_hw_intr_enable_syncpt_intr(host, id);
+}
+
+static void action_submit_complete(struct host1x_waitlist *waiter)
+{
+       struct host1x_channel *channel = waiter->data;
+
+       host1x_cdma_update(&channel->cdma);
+
+       /* Add nr_completed to trace */
+       trace_host1x_channel_submit_complete(dev_name(channel->dev),
+                                            waiter->count, waiter->thresh);
+}
+
+static void action_wakeup(struct host1x_waitlist *waiter)
+{
+       wait_queue_head_t *wq = waiter->data;
+       wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
+{
+       wait_queue_head_t *wq = waiter->data;
+       wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct host1x_waitlist *waiter);
+
+static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+       action_submit_complete,
+       action_wakeup,
+       action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+       struct list_head *head = completed;
+       int i;
+
+       for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
+               action_handler handler = action_handlers[i];
+               struct host1x_waitlist *waiter, *next;
+
+               list_for_each_entry_safe(waiter, next, head, list) {
+                       list_del(&waiter->list);
+                       handler(waiter);
+                       WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
+                               WLS_REMOVED);
+                       kref_put(&waiter->refcount, waiter_release);
+               }
+       }
+}
+
+/*
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct host1x *host,
+                            struct host1x_syncpt *syncpt,
+                            u32 threshold)
+{
+       struct list_head completed[HOST1X_INTR_ACTION_COUNT];
+       unsigned int i;
+       int empty;
+
+       for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
+               INIT_LIST_HEAD(completed + i);
+
+       spin_lock(&syncpt->intr.lock);
+
+       remove_completed_waiters(&syncpt->intr.wait_head, threshold,
+                                completed);
+
+       empty = list_empty(&syncpt->intr.wait_head);
+       if (empty)
+               host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
+       else
+               reset_threshold_interrupt(host, &syncpt->intr.wait_head,
+                                         syncpt->id);
+
+       spin_unlock(&syncpt->intr.lock);
+
+       run_handlers(completed);
+
+       return empty;
+}
+
+/*
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+
+static void syncpt_thresh_work(struct work_struct *work)
+{
+       struct host1x_syncpt_intr *syncpt_intr =
+               container_of(work, struct host1x_syncpt_intr, work);
+       struct host1x_syncpt *syncpt =
+               container_of(syncpt_intr, struct host1x_syncpt, intr);
+       unsigned int id = syncpt->id;
+       struct host1x *host = syncpt->host;
+
+       (void)process_wait_list(host, syncpt,
+                               host1x_syncpt_load(host->syncpt + id));
+}
+
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+                          enum host1x_intr_action action, void *data,
+                          struct host1x_waitlist *waiter, void **ref)
+{
+       struct host1x_syncpt *syncpt;
+       int queue_was_empty;
+
+       if (waiter == NULL) {
+               pr_warn("%s: NULL waiter\n", __func__);
+               return -EINVAL;
+       }
+
+       /* initialize a new waiter */
+       INIT_LIST_HEAD(&waiter->list);
+       kref_init(&waiter->refcount);
+       if (ref)
+               kref_get(&waiter->refcount);
+       waiter->thresh = thresh;
+       waiter->action = action;
+       atomic_set(&waiter->state, WLS_PENDING);
+       waiter->data = data;
+       waiter->count = 1;
+
+       syncpt = host->syncpt + id;
+
+       spin_lock(&syncpt->intr.lock);
+
+       queue_was_empty = list_empty(&syncpt->intr.wait_head);
+
+       if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
+               /* added at head of list - new threshold value */
+               host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+
+               /* added as first waiter - enable interrupt */
+               if (queue_was_empty)
+                       host1x_hw_intr_enable_syncpt_intr(host, id);
+       }
+
+       spin_unlock(&syncpt->intr.lock);
+
+       if (ref)
+               *ref = waiter;
+       return 0;
+}
+
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+{
+       struct host1x_waitlist *waiter = ref;
+       struct host1x_syncpt *syncpt;
+
+       while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
+              WLS_REMOVED)
+               schedule();
+
+       syncpt = host->syncpt + id;
+       (void)process_wait_list(host, syncpt,
+                               host1x_syncpt_load(host->syncpt + id));
+
+       kref_put(&waiter->refcount, waiter_release);
+}
+
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
+{
+       unsigned int id;
+       u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+       mutex_init(&host->intr_mutex);
+       host->intr_syncpt_irq = irq_sync;
+       host->intr_wq = create_workqueue("host_syncpt");
+       if (!host->intr_wq)
+               return -ENOMEM;
+
+       for (id = 0; id < nb_pts; ++id) {
+               struct host1x_syncpt *syncpt = host->syncpt + id;
+
+               spin_lock_init(&syncpt->intr.lock);
+               INIT_LIST_HEAD(&syncpt->intr.wait_head);
+               snprintf(syncpt->intr.thresh_irq_name,
+                        sizeof(syncpt->intr.thresh_irq_name),
+                        "host1x_sp_%02d", id);
+       }
+
+       host1x_intr_start(host);
+
+       return 0;
+}
+
+void host1x_intr_deinit(struct host1x *host)
+{
+       host1x_intr_stop(host);
+       destroy_workqueue(host->intr_wq);
+}
+
+void host1x_intr_start(struct host1x *host)
+{
+       u32 hz = clk_get_rate(host->clk);
+       int err;
+
+       mutex_lock(&host->intr_mutex);
+       err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
+                                           syncpt_thresh_work);
+       if (err) {
+               mutex_unlock(&host->intr_mutex);
+               return;
+       }
+       mutex_unlock(&host->intr_mutex);
+}
+
+void host1x_intr_stop(struct host1x *host)
+{
+       unsigned int id;
+       struct host1x_syncpt *syncpt = host->syncpt;
+       u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+       mutex_lock(&host->intr_mutex);
+
+       host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+       for (id = 0; id < nb_pts; ++id) {
+               struct host1x_waitlist *waiter, *next;
+
+               list_for_each_entry_safe(waiter, next,
+                       &syncpt[id].intr.wait_head, list) {
+                       if (atomic_cmpxchg(&waiter->state,
+                           WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
+                               list_del(&waiter->list);
+                               kref_put(&waiter->refcount, waiter_release);
+                       }
+               }
+
+               if (!list_empty(&syncpt[id].intr.wait_head)) {
+                       /* output diagnostics */
+                       mutex_unlock(&host->intr_mutex);
+                       pr_warn("%s cannot stop syncpt intr id=%d\n",
+                               __func__, id);
+                       return;
+               }
+       }
+
+       host1x_hw_intr_free_syncpt_irq(host);
+
+       mutex_unlock(&host->intr_mutex);
+}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
new file mode 100644 (file)
index 0000000..2b8adf0
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_INTR_H
+#define __HOST1X_INTR_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct host1x;
+
+enum host1x_intr_action {
+       /*
+        * Perform cleanup after a submit has completed.
+        * 'data' points to a channel
+        */
+       HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+       /*
+        * Wake up a task.
+        * 'data' points to a wait_queue_head_t
+        */
+       HOST1X_INTR_ACTION_WAKEUP,
+
+       /*
+        * Wake up an interruptible task.
+        * 'data' points to a wait_queue_head_t
+        */
+       HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+       HOST1X_INTR_ACTION_COUNT
+};
+
+struct host1x_syncpt_intr {
+       spinlock_t lock;
+       struct list_head wait_head;
+       char thresh_irq_name[12];
+       struct work_struct work;
+};
+
+struct host1x_waitlist {
+       struct list_head list;
+       struct kref refcount;
+       u32 thresh;
+       enum host1x_intr_action action;
+       atomic_t state;
+       void *data;
+       int count;
+};
+
+/*
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter structure - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking API.
+ */
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+       enum host1x_intr_action action, void *data,
+       struct host1x_waitlist *waiter, void **ref);
+
+/*
+ * Unreference an action submitted to host1x_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from host1x_intr_add_action()
+ */
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+
+/* Initialize host1x sync point interrupt */
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
+
+/* Deinitialize host1x sync point interrupt */
+void host1x_intr_deinit(struct host1x *host);
+
+/* Enable host1x sync point interrupt */
+void host1x_intr_start(struct host1x *host);
+
+/* Disable host1x sync point interrupt */
+void host1x_intr_stop(struct host1x *host);
+
+irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
+#endif
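For reference, the add_action/put_ref API above is meant to be driven by allocating a host1x_waitlist, registering it against a syncpoint threshold, sleeping on the wait queue passed as data, and dropping the reference afterwards. A hedged kernel-style sketch, not taken from this patch (example_wait_for_syncpt and its done() callback are hypothetical, and locking/error handling are simplified):

    #include <linux/jiffies.h>
    #include <linux/slab.h>
    #include <linux/wait.h>

    #include "intr.h"

    /* Sketch only: sleep until a caller-supplied done() condition holds,
     * using the wakeup action to kick the wait queue when syncpoint 'id'
     * crosses 'thresh'. */
    static int example_wait_for_syncpt(struct host1x *host, u32 id, u32 thresh,
                                       bool (*done)(void *), void *arg)
    {
            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
            struct host1x_waitlist *waiter;
            void *ref = NULL;
            int err;

            waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
            if (!waiter)
                    return -ENOMEM;

            /* the intr core takes ownership of 'waiter'; 'ref' allows cancellation */
            err = host1x_intr_add_action(host, id, thresh,
                                         HOST1X_INTR_ACTION_WAKEUP, &wq,
                                         waiter, &ref);
            if (err) {
                    kfree(waiter);
                    return err;
            }

            wait_event_timeout(wq, done(arg), msecs_to_jiffies(100));

            host1x_intr_put_ref(host, id, ref);
            return 0;
    }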
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
new file mode 100644 (file)
index 0000000..f665d67
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * Tegra host1x Job
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <trace/events/host1x.h>
+
+#include "channel.h"
+#include "dev.h"
+#include "host1x_bo.h"
+#include "job.h"
+#include "syncpt.h"
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+                                   u32 num_cmdbufs, u32 num_relocs,
+                                   u32 num_waitchks)
+{
+       struct host1x_job *job = NULL;
+       unsigned int num_unpins = num_cmdbufs + num_relocs;
+       u64 total;
+       void *mem;
+
+       /* Check that we're not going to overflow */
+       total = sizeof(struct host1x_job) +
+               num_relocs * sizeof(struct host1x_reloc) +
+               num_unpins * sizeof(struct host1x_job_unpin_data) +
+               num_waitchks * sizeof(struct host1x_waitchk) +
+               num_cmdbufs * sizeof(struct host1x_job_gather) +
+               num_unpins * sizeof(dma_addr_t) +
+               num_unpins * sizeof(u32 *);
+       if (total > ULONG_MAX)
+               return NULL;
+
+       mem = job = kzalloc(total, GFP_KERNEL);
+       if (!job)
+               return NULL;
+
+       kref_init(&job->ref);
+       job->channel = ch;
+
+       /* Redistribute memory to the structs  */
+       mem += sizeof(struct host1x_job);
+       job->relocarray = num_relocs ? mem : NULL;
+       mem += num_relocs * sizeof(struct host1x_reloc);
+       job->unpins = num_unpins ? mem : NULL;
+       mem += num_unpins * sizeof(struct host1x_job_unpin_data);
+       job->waitchk = num_waitchks ? mem : NULL;
+       mem += num_waitchks * sizeof(struct host1x_waitchk);
+       job->gathers = num_cmdbufs ? mem : NULL;
+       mem += num_cmdbufs * sizeof(struct host1x_job_gather);
+       job->addr_phys = num_unpins ? mem : NULL;
+
+       job->reloc_addr_phys = job->addr_phys;
+       job->gather_addr_phys = &job->addr_phys[num_relocs];
+
+       return job;
+}
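host1x_job_alloc() sizes one allocation for the job header plus its reloc, unpin, waitchk, gather and physical-address arrays, then hands out sub-regions of it, so job_free() can release everything with a single kfree(). A standalone toy version of the same sub-allocation pattern (plain C, illustrative types only):

    #include <stdio.h>
    #include <stdlib.h>

    struct item { int a, b; };

    struct bundle {
            size_t nitems;
            struct item *items;     /* carved out of the same allocation */
            unsigned int *flags;    /* likewise */
    };

    static struct bundle *bundle_alloc(size_t nitems)
    {
            size_t total = sizeof(struct bundle) +
                           nitems * sizeof(struct item) +
                           nitems * sizeof(unsigned int);
            char *mem = calloc(1, total);
            struct bundle *b = (struct bundle *)mem;

            if (!b)
                    return NULL;

            /* hand out the regions that follow the header, as the job code does */
            mem += sizeof(struct bundle);
            b->items = nitems ? (struct item *)mem : NULL;
            mem += nitems * sizeof(struct item);
            b->flags = nitems ? (unsigned int *)mem : NULL;
            b->nitems = nitems;
            return b;
    }

    int main(void)
    {
            struct bundle *b = bundle_alloc(4);

            if (!b)
                    return 1;
            b->items[3].a = 42;
            b->flags[3] = 7;
            printf("%d %u\n", b->items[3].a, b->flags[3]);
            free(b); /* one free() releases everything */
            return 0;
    }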
+
+struct host1x_job *host1x_job_get(struct host1x_job *job)
+{
+       kref_get(&job->ref);
+       return job;
+}
+
+static void job_free(struct kref *ref)
+{
+       struct host1x_job *job = container_of(ref, struct host1x_job, ref);
+
+       kfree(job);
+}
+
+void host1x_job_put(struct host1x_job *job)
+{
+       kref_put(&job->ref, job_free);
+}
+
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+                          u32 words, u32 offset)
+{
+       struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
+
+       cur_gather->words = words;
+       cur_gather->bo = bo;
+       cur_gather->offset = offset;
+       job->num_gathers++;
+}
+
+/*
+ * NULL an already satisfied WAIT_SYNCPT host method, by patching its
+ * args in the command stream. The method data is changed to reference
+ * a reserved (never allocated or incremented) HOST1X_SYNCPT_RESERVED
+ * syncpt with a matching threshold value of 0, so it is guaranteed to
+ * be popped by the host HW.
+ */
+static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
+                                      struct host1x_bo *h, u32 offset)
+{
+       void *patch_addr = NULL;
+
+       /* patch the wait */
+       patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
+       if (patch_addr) {
+               host1x_syncpt_patch_wait(sp,
+                                        patch_addr + (offset & ~PAGE_MASK));
+               host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
+       } else
+               pr_err("Could not map cmdbuf for wait check\n");
+}
+
+/*
+ * Check driver supplied waitchk structs for syncpt thresholds
+ * that have already been satisfied and NULL the comparison (to
+ * avoid a wrap condition in the HW).
+ */
+static int do_waitchks(struct host1x_job *job, struct host1x *host,
+                      struct host1x_bo *patch)
+{
+       int i;
+
+       /* compare syncpt vs wait threshold */
+       for (i = 0; i < job->num_waitchk; i++) {
+               struct host1x_waitchk *wait = &job->waitchk[i];
+               struct host1x_syncpt *sp =
+                       host1x_syncpt_get(host, wait->syncpt_id);
+
+               /* validate syncpt id */
+               if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
+                       continue;
+
+               /* skip all other gathers */
+               if (patch != wait->bo)
+                       continue;
+
+               trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
+                                              wait->syncpt_id, wait->thresh,
+                                              host1x_syncpt_read_min(sp));
+
+               if (host1x_syncpt_is_expired(sp, wait->thresh)) {
+                       dev_dbg(host->dev,
+                               "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+                               wait->syncpt_id, sp->name, wait->thresh,
+                               host1x_syncpt_read_min(sp));
+
+                       host1x_syncpt_patch_offset(sp, patch, wait->offset);
+               }
+
+               wait->bo = NULL;
+       }
+
+       return 0;
+}
+
+static unsigned int pin_job(struct host1x_job *job)
+{
+       unsigned int i;
+
+       job->num_unpins = 0;
+
+       for (i = 0; i < job->num_relocs; i++) {
+               struct host1x_reloc *reloc = &job->relocarray[i];
+               struct sg_table *sgt;
+               dma_addr_t phys_addr;
+
+               reloc->target = host1x_bo_get(reloc->target);
+               if (!reloc->target)
+                       goto unpin;
+
+               phys_addr = host1x_bo_pin(reloc->target, &sgt);
+               if (!phys_addr)
+                       goto unpin;
+
+               job->addr_phys[job->num_unpins] = phys_addr;
+               job->unpins[job->num_unpins].bo = reloc->target;
+               job->unpins[job->num_unpins].sgt = sgt;
+               job->num_unpins++;
+       }
+
+       for (i = 0; i < job->num_gathers; i++) {
+               struct host1x_job_gather *g = &job->gathers[i];
+               struct sg_table *sgt;
+               dma_addr_t phys_addr;
+
+               g->bo = host1x_bo_get(g->bo);
+               if (!g->bo)
+                       goto unpin;
+
+               phys_addr = host1x_bo_pin(g->bo, &sgt);
+               if (!phys_addr)
+                       goto unpin;
+
+               job->addr_phys[job->num_unpins] = phys_addr;
+               job->unpins[job->num_unpins].bo = g->bo;
+               job->unpins[job->num_unpins].sgt = sgt;
+               job->num_unpins++;
+       }
+
+       return job->num_unpins;
+
+unpin:
+       host1x_job_unpin(job);
+       return 0;
+}
+
+static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+{
+       int i = 0;
+       u32 last_page = ~0;
+       void *cmdbuf_page_addr = NULL;
+
+       /* pin & patch the relocs for one gather */
+       while (i < job->num_relocs) {
+               struct host1x_reloc *reloc = &job->relocarray[i];
+               u32 reloc_addr = (job->reloc_addr_phys[i] +
+                       reloc->target_offset) >> reloc->shift;
+               u32 *target;
+
+               /* skip all other gathers */
+               if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
+                       i++;
+                       continue;
+               }
+
+               if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
+                       if (cmdbuf_page_addr)
+                               host1x_bo_kunmap(cmdbuf, last_page,
+                                                cmdbuf_page_addr);
+
+                       cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
+                                       reloc->cmdbuf_offset >> PAGE_SHIFT);
+                       last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
+
+                       if (unlikely(!cmdbuf_page_addr)) {
+                               pr_err("Could not map cmdbuf for relocation\n");
+                               return -ENOMEM;
+                       }
+               }
+
+               target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
+               *target = reloc_addr;
+
+               /* mark this reloc as handled */
+               reloc->cmdbuf = NULL;
+       }
+
+       if (cmdbuf_page_addr)
+               host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
+
+       return 0;
+}
+
+static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
+                      unsigned int offset)
+{
+       offset *= sizeof(u32);
+
+       if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
+               return -EINVAL;
+
+       return 0;
+}
+
+struct host1x_firewall {
+       struct host1x_job *job;
+       struct device *dev;
+
+       unsigned int num_relocs;
+       struct host1x_reloc *reloc;
+
+       struct host1x_bo *cmdbuf_id;
+       unsigned int offset;
+
+       u32 words;
+       u32 class;
+       u32 reg;
+       u32 mask;
+       u32 count;
+};
+
+static int check_mask(struct host1x_firewall *fw)
+{
+       u32 mask = fw->mask;
+       u32 reg = fw->reg;
+
+       while (mask) {
+               if (fw->words == 0)
+                       return -EINVAL;
+
+               if (mask & 1) {
+                       if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
+                               bool bad_reloc = check_reloc(fw->reloc,
+                                                            fw->cmdbuf_id,
+                                                            fw->offset);
+                               if (!fw->num_relocs || bad_reloc)
+                                       return -EINVAL;
+                               fw->reloc++;
+                               fw->num_relocs--;
+                       }
+                       fw->words--;
+                       fw->offset++;
+               }
+               mask >>= 1;
+               reg++;
+       }
+
+       return 0;
+}
+
+static int check_incr(struct host1x_firewall *fw)
+{
+       u32 count = fw->count;
+       u32 reg = fw->reg;
+
+       while (count) {
+               if (fw->words == 0)
+                       return -EINVAL;
+
+               if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
+                       bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
+                                                    fw->offset);
+                       if (!fw->num_relocs || bad_reloc)
+                               return -EINVAL;
+                       fw->reloc++;
+                       fw->num_relocs--;
+               }
+               reg++;
+               fw->words--;
+               fw->offset++;
+               count--;
+       }
+
+       return 0;
+}
+
+static int check_nonincr(struct host1x_firewall *fw)
+{
+       int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
+       u32 count = fw->count;
+
+       while (count) {
+               if (fw->words == 0)
+                       return -EINVAL;
+
+               if (is_addr_reg) {
+                       bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
+                                                    fw->offset);
+                       if (!fw->num_relocs || bad_reloc)
+                               return -EINVAL;
+                       fw->reloc++;
+                       fw->num_relocs--;
+               }
+               fw->words--;
+               fw->offset++;
+               count--;
+       }
+
+       return 0;
+}
+
+static int validate(struct host1x_job *job, struct device *dev,
+                   struct host1x_job_gather *g)
+{
+       u32 *cmdbuf_base;
+       int err = 0;
+       struct host1x_firewall fw;
+
+       fw.job = job;
+       fw.dev = dev;
+       fw.reloc = job->relocarray;
+       fw.num_relocs = job->num_relocs;
+       fw.cmdbuf_id = g->bo;
+
+       fw.offset = 0;
+       fw.class = 0;
+
+       if (!job->is_addr_reg)
+               return 0;
+
+       cmdbuf_base = host1x_bo_mmap(g->bo);
+       if (!cmdbuf_base)
+               return -ENOMEM;
+
+       fw.words = g->words;
+       while (fw.words && !err) {
+               u32 word = cmdbuf_base[fw.offset];
+               u32 opcode = (word & 0xf0000000) >> 28;
+
+               fw.mask = 0;
+               fw.reg = 0;
+               fw.count = 0;
+               fw.words--;
+               fw.offset++;
+
+               switch (opcode) {
+               case 0:
+                       fw.class = word >> 6 & 0x3ff;
+                       fw.mask = word & 0x3f;
+                       fw.reg = word >> 16 & 0xfff;
+                       err = check_mask(&fw);
+                       if (err)
+                               goto out;
+                       break;
+               case 1:
+                       fw.reg = word >> 16 & 0xfff;
+                       fw.count = word & 0xffff;
+                       err = check_incr(&fw);
+                       if (err)
+                               goto out;
+                       break;
+
+               case 2:
+                       fw.reg = word >> 16 & 0xfff;
+                       fw.count = word & 0xffff;
+                       err = check_nonincr(&fw);
+                       if (err)
+                               goto out;
+                       break;
+
+               case 3:
+                       fw.mask = word & 0xffff;
+                       fw.reg = word >> 16 & 0xfff;
+                       err = check_mask(&fw);
+                       if (err)
+                               goto out;
+                       break;
+               case 4:
+               case 5:
+               case 14:
+                       break;
+               default:
+                       err = -EINVAL;
+                       break;
+               }
+       }
+
+       /* No relocs should remain at this point */
+       if (fw.num_relocs)
+               err = -EINVAL;
+
+out:
+       host1x_bo_munmap(g->bo, cmdbuf_base);
+
+       return err;
+}
+
+static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+{
+       size_t size = 0;
+       size_t offset = 0;
+       int i;
+
+       for (i = 0; i < job->num_gathers; i++) {
+               struct host1x_job_gather *g = &job->gathers[i];
+               size += g->words * sizeof(u32);
+       }
+
+       job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
+                                                        &job->gather_copy,
+                                                        GFP_KERNEL);
+       if (!job->gather_copy_mapped)
+               return -ENOMEM;
+
+       job->gather_copy_size = size;
+
+       for (i = 0; i < job->num_gathers; i++) {
+               struct host1x_job_gather *g = &job->gathers[i];
+               void *gather;
+
+               gather = host1x_bo_mmap(g->bo);
+               memcpy(job->gather_copy_mapped + offset, gather + g->offset,
+                      g->words * sizeof(u32));
+               host1x_bo_munmap(g->bo, gather);
+
+               g->base = job->gather_copy;
+               g->offset = offset;
+               g->bo = NULL;
+
+               offset += g->words * sizeof(u32);
+       }
+
+       return 0;
+}
+
+int host1x_job_pin(struct host1x_job *job, struct device *dev)
+{
+       int err;
+       unsigned int i, j;
+       struct host1x *host = dev_get_drvdata(dev->parent);
+       DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
+
+       bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
+       for (i = 0; i < job->num_waitchk; i++) {
+               u32 syncpt_id = job->waitchk[i].syncpt_id;
+               if (syncpt_id < host1x_syncpt_nb_pts(host))
+                       set_bit(syncpt_id, waitchk_mask);
+       }
+
+       /* get current syncpt values for waitchk */
+       for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
+               host1x_syncpt_load(host->syncpt + i);
+
+       /* pin memory */
+       err = pin_job(job);
+       if (!err)
+               goto out;
+
+       /* patch gathers */
+       for (i = 0; i < job->num_gathers; i++) {
+               struct host1x_job_gather *g = &job->gathers[i];
+
+               /* process each gather mem only once */
+               if (g->handled)
+                       continue;
+
+               g->base = job->gather_addr_phys[i];
+
+               for (j = 0; j < job->num_gathers; j++)
+                       if (job->gathers[j].bo == g->bo)
+                               job->gathers[j].handled = true;
+
+               err = 0;
+
+               if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+                       err = validate(job, dev, g);
+
+               if (err)
+                       dev_err(dev, "Job invalid (err=%d)\n", err);
+
+               if (!err)
+                       err = do_relocs(job, g->bo);
+
+               if (!err)
+                       err = do_waitchks(job, host, g->bo);
+
+               if (err)
+                       break;
+       }
+
+       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
+               err = copy_gathers(job, dev);
+               if (err) {
+                       host1x_job_unpin(job);
+                       return err;
+               }
+       }
+
+out:
+       wmb();
+
+       return err;
+}
+
+void host1x_job_unpin(struct host1x_job *job)
+{
+       unsigned int i;
+
+       for (i = 0; i < job->num_unpins; i++) {
+               struct host1x_job_unpin_data *unpin = &job->unpins[i];
+               host1x_bo_unpin(unpin->bo, unpin->sgt);
+               host1x_bo_put(unpin->bo);
+       }
+       job->num_unpins = 0;
+
+       if (job->gather_copy_size)
+               dma_free_writecombine(job->channel->dev, job->gather_copy_size,
+                                     job->gather_copy_mapped,
+                                     job->gather_copy);
+}
+
+/*
+ * Debug routine used to dump job entries
+ */
+void host1x_job_dump(struct device *dev, struct host1x_job *job)
+{
+       dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
+       dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
+       dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
+       dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
+       dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
+       dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
+}
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
new file mode 100644 (file)
index 0000000..fba45f2
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Tegra host1x Job
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_JOB_H
+#define __HOST1X_JOB_H
+
+struct host1x_job_gather {
+       u32 words;
+       dma_addr_t base;
+       struct host1x_bo *bo;
+       int offset;
+       bool handled;
+};
+
+struct host1x_cmdbuf {
+       u32 handle;
+       u32 offset;
+       u32 words;
+       u32 pad;
+};
+
+struct host1x_reloc {
+       struct host1x_bo *cmdbuf;
+       u32 cmdbuf_offset;
+       struct host1x_bo *target;
+       u32 target_offset;
+       u32 shift;
+       u32 pad;
+};
+
+struct host1x_waitchk {
+       struct host1x_bo *bo;
+       u32 offset;
+       u32 syncpt_id;
+       u32 thresh;
+};
+
+struct host1x_job_unpin_data {
+       struct host1x_bo *bo;
+       struct sg_table *sgt;
+};
+
+/*
+ * Each submit is tracked as a host1x_job.
+ */
+struct host1x_job {
+       /* When refcount goes to zero, job can be freed */
+       struct kref ref;
+
+       /* List entry */
+       struct list_head list;
+
+       /* Channel where job is submitted to */
+       struct host1x_channel *channel;
+
+       u32 client;
+
+       /* Gathers and their memory */
+       struct host1x_job_gather *gathers;
+       unsigned int num_gathers;
+
+       /* Wait checks to be processed at submit time */
+       struct host1x_waitchk *waitchk;
+       unsigned int num_waitchk;
+       u32 waitchk_mask;
+
+       /* Array of handles to be pinned & unpinned */
+       struct host1x_reloc *relocarray;
+       unsigned int num_relocs;
+       struct host1x_job_unpin_data *unpins;
+       unsigned int num_unpins;
+
+       dma_addr_t *addr_phys;
+       dma_addr_t *gather_addr_phys;
+       dma_addr_t *reloc_addr_phys;
+
+       /* Sync point id, number of increments and end related to the submit */
+       u32 syncpt_id;
+       u32 syncpt_incrs;
+       u32 syncpt_end;
+
+       /* Maximum time to wait for this job */
+       unsigned int timeout;
+
+       /* Index and number of slots used in the push buffer */
+       unsigned int first_get;
+       unsigned int num_slots;
+
+       /* Copy of gathers */
+       size_t gather_copy_size;
+       dma_addr_t gather_copy;
+       u8 *gather_copy_mapped;
+
+       /* Check if register is marked as an address reg */
+       int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+       /* Request a SETCLASS to this class */
+       u32 class;
+
+       /* Add a channel wait for previous ops to complete */
+       bool serialize;
+};
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit.
+ */
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+                                   u32 num_cmdbufs, u32 num_relocs,
+                                   u32 num_waitchks);
+
+/*
+ * Add a gather to a job.
+ */
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+                          u32 words, u32 offset);
+
+/*
+ * Increment the reference count of a host1x_job.
+ */
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+
+/*
+ * Decrement the reference count of a job; free it when the count reaches
+ * zero.
+ */
+void host1x_job_put(struct host1x_job *job);
+
+/*
+ * Pin memory related to job. This handles relocation of addresses to the
+ * host1x address space. Handles both the gather memory and any other memory
+ * referred to from the gather buffers.
+ *
+ * Also handles patching out host waits that would wait for an expired
+ * sync point value.
+ */
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+
+/*
+ * Unpin memory related to job.
+ */
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * Dump contents of job to debug output.
+ */
+void host1x_job_dump(struct device *dev, struct host1x_job *job);
+
+#endif
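
A hypothetical caller-side sketch of how the declarations above are meant to fit together; the channel, device and command-buffer BO are assumed to come from elsewhere in the driver, and the actual channel submission step is elided.

/* Sketch only: allocate a job with room for one gather, pin it, submit
 * (elided) and release it. Helper name and error codes are assumptions.
 */
static int submit_one_gather(struct host1x_channel *channel,
			     struct device *dev, struct host1x_bo *cmdbuf,
			     u32 words)
{
	struct host1x_job *job;
	int err;

	/* one command buffer, no relocations, no wait checks */
	job = host1x_job_alloc(channel, 1, 0, 0);
	if (!job)
		return -ENOMEM;

	host1x_job_add_gather(job, cmdbuf, words, 0);

	/* pin buffers and patch addresses/expired waits before submission */
	err = host1x_job_pin(job, dev);
	if (err)
		goto put;

	/* ... hand the job to the channel here ... */

	host1x_job_unpin(job);
put:
	host1x_job_put(job);
	return err;
}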
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
new file mode 100644 (file)
index 0000000..4b49345
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <trace/events/host1x.h>
+
+#include "syncpt.h"
+#include "dev.h"
+#include "intr.h"
+#include "debug.h"
+
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+#define MAX_STUCK_CHECK_COUNT 15
+
+static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
+                                                 struct device *dev,
+                                                 int client_managed)
+{
+       int i;
+       struct host1x_syncpt *sp = host->syncpt;
+       char *name;
+
+       for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+               ;
+       if (sp->dev)
+               return NULL;
+
+       name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+                       dev ? dev_name(dev) : NULL);
+       if (!name)
+               return NULL;
+
+       sp->dev = dev;
+       sp->name = name;
+       sp->client_managed = client_managed;
+
+       return sp;
+}
+
+u32 host1x_syncpt_id(struct host1x_syncpt *sp)
+{
+       return sp->id;
+}
+
+/*
+ * Indicate future operations by incrementing the sync point max value.
+ */
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
+{
+       return (u32)atomic_add_return(incrs, &sp->max_val);
+}
+
+/*
+ * Write cached syncpoint and waitbase values to hardware.
+ */
+void host1x_syncpt_restore(struct host1x *host)
+{
+       struct host1x_syncpt *sp_base = host->syncpt;
+       u32 i;
+
+       for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+               host1x_hw_syncpt_restore(host, sp_base + i);
+       for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
+               host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+       wmb();
+}
+
+/*
+ * Update the cached syncpoint and waitbase values by reading them
+ * from the registers.
+ */
+void host1x_syncpt_save(struct host1x *host)
+{
+       struct host1x_syncpt *sp_base = host->syncpt;
+       u32 i;
+
+       for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+               if (host1x_syncpt_client_managed(sp_base + i))
+                       host1x_hw_syncpt_load(host, sp_base + i);
+               else
+                       WARN_ON(!host1x_syncpt_idle(sp_base + i));
+       }
+
+       for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
+               host1x_hw_syncpt_load_wait_base(host, sp_base + i);
+}
+
+/*
+ * Updates the cached syncpoint value by reading a new value from the hardware
+ * register
+ */
+u32 host1x_syncpt_load(struct host1x_syncpt *sp)
+{
+       u32 val;
+       val = host1x_hw_syncpt_load(sp->host, sp);
+       trace_host1x_syncpt_load_min(sp->id, val);
+
+       return val;
+}
+
+/*
+ * Get the current syncpoint base
+ */
+u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
+{
+       u32 val;
+       host1x_hw_syncpt_load_wait_base(sp->host, sp);
+       val = sp->base_val;
+       return val;
+}
+
+/*
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
+{
+       host1x_hw_syncpt_cpu_incr(sp->host, sp);
+}
+
+/*
+ * Increment syncpoint value from cpu, updating cache
+ */
+void host1x_syncpt_incr(struct host1x_syncpt *sp)
+{
+       if (host1x_syncpt_client_managed(sp))
+               host1x_syncpt_incr_max(sp, 1);
+       host1x_syncpt_cpu_incr(sp);
+}
+
+/*
+ * Update the sync point from hardware and return true if the syncpoint has
+ * expired, false if we may need to wait.
+ */
+static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+       host1x_hw_syncpt_load(sp->host, sp);
+       return host1x_syncpt_is_expired(sp, thresh);
+}
+
+/*
+ * Main entrypoint for syncpoint value waits.
+ */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+                       u32 *value)
+{
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+       void *ref;
+       struct host1x_waitlist *waiter;
+       int err = 0, check_count = 0;
+       u32 val;
+
+       if (value)
+               *value = 0;
+
+       /* first check cache */
+       if (host1x_syncpt_is_expired(sp, thresh)) {
+               if (value)
+                       *value = host1x_syncpt_load(sp);
+               return 0;
+       }
+
+       /* try to read from register */
+       val = host1x_hw_syncpt_load(sp->host, sp);
+       if (host1x_syncpt_is_expired(sp, thresh)) {
+               if (value)
+                       *value = val;
+               goto done;
+       }
+
+       if (!timeout) {
+               err = -EAGAIN;
+               goto done;
+       }
+
+       /* allocate a waiter */
+       waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+       if (!waiter) {
+               err = -ENOMEM;
+               goto done;
+       }
+
+       /* schedule a wakeup when the syncpoint value is reached */
+       err = host1x_intr_add_action(sp->host, sp->id, thresh,
+                                    HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+                                    &wq, waiter, &ref);
+       if (err)
+               goto done;
+
+       err = -EAGAIN;
+       /* Caller-specified timeout may be impractically low */
+       if (timeout < 0)
+               timeout = LONG_MAX;
+
+       /* wait for the syncpoint, or timeout, or signal */
+       while (timeout) {
+               long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
+               int remain = wait_event_interruptible_timeout(wq,
+                               syncpt_load_min_is_expired(sp, thresh),
+                               check);
+               if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
+                       if (value)
+                               *value = host1x_syncpt_load(sp);
+                       err = 0;
+                       break;
+               }
+               if (remain < 0) {
+                       err = remain;
+                       break;
+               }
+               timeout -= check;
+               if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
+                       dev_warn(sp->host->dev,
+                               "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+                                current->comm, sp->id, sp->name,
+                                thresh, timeout);
+
+                       host1x_debug_dump_syncpts(sp->host);
+                       if (check_count == MAX_STUCK_CHECK_COUNT)
+                               host1x_debug_dump(sp->host);
+                       check_count++;
+               }
+       }
+       host1x_intr_put_ref(sp->host, sp->id, ref);
+
+done:
+       return err;
+}
+EXPORT_SYMBOL(host1x_syncpt_wait);
+
+/*
+ * Returns true if syncpoint is expired, false if we may need to wait
+ */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+       u32 current_val;
+       u32 future_val;
+       smp_rmb();
+       current_val = (u32)atomic_read(&sp->min_val);
+       future_val = (u32)atomic_read(&sp->max_val);
+
+       /* Note the use of unsigned arithmetic here (mod 1<<32).
+        *
+        * c = current_val = min_val    = the current value of the syncpoint.
+        * t = thresh                   = the value we are checking
+        * f = future_val  = max_val    = the value c will reach when all
+        *                                outstanding increments have completed.
+        *
+        * Note that c always chases f until it reaches f.
+        *
+        * Dtf = (f - t)
+        * Dtc = (c - t)
+        *
+        *  Consider all cases:
+        *
+        *      A) .....c..t..f.....    Dtf < Dtc       need to wait
+        *      B) .....c.....f..t..    Dtf > Dtc       expired
+        *      C) ..t..c.....f.....    Dtf > Dtc       expired    (Dct very large)
+        *
+        *  Any case where f==c: always expired (for any t).    Dtf == Dcf
+        *  Any case where t==c: always expired (for any f).    Dtf >= Dtc (because Dtc==0)
+        *  Any case where t==f!=c: always wait.                Dtf <  Dtc (because Dtf==0,
+        *                                                      Dtc!=0)
+        *
+        *  Other cases:
+        *
+        *      A) .....t..f..c.....    Dtf < Dtc       need to wait
+        *      A) .....f..c..t.....    Dtf < Dtc       need to wait
+        *      A) .....f..t..c.....    Dtf > Dtc       expired
+        *
+        *   So:
+        *         Dtf >= Dtc implies EXPIRED   (return true)
+        *         Dtf <  Dtc implies WAIT      (return false)
+        *
+        * Note: If t is expired then we *cannot* wait on it. We would wait
+        * forever (hang the system).
+        *
+        * Note: do NOT get clever and remove the -thresh from both sides. It
+        * is NOT the same.
+        *
+        * If future value is zero, we have a client managed sync point. In that
+        * case we do a direct comparison.
+        */
+       if (!host1x_syncpt_client_managed(sp))
+               return future_val - thresh >= current_val - thresh;
+       else
+               return (s32)(current_val - thresh) >= 0;
+}
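
A worked example of the unsigned comparison above, with values chosen for this note rather than taken from the patch:

#include <stdbool.h>
#include <stdint.h>

/* c sits just below the 32-bit wrap point while t and f sit just past it;
 * the delta comparison gives the right answer, a plain ">=" does not.
 */
static bool example_is_expired(void)
{
	uint32_t current_val = 0xfffffffeu;	/* c: shadow of the counter */
	uint32_t thresh      = 0x00000001u;	/* t: value being waited on */
	uint32_t future_val  = 0x00000003u;	/* f: value c will reach    */

	/*
	 * future_val - thresh  = 0x00000002 (Dtf)
	 * current_val - thresh = 0xfffffffd (Dtc)
	 * Dtf < Dtc, so the syncpoint has not expired and the caller must
	 * wait, even though current_val >= thresh as plain integers.
	 */
	return future_val - thresh >= current_val - thresh;	/* false */
}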
+
+/* remove a wait pointed to by patch_addr */
+int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
+{
+       return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
+}
+
+int host1x_syncpt_init(struct host1x *host)
+{
+       struct host1x_syncpt *syncpt;
+       int i;
+
+       syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+               GFP_KERNEL);
+       if (!syncpt)
+               return -ENOMEM;
+
+       for (i = 0; i < host->info->nb_pts; ++i) {
+               syncpt[i].id = i;
+               syncpt[i].host = host;
+       }
+
+       host->syncpt = syncpt;
+
+       host1x_syncpt_restore(host);
+
+       /* Allocate sync point to use for clearing waits for expired fences */
+       host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
+       if (!host->nop_sp)
+               return -ENOMEM;
+
+       return 0;
+}
+
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+                                           int client_managed)
+{
+       struct host1x *host = dev_get_drvdata(dev->parent);
+       return _host1x_syncpt_alloc(host, dev, client_managed);
+}
+
+void host1x_syncpt_free(struct host1x_syncpt *sp)
+{
+       if (!sp)
+               return;
+
+       kfree(sp->name);
+       sp->dev = NULL;
+       sp->name = NULL;
+       sp->client_managed = 0;
+}
+
+void host1x_syncpt_deinit(struct host1x *host)
+{
+       int i;
+       struct host1x_syncpt *sp = host->syncpt;
+       for (i = 0; i < host->info->nb_pts; i++, sp++)
+               kfree(sp->name);
+}
+
+int host1x_syncpt_nb_pts(struct host1x *host)
+{
+       return host->info->nb_pts;
+}
+
+int host1x_syncpt_nb_bases(struct host1x *host)
+{
+       return host->info->nb_bases;
+}
+
+int host1x_syncpt_nb_mlocks(struct host1x *host)
+{
+       return host->info->nb_mlocks;
+}
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+{
+       if (id >= host->info->nb_pts)
+               return NULL;
+       return host->syncpt + id;
+}
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
new file mode 100644 (file)
index 0000000..c998061
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_SYNCPT_H
+#define __HOST1X_SYNCPT_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "intr.h"
+
+struct host1x;
+
+/* Reserved for replacing an expired wait with a NOP */
+#define HOST1X_SYNCPT_RESERVED                 0
+
+struct host1x_syncpt {
+       int id;
+       atomic_t min_val;
+       atomic_t max_val;
+       u32 base_val;
+       const char *name;
+       int client_managed;
+       struct host1x *host;
+       struct device *dev;
+
+       /* interrupt data */
+       struct host1x_syncpt_intr intr;
+};
+
+/* Initialize sync point array */
+int host1x_syncpt_init(struct host1x *host);
+
+/* Free sync point array */
+void host1x_syncpt_deinit(struct host1x *host);
+
+/*
+ * Read max. It indicates how many operations there are in the queue, either
+ * in a channel or in a software thread.
+ */
+static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+       smp_rmb();
+       return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+       smp_rmb();
+       return (u32)atomic_read(&sp->min_val);
+}
+
+/* Return number of sync points supported. */
+int host1x_syncpt_nb_pts(struct host1x *host);
+
+/* Return number of wait bases supported. */
+int host1x_syncpt_nb_bases(struct host1x *host);
+
+/* Return number of mlocks supported. */
+int host1x_syncpt_nb_mlocks(struct host1x *host);
+
+/*
+ * Check sync point sanity. If the real value is larger than max, there have
+ * been too many sync point increments.
+ *
+ * Client managed sync points are not tracked.
+ */
+static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
+{
+       u32 max;
+       if (sp->client_managed)
+               return true;
+       max = host1x_syncpt_read_max(sp);
+       return (s32)(max - real) >= 0;
+}
+
+/* Return true if sync point is client managed. */
+static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
+{
+       return sp->client_managed;
+}
+
+/*
+ * Returns true if syncpoint min == max, which means that there are no
+ * outstanding operations.
+ */
+static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
+{
+       int min, max;
+       smp_rmb();
+       min = atomic_read(&sp->min_val);
+       max = atomic_read(&sp->max_val);
+       return (min == max);
+}
+
+/* Return a pointer to the struct for the given sync point id. */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+
+/* Request incrementing a sync point. */
+void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
+
+/* Load current value from hardware to the shadow register. */
+u32 host1x_syncpt_load(struct host1x_syncpt *sp);
+
+/* Check if the given syncpoint value has already passed */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
+
+/* Save host1x sync point state into shadow registers. */
+void host1x_syncpt_save(struct host1x *host);
+
+/* Reset host1x sync point state from shadow registers. */
+void host1x_syncpt_restore(struct host1x *host);
+
+/* Read current wait base value into shadow register and return it. */
+u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
+
+/* Increment sync point and its max. */
+void host1x_syncpt_incr(struct host1x_syncpt *sp);
+
+/* Indicate future operations by incrementing the sync point max. */
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
+
+/* Wait until sync point reaches a threshold value, or a timeout. */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
+                       long timeout, u32 *value);
+
+/* Check if sync point id is valid. */
+static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
+{
+       return sp->id < host1x_syncpt_nb_pts(sp->host);
+}
+
+/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
+int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
+
+/* Return id of the sync point */
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+
+/* Allocate a sync point for a device. */
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+               int client_managed);
+
+/* Free a sync point. */
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+#endif
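
A hypothetical client-side sketch of the syncpoint API declared above; the device is assumed to be a host1x client device and the work that performs the increment is elided.

/* Sketch only: request a syncpoint, reserve one increment for a pending
 * job, wait for the hardware to catch up, then release the syncpoint.
 * The helper name is an assumption.
 */
static int fence_one_increment(struct device *dev)
{
	struct host1x_syncpt *sp;
	u32 thresh, value;
	int err;

	sp = host1x_syncpt_request(dev, 0);	/* not client managed */
	if (!sp)
		return -ENOMEM;

	/* reserve one increment; the submitted job is expected to do it */
	thresh = host1x_syncpt_incr_max(sp, 1);

	/* ... submit work that increments the syncpoint here ... */

	/* wait up to one second for the increment to land */
	err = host1x_syncpt_wait(sp, thresh, HZ, &value);

	host1x_syncpt_free(sp);
	return err;
}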
index 512b01c..aa341d1 100644 (file)
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev)
                     hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
                        return true;
                break;
+       case USB_VENDOR_ID_ATMEL_V_USB:
+               /* Masterkit MA901 usb radio is based on an Atmel tiny85 chip
+                * and has the same USB ID as many Atmel V-USB devices. This
+                * usb radio is handled by the radio-ma901.c driver, so we
+                * want to ignore the hid. Check the name, bus and product,
+                * and ignore if we have the MA901 usb radio.
+                */
+               if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+                       hdev->bus == BUS_USB &&
+                       strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+                       return true;
+               break;
        }
 
        if (hdev->type == HID_TYPE_USBMOUSE &&
index c438877..5309fd5 100644 (file)
 #define USB_VENDOR_ID_ATMEL            0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
 #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER      0x2118
+#define USB_VENDOR_ID_ATMEL_V_USB      0x16c0
+#define USB_DEVICE_ID_ATMEL_V_USB      0x05df
 
 #define USB_VENDOR_ID_AUREAL           0x0755
 #define USB_DEVICE_ID_AUREAL_W01RN     0x2626
 #define USB_VENDOR_ID_MADCATZ          0x0738
 #define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
 
-#define USB_VENDOR_ID_MASTERKIT                        0x16c0
-#define USB_DEVICE_ID_MASTERKIT_MA901RADIO     0x05df
-
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
index f7f113b..a8ce442 100644 (file)
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
        return 0;
 }
 
+static void magicmouse_input_configured(struct hid_device *hdev,
+               struct hid_input *hi)
+{
+       struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+       int ret = magicmouse_setup_input(msc->input, hdev);
+       if (ret) {
+               hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+               /* clean msc->input to notify probe() of the failure */
+               msc->input = NULL;
+       }
+}
+
+
 static int magicmouse_probe(struct hid_device *hdev,
        const struct hid_device_id *id)
 {
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev,
                goto err_free;
        }
 
-       /* We do this after hid-input is done parsing reports so that
-        * hid-input uses the most natural button and axis IDs.
-        */
-       if (msc->input) {
-               ret = magicmouse_setup_input(msc->input, hdev);
-               if (ret) {
-                       hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
-                       goto err_stop_hw;
-               }
+       if (!msc->input) {
+               hid_err(hdev, "magicmouse input not registered\n");
+               ret = -ENOMEM;
+               goto err_stop_hw;
        }
 
        if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = {
        .remove = magicmouse_remove,
        .raw_event = magicmouse_raw_event,
        .input_mapping = magicmouse_input_mapping,
+       .input_configured = magicmouse_input_configured,
 };
 module_hid_driver(magicmouse_driver);
 
index 05d7b63..a0639e7 100644 (file)
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
 
 config VIDEO_SH_VEU
        tristate "SuperH VEU mem2mem video processing driver"
-       depends on VIDEO_DEV && VIDEO_V4L2
+       depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
        help
index c61f590..348dafc 100644 (file)
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
 static int usb_ma901radio_probe(struct usb_interface *intf,
                                const struct usb_device_id *id)
 {
+       struct usb_device *dev = interface_to_usbdev(intf);
        struct ma901radio_device *radio;
        int retval = 0;
 
+       /* Masterkit MA901 usb radio has the same USB ID as many other
+        * Atmel V-USB devices. Let's make additional checks to be sure
+        * that this is our device.
+        */
+
+       if (dev->product && dev->manufacturer &&
+               (strncmp(dev->product, "MA901", 5) != 0
+               || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0))
+               return -ENODEV;
+
        radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
        if (!radio) {
                dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
index 6bbd90e..a51241b 100644 (file)
@@ -1976,12 +1976,11 @@ static int __bond_release_one(struct net_device *bond_dev,
                return -EINVAL;
        }
 
+       write_unlock_bh(&bond->lock);
        /* unregister rx_handler early so bond_handle_frame wouldn't be called
         * for this slave anymore.
         */
        netdev_rx_handler_unregister(slave_dev);
-       write_unlock_bh(&bond->lock);
-       synchronize_net();
        write_lock_bh(&bond->lock);
 
        if (!all && !bond->params.fail_over_mac) {
index db103e0..ea7a388 100644 (file)
@@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                goto out;
        }
        if (new_value < 0) {
-               pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+               pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
                       bond->dev->name, new_value, INT_MAX);
                ret = -EINVAL;
                goto out;
@@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
        pr_info("%s: Setting ARP monitoring interval to %d.\n",
                bond->dev->name, new_value);
        bond->params.arp_interval = new_value;
-       if (bond->params.miimon) {
-               pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
-                       bond->dev->name, bond->dev->name);
-               bond->params.miimon = 0;
-       }
-       if (!bond->params.arp_targets[0]) {
-               pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
-                       bond->dev->name);
+       if (new_value) {
+               if (bond->params.miimon) {
+                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+                               bond->dev->name, bond->dev->name);
+                       bond->params.miimon = 0;
+               }
+               if (!bond->params.arp_targets[0])
+                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+                               bond->dev->name);
        }
        if (bond->dev->flags & IFF_UP) {
                /* If the interface is up, we may need to fire off
@@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                 * timer will get fired off when the open function
                 * is called.
                 */
-               cancel_delayed_work_sync(&bond->mii_work);
-               queue_delayed_work(bond->wq, &bond->arp_work, 0);
+               if (!new_value) {
+                       cancel_delayed_work_sync(&bond->arp_work);
+               } else {
+                       cancel_delayed_work_sync(&bond->mii_work);
+                       queue_delayed_work(bond->wq, &bond->arp_work, 0);
+               }
        }
-
 out:
        rtnl_unlock();
        return ret;
@@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
        }
        if (new_value < 0) {
                pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
        } else {
@@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
                goto out;
        }
        if (new_value < 0) {
-               pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+               pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
        } else {
@@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
        }
        if (new_value < 0) {
                pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
-       } else {
-               pr_info("%s: Setting MII monitoring interval to %d.\n",
-                       bond->dev->name, new_value);
-               bond->params.miimon = new_value;
-               if (bond->params.updelay)
-                       pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
-                               bond->dev->name,
-                               bond->params.updelay * bond->params.miimon);
-               if (bond->params.downdelay)
-                       pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
-                               bond->dev->name,
-                               bond->params.downdelay * bond->params.miimon);
-               if (bond->params.arp_interval) {
-                       pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
-                               bond->dev->name);
-                       bond->params.arp_interval = 0;
-                       if (bond->params.arp_validate) {
-                               bond->params.arp_validate =
-                                       BOND_ARP_VALIDATE_NONE;
-                       }
-               }
-
-               if (bond->dev->flags & IFF_UP) {
-                       /* If the interface is up, we may need to fire off
-                        * the MII timer. If the interface is down, the
-                        * timer will get fired off when the open function
-                        * is called.
-                        */
+       }
+       pr_info("%s: Setting MII monitoring interval to %d.\n",
+               bond->dev->name, new_value);
+       bond->params.miimon = new_value;
+       if (bond->params.updelay)
+               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+                       bond->dev->name,
+                       bond->params.updelay * bond->params.miimon);
+       if (bond->params.downdelay)
+               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+                       bond->dev->name,
+                       bond->params.downdelay * bond->params.miimon);
+       if (new_value && bond->params.arp_interval) {
+               pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+                       bond->dev->name);
+               bond->params.arp_interval = 0;
+               if (bond->params.arp_validate)
+                       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+       }
+       if (bond->dev->flags & IFF_UP) {
+               /* If the interface is up, we may need to fire off
+                * the MII timer. If the interface is down, the
+                * timer will get fired off when the open function
+                * is called.
+                */
+               if (!new_value) {
+                       cancel_delayed_work_sync(&bond->mii_work);
+               } else {
                        cancel_delayed_work_sync(&bond->arp_work);
                        queue_delayed_work(bond->wq, &bond->mii_work, 0);
                }
index b39ca5b..ff2ba86 100644 (file)
@@ -46,6 +46,7 @@ config CAN_EMS_PCI
 config CAN_PEAK_PCMCIA
        tristate "PEAK PCAN-PC Card"
        depends on PCMCIA
+       depends on HAS_IOPORT
        ---help---
          This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
          from PEAK-System (http://www.peak-system.com). To compile this
index a042cdc..3c18d7d 100644 (file)
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
         */
        if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
            REG_CR_BASICCAN_INITIAL &&
-           (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+           (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
            (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
                flag = 1;
 
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
         * See states on p. 23 of the Datasheet.
         */
        if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
-           priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+           priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
            priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
                return flag;
 
index daf4013..e4df307 100644 (file)
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
         */
        spin_lock_irqsave(&priv->cmdreg_lock, flags);
        priv->write_reg(priv, REG_CMR, val);
-       priv->read_reg(priv, REG_SR);
+       priv->read_reg(priv, SJA1000_REG_SR);
        spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
 }
 
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 
        while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
                n++;
-               status = priv->read_reg(priv, REG_SR);
+               status = priv->read_reg(priv, SJA1000_REG_SR);
                /* check for absent controller due to hw unplug */
                if (status == 0xFF && sja1000_is_absent(priv))
                        return IRQ_NONE;
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
                        /* receive interrupt */
                        while (status & SR_RBS) {
                                sja1000_rx(dev);
-                               status = priv->read_reg(priv, REG_SR);
+                               status = priv->read_reg(priv, SJA1000_REG_SR);
                                /* check for absent controller */
                                if (status == 0xFF && sja1000_is_absent(priv))
                                        return IRQ_NONE;
index afa9984..aa48e05 100644 (file)
@@ -56,7 +56,7 @@
 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
 #define REG_MOD                0x00
 #define REG_CMR                0x01
-#define REG_SR         0x02
+#define SJA1000_REG_SR         0x02
 #define REG_IR         0x03
 #define REG_IER                0x04
 #define REG_ALC                0x0B
index 829b5ad..edfdf6b 100644 (file)
@@ -438,7 +438,6 @@ struct atl1e_adapter {
        struct atl1e_hw        hw;
        struct atl1e_hw_stats  hw_stats;
 
-       bool have_msi;
        u32 wol;
        u16 link_speed;
        u16 link_duplex;
index 92f4734..f73d560 100644 (file)
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        free_irq(adapter->pdev->irq, netdev);
-
-       if (adapter->have_msi)
-               pci_disable_msi(adapter->pdev);
 }
 
 static int atl1e_request_irq(struct atl1e_adapter *adapter)
 {
        struct pci_dev    *pdev   = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
-       int flags = 0;
        int err = 0;
 
-       adapter->have_msi = true;
-       err = pci_enable_msi(pdev);
-       if (err) {
-               netdev_dbg(netdev,
-                          "Unable to allocate MSI interrupt Error: %d\n", err);
-               adapter->have_msi = false;
-       }
-
-       if (!adapter->have_msi)
-               flags |= IRQF_SHARED;
-       err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
+       err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
+                         netdev);
        if (err) {
                netdev_dbg(adapter->netdev,
                           "Unable to allocate interrupt Error: %d\n", err);
-               if (adapter->have_msi)
-                       pci_disable_msi(pdev);
                return err;
        }
        netdev_dbg(netdev, "atl1e_request_irq OK\n");
index 67d2663..17a9727 100644 (file)
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
                if (j + len > block_end)
                        goto partno;
 
-               memcpy(tp->fw_ver, &vpd_data[j], len);
-               strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+               if (len >= sizeof(tp->fw_ver))
+                       len = sizeof(tp->fw_ver) - 1;
+               memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+               snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+                        &vpd_data[j]);
        }
 
 partno:
index a170065..b0ebc9f 100644 (file)
 #define XGMAC_FLOW_CTRL_FCB_BPA        0x00000001      /* Flow Control Busy ... */
 
 /* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM   0x00800000      /* PMT Interrupt Mask */
 #define XGMAC_INT_STAT_PMT     0x0080          /* PMT Interrupt Status */
 #define XGMAC_INT_STAT_LPI     0x0040          /* LPI Interrupt Status */
 
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
 
+       /* Mask power mgt interrupt */
+       writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
+
        /* XGMAC requires AXI bus init. This is a 'magic number' for now */
        writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
 
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
                struct sk_buff *skb;
                int frame_len;
 
+               if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
+                       break;
+
                entry = priv->rx_tail;
                p = priv->dma_rx + entry;
                if (desc_get_owner(p))
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
        unsigned int pmt = 0;
 
        if (mode & WAKE_MAGIC)
-               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
        if (mode & WAKE_UCAST)
                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
 
index 8cdf025..9eada8e 100644 (file)
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
                tmp = readl(reg);
 }
 
+/*
+ * Sleep, either by using msleep() or, if we are suspending, by using
+ * mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+       if (db->in_suspend)
+               mdelay(ms);
+       else
+               msleep(ms);
+}
+
+/* Read a word from phyxcer */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+       board_info_t *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned int reg_save;
+       int ret;
+
+       mutex_lock(&db->addr_lock);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Save previous register address */
+       reg_save = readb(db->io_addr);
+
+       /* Fill the phyxcer register into REG_0C */
+       iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+       /* Issue phyxcer read command */
+       iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
+
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       dm9000_msleep(db, 1);           /* Wait read complete */
+
+       spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
+
+       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer read command */
+
+       /* The read data keeps on REG_0D & REG_0E */
+       ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+       /* restore the previous address */
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       mutex_unlock(&db->addr_lock);
+
+       dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+       return ret;
+}
+
+/* Write a word to phyxcer */
+static void
+dm9000_phy_write(struct net_device *dev,
+                int phyaddr_unused, int reg, int value)
+{
+       board_info_t *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned long reg_save;
+
+       dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+       mutex_lock(&db->addr_lock);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Save previous register address */
+       reg_save = readb(db->io_addr);
+
+       /* Fill the phyxcer register into REG_0C */
+       iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+       /* Fill the written data into REG_0D & REG_0E */
+       iow(db, DM9000_EPDRL, value);
+       iow(db, DM9000_EPDRH, value >> 8);
+
+       /* Issue phyxcer write command */
+       iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
+
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       dm9000_msleep(db, 1);           /* Wait write complete */
+
+       spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
+
+       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer write command */
+
+       /* restore the previous address */
+       writeb(reg_save, db->io_addr);
+
+       spin_unlock_irqrestore(&db->lock, flags);
+       mutex_unlock(&db->addr_lock);
+}
+
 /* dm9000_set_io
  *
  * select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
 
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
 
+       dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+       dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
        /* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
        return 0;
 }
 
-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
-       if (db->in_suspend)
-               mdelay(ms);
-       else
-               msleep(ms);
-}
-
-/*
- *   Read a word from phyxcer
- */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-       unsigned int reg_save;
-       int ret;
-
-       mutex_lock(&db->addr_lock);
-
-       spin_lock_irqsave(&db->lock,flags);
-
-       /* Save previous register address */
-       reg_save = readb(db->io_addr);
-
-       /* Fill the phyxcer register into REG_0C */
-       iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-       iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);   /* Issue phyxcer read command */
-
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock,flags);
-
-       dm9000_msleep(db, 1);           /* Wait read complete */
-
-       spin_lock_irqsave(&db->lock,flags);
-       reg_save = readb(db->io_addr);
-
-       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer read command */
-
-       /* The read data keeps on REG_0D & REG_0E */
-       ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
-       /* restore the previous address */
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock,flags);
-
-       mutex_unlock(&db->addr_lock);
-
-       dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
-       return ret;
-}
-
-/*
- *   Write a word to phyxcer
- */
-static void
-dm9000_phy_write(struct net_device *dev,
-                int phyaddr_unused, int reg, int value)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-       unsigned long reg_save;
-
-       dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
-       mutex_lock(&db->addr_lock);
-
-       spin_lock_irqsave(&db->lock,flags);
-
-       /* Save previous register address */
-       reg_save = readb(db->io_addr);
-
-       /* Fill the phyxcer register into REG_0C */
-       iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-       /* Fill the written data into REG_0D & REG_0E */
-       iow(db, DM9000_EPDRL, value);
-       iow(db, DM9000_EPDRH, value >> 8);
-
-       iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);   /* Issue phyxcer write command */
-
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock, flags);
-
-       dm9000_msleep(db, 1);           /* Wait write complete */
-
-       spin_lock_irqsave(&db->lock,flags);
-       reg_save = readb(db->io_addr);
-
-       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer write command */
-
-       /* restore the previous address */
-       writeb(reg_save, db->io_addr);
-
-       spin_unlock_irqrestore(&db->lock, flags);
-       mutex_unlock(&db->addr_lock);
-}
-
 static void
 dm9000_shutdown(struct net_device *dev)
 {
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
        db->flags |= DM9000_PLATF_SIMPLE_PHY;
 #endif
 
-       dm9000_reset(db);
+       /* Fix a bug in dm9000_probe: instead of calling dm9000_reset(db),
+        * set the 'NCR_MAC_LBK' bit here to keep the DM9000 FIFO stable
+        * during the probe stage.
+        */
+
+       iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
 
        /* try multiple times, DM9000 sometimes gets the read wrong */
        for (i = 0; i < 8; i++) {
index 55688bd..9ce058a 100644 (file)
@@ -69,7 +69,9 @@
 #define NCR_WAKEEN          (1<<6)
 #define NCR_FCOL            (1<<4)
 #define NCR_FDX             (1<<3)
-#define NCR_LBK             (3<<1)
+
+#define NCR_RESERVED        (3<<1)
+#define NCR_MAC_LBK         (1<<1)
 #define NCR_RST                    (1<<0)
 
 #define NSR_SPEED           (1<<7)
 #define ISR_LNKCHNG            (1<<5)
 #define ISR_UNDERRUN           (1<<4)
 
+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR           0x1b    /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM       0xE100  /* DSP init parameter */
+
 #endif /* _DM9000X_H_ */
 
index 911d025..f292c3a 100644 (file)
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+       struct fec_enet_private *fep = netdev_priv(dev);
+       struct bufdesc *bdp;
+       unsigned int i;
+
+       /* Initialize the receive buffer descriptors. */
+       bdp = fep->rx_bd_base;
+       for (i = 0; i < RX_RING_SIZE; i++) {
+
+               /* Initialize the BD for every fragment in the page. */
+               if (bdp->cbd_bufaddr)
+                       bdp->cbd_sc = BD_ENET_RX_EMPTY;
+               else
+                       bdp->cbd_sc = 0;
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+       }
+
+       /* Set the last buffer to wrap */
+       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       bdp->cbd_sc |= BD_SC_WRAP;
+
+       fep->cur_rx = fep->rx_bd_base;
+
+       /* ...and the same for transmit */
+       bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
+       for (i = 0; i < TX_RING_SIZE; i++) {
+
+               /* Initialize the BD for every fragment in the page. */
+               bdp->cbd_sc = 0;
+               if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+                       dev_kfree_skb_any(fep->tx_skbuff[i]);
+                       fep->tx_skbuff[i] = NULL;
+               }
+               bdp->cbd_bufaddr = 0;
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+       }
+
+       /* Set the last buffer to wrap */
+       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       bdp->cbd_sc |= BD_SC_WRAP;
+       fep->dirty_tx = bdp;
+}
+
 /* This function is called to start or restart the FEC during a link
  * change.  This only happens when switching between half and full
  * duplex.
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
        /* Set maximum receive buffer size. */
        writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
+       fec_enet_bd_init(ndev);
+
        /* Set receive and transmit descriptor base. */
        writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
        if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-       fep->cur_rx = fep->rx_bd_base;
 
        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
@@ -1597,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *cbd_base;
-       struct bufdesc *bdp;
-       unsigned int i;
 
        /* Allocate memory for buffer descriptors. */
        cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev)
                return -ENOMEM;
        }
 
+       memset(cbd_base, 0, PAGE_SIZE);
        spin_lock_init(&fep->hw_lock);
 
        fep->netdev = ndev;
@@ -1631,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev)
        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-       /* Initialize the receive buffer descriptors. */
-       bdp = fep->rx_bd_base;
-       for (i = 0; i < RX_RING_SIZE; i++) {
-
-               /* Initialize the BD for every fragment in the page. */
-               bdp->cbd_sc = 0;
-               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-       }
-
-       /* Set the last buffer to wrap */
-       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-       bdp->cbd_sc |= BD_SC_WRAP;
-
-       /* ...and the same for transmit */
-       bdp = fep->tx_bd_base;
-       fep->cur_tx = bdp;
-       for (i = 0; i < TX_RING_SIZE; i++) {
-
-               /* Initialize the BD for every fragment in the page. */
-               bdp->cbd_sc = 0;
-               bdp->cbd_bufaddr = 0;
-               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-       }
-
-       /* Set the last buffer to wrap */
-       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-       bdp->cbd_sc |= BD_SC_WRAP;
-       fep->dirty_tx = bdp;
-
        fec_restart(ndev, 0);
 
        return 0;
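
The new fec_enet_bd_init() above resets both descriptor rings in place: RX descriptors that still own a buffer are marked empty, TX descriptors are cleared and any queued skbs freed, and the last entry of each ring is tagged with BD_SC_WRAP so the controller loops back to the ring base. Below is a minimal sketch of that reset-and-wrap pattern, assuming hypothetical descriptor types and flag values rather than the FEC's own definitions.

/* Sketch only: generic descriptor-ring reset with a wrap mark on the
 * last entry. DESC_EMPTY/DESC_WRAP and struct desc are illustrative
 * stand-ins, not the FEC buffer descriptor layout.
 */
#include <stddef.h>
#include <stdint.h>

#define DESC_EMPTY 0x8000u
#define DESC_WRAP  0x2000u

struct desc {
	uint16_t status;
	uint32_t bufaddr;
};

static void ring_reset(struct desc *ring, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		/* Descriptors that still have a buffer stay ready for the
		 * hardware; the rest are cleared.
		 */
		ring[i].status = ring[i].bufaddr ? DESC_EMPTY : 0;
	}
	/* Exactly one wrap mark, on the last descriptor of the ring. */
	ring[n - 1].status |= DESC_WRAP;
}

The surrounding hunks move this work out of fec_enet_init() and into fec_restart(), so the rings are re-initialized on every restart rather than only once at probe time.
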
index 43462d5..ffd2871 100644 (file)
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                txdr->buffer_info[i].dma =
                        dma_map_single(&pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
+                       ret_val = 4;
+                       goto err_nomem;
+               }
                tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
                tx_desc->lower.data = cpu_to_le32(skb->len);
                tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
                                    GFP_KERNEL);
        if (!rxdr->buffer_info) {
-               ret_val = 4;
+               ret_val = 5;
                goto err_nomem;
        }
 
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                        GFP_KERNEL);
        if (!rxdr->desc) {
-               ret_val = 5;
+               ret_val = 6;
                goto err_nomem;
        }
        memset(rxdr->desc, 0, rxdr->size);
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
                skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
                if (!skb) {
-                       ret_val = 6;
+                       ret_val = 7;
                        goto err_nomem;
                }
                skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                rxdr->buffer_info[i].dma =
                        dma_map_single(&pdev->dev, skb->data,
                                       E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
+                       ret_val = 8;
+                       goto err_nomem;
+               }
                rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
                memset(skb->data, 0x00, skb->len);
        }
index 948b86f..7e615e2 100644 (file)
@@ -848,11 +848,16 @@ check_page:
                        }
                }
 
-               if (!buffer_info->dma)
+               if (!buffer_info->dma) {
                        buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
                                                        DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                               adapter->alloc_rx_buff_failed++;
+                               break;
+                       }
+               }
 
                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
index ea48083..b5f94ab 100644 (file)
@@ -2159,6 +2159,10 @@ map_skb:
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                       adapter->alloc_rx_buff_failed++;
+                       break;
+               }
 
                rx_desc = IXGB_RX_DESC(*rx_ring, i);
                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2172,8 @@ map_skb:
                rx_desc->status = 0;
 
 
-               if (++i == rx_ring->count) i = 0;
+               if (++i == rx_ring->count)
+                       i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
index fc07ca3..6a0e671 100644 (file)
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
 
-               tp = space - 2048/8;
+               tp = space - 8192/8;
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
        } else {
index 615ac63..ec6dcd8 100644 (file)
@@ -2074,7 +2074,7 @@ enum {
        GM_IS_RX_FF_OR  = 1<<1, /* Receive FIFO Overrun */
        GM_IS_RX_COMPL  = 1<<0, /* Frame Reception Complete */
 
-#define GMAC_DEF_MSK     GM_IS_TX_FF_UR
+#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
 };
 
 /*     GMAC_LINK_CTRL  16 bit  GMAC Link Control Reg (YUKON only) */
index f278b10..30d78f8 100644 (file)
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
 {
-       unsigned int i;
-       for (i = ETH_ALEN - 1; i; --i) {
+       int i;
+       for (i = ETH_ALEN - 1; i >= 0; --i) {
                dst_mac[i] = src_mac & 0xff;
                src_mac >>= 8;
        }
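
The mlx4_en fix just above swaps an unsigned loop index, whose bare `i` test stopped the loop before byte 0 was written and left dst_mac[0] stale, for a signed index with `i >= 0`, which an unsigned counter cannot use without wrapping. A standalone sketch of the corrected extraction, with hypothetical names:

#include <stdint.h>

#define MAC_LEN 6

static void u64_to_mac(uint8_t dst[MAC_LEN], uint64_t src)
{
	int i;	/* signed, so the loop can reach index 0 and then stop */

	for (i = MAC_LEN - 1; i >= 0; --i) {
		dst[i] = src & 0xff;	/* lowest byte goes into the last slot */
		src >>= 8;
	}
}
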
index 33bcb63..8fb4812 100644 (file)
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
        for (; rxfc != 0; rxfc--) {
                rxh = ks8851_rdreg32(ks, KS_RXFHSR);
                rxstat = rxh & 0xffff;
-               rxlen = rxh >> 16;
+               rxlen = (rxh >> 16) & 0xfff;
 
                netif_dbg(ks, rx_status, ks->netdev,
                          "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
index bf5e3cf..6ed333f 100644 (file)
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
-                               if (mdp->link == PHY_DOWN)
-                                       link_stat = 0;
-                               else
-                                       link_stat = PHY_ST_LINK;
+                               goto ignore_link;
                        } else {
                                link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                }
        }
 
+ignore_link:
        if (intr_status & EESR_TWB) {
                /* Write buck end. unused write back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
-       u32 intr_status = 0;
+       unsigned long intr_status;
 
        spin_lock(&mdp->lock);
 
-       /* Get interrpt stat */
+       /* Get interrupt status */
        intr_status = sh_eth_read(ndev, EESR);
+       /* Mask it with the interrupt mask, forcing the ECI interrupt to stay
+        * enabled since it is the one that comes through regardless of the
+        * mask, and we need to fully handle it in sh_eth_error() in order
+        * to quench it, as it doesn't get cleared just by writing 1 to the
+        * ECI bit...
+        */
+       intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                        EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
        struct phy_device *phydev = mdp->phydev;
        int new_state = 0;
 
-       if (phydev->link != PHY_DOWN) {
+       if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                        if (mdp->cd->set_rate)
                                mdp->cd->set_rate(ndev);
                }
-               if (mdp->link == PHY_DOWN) {
+               if (!mdp->link) {
                        sh_eth_write(ndev,
                                (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
+                       if (mdp->cd->no_psr || mdp->no_ether_link)
+                               sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
-               mdp->link = PHY_DOWN;
+               mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
+               if (mdp->cd->no_psr || mdp->no_ether_link)
+                       sh_eth_rcv_snd_disable(ndev);
        }
 
        if (new_state && netif_msg_link(mdp))
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                mdp->mii_bus->id , mdp->phy_id);
 
-       mdp->link = PHY_DOWN;
+       mdp->link = 0;
        mdp->speed = 0;
        mdp->duplex = -1;
 
index e665567..828be45 100644 (file)
@@ -723,7 +723,7 @@ struct sh_eth_private {
        u32 phy_id;                                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
        struct phy_device *phydev;      /* PHY device control */
-       enum phy_state link;
+       int link;
        phy_interface_t phy_interface;
        int msg_enable;
        int speed;
index df32a09..80cad06 100644 (file)
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
         * queue is stopped then start the queue as we have free desc for tx
         */
        if (unlikely(netif_queue_stopped(ndev)))
-               netif_start_queue(ndev);
+               netif_wake_queue(ndev);
        cpts_tx_timestamp(priv->cpts, skb);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += len;
index ae1b77a..72300bc 100644 (file)
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
         * queue is stopped then start the queue as we have free desc for tx
         */
        if (unlikely(netif_queue_stopped(ndev)))
-               netif_start_queue(ndev);
+               netif_wake_queue(ndev);
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
index 9abe517..1a15ec1 100644 (file)
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
 static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct usbnet *dev = netdev_priv(netdev);
+       int ret;
+
+       if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+               return -EINVAL;
 
-       int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
+       ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to set mac rx frame length\n");
                return ret;
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
 
-       ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
+       ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to set max rx frame length\n");
                return ret;
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
                                dev->net->stats.rx_frame_errors++;
                } else {
-                       /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
-                       if (unlikely(size > (ETH_FRAME_LEN + 12))) {
+                       /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
+                       if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
                                netif_dbg(dev, rx_err, dev->net,
                                          "size err rx_cmd_a=0x%08x\n",
                                          rx_cmd_a);
index 39c84ec..7fdac6c 100644 (file)
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
 
-       ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+       if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
+               ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 }
 
 /*
index 38bc5a7..1221469 100644 (file)
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        const struct b43_dma_ops *ops;
        struct b43_dmaring *ring;
        struct b43_dmadesc_meta *meta;
+       static const struct b43_txstatus fake; /* filled with 0 */
+       const struct b43_txstatus *txstat;
        int slot, firstused;
        bool frame_succeed;
+       int skip;
+       static u8 err_out1, err_out2;
 
        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        firstused = ring->current_slot - ring->used_slots + 1;
        if (firstused < 0)
                firstused = ring->nr_slots + firstused;
+
+       skip = 0;
        if (unlikely(slot != firstused)) {
                /* This possibly is a firmware bug and will result in
-                * malfunction, memory leaks and/or stall of DMA functionality. */
-               b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
-                      "Expected %d, but got %d\n",
-                      ring->index, firstused, slot);
-               return;
+                * malfunction, memory leaks and/or stall of DMA functionality.
+                */
+               if (slot == next_slot(ring, next_slot(ring, firstused))) {
+                       /* If a single header/data pair was missed, skip over
+                        * the first two slots in an attempt to recover.
+                        */
+                       slot = firstused;
+                       skip = 2;
+                       if (!err_out1) {
+                               /* Report the error once. */
+                               b43dbg(dev->wl,
+                                      "Skip on DMA ring %d slot %d.\n",
+                                      ring->index, slot);
+                               err_out1 = 1;
+                       }
+               } else {
+                       /* More than a single header/data pair was missed.
+                        * Report this error once.
+                        */
+                       if (!err_out2)
+                               b43dbg(dev->wl,
+                                      "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
+                                      ring->index, firstused, slot);
+                       err_out2 = 1;
+                       return;
+               }
        }
 
        ops = ring->ops;
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                               slot, firstused, ring->index);
                        break;
                }
+
                if (meta->skb) {
                        struct b43_private_tx_info *priv_info =
-                               b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+                            b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
 
-                       unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+                       unmap_descbuffer(ring, meta->dmaaddr,
+                                        meta->skb->len, 1);
                        kfree(priv_info->bouncebuffer);
                        priv_info->bouncebuffer = NULL;
                } else {
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        struct ieee80211_tx_info *info;
 
                        if (unlikely(!meta->skb)) {
-                               /* This is a scatter-gather fragment of a frame, so
-                                * the skb pointer must not be NULL. */
+                               /* This is a scatter-gather fragment of a frame,
+                                * so the skb pointer must not be NULL.
+                                */
                                b43dbg(dev->wl, "TX status unexpected NULL skb "
                                       "at slot %d (first=%d) on ring %d\n",
                                       slot, firstused, ring->index);
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 
                        /*
                         * Call back to inform the ieee80211 subsystem about
-                        * the status of the transmission.
+                        * the status of the transmission. When skipping over
+                        * a missed TX status report, use a status structure
+                        * filled with zeros to indicate that the frame was not
+                        * sent (frame_count 0) and not acknowledged.
                         */
-                       frame_succeed = b43_fill_txstatus_report(dev, info, status);
+                       if (unlikely(skip))
+                               txstat = &fake;
+                       else
+                               txstat = status;
+
+                       frame_succeed = b43_fill_txstatus_report(dev, info,
+                                                                txstat);
 #ifdef CONFIG_B43_DEBUG
                        if (frame_succeed)
                                ring->nr_succeed_tx_packets++;
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                /* Everything unmapped and free'd. So it's not used anymore. */
                ring->used_slots--;
 
-               if (meta->is_last_fragment) {
+               if (meta->is_last_fragment && !skip) {
                        /* This is the last scatter-gather
                         * fragment of the frame. We are done. */
                        break;
                }
                slot = next_slot(ring, slot);
+               if (skip > 0)
+                       --skip;
        }
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
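
The b43 change above recovers from a single missed TX status report: when the reported slot sits exactly two ring positions past the expected first-used slot, the handler assumes one header/data descriptor pair was skipped, walks those two slots with a zeroed status structure, and logs each failure mode only once. A sketch of just the ring arithmetic, assuming a plain modular next-slot helper (the driver's own next_slot() takes the ring as an argument):

static int next_slot(int slot, int nr_slots)
{
	return (slot + 1) % nr_slots;
}

/* Returns the number of slots to fast-forward, or -1 if the report is
 * too far out of order to recover from.
 */
static int tx_status_skip(int reported, int expected, int nr_slots)
{
	if (reported == expected)
		return 0;
	if (reported == next_slot(next_slot(expected, nr_slots), nr_slots))
		return 2;	/* one header/data pair was missed */
	return -1;
}
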
index 3c35382..e8486c1 100644 (file)
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        u16 clip_off[2] = { 0xFFFF, 0xFFFF };
 
        u8 vcm_final = 0;
-       s8 offset[4];
+       s32 offset[4];
        s32 results[8][4] = { };
        s32 results_min[4] = { };
        s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                }
                for (i = 0; i < 4; i += 2) {
                        s32 curr;
-                       s32 mind = 40;
+                       s32 mind = 0x100000;
                        s32 minpoll = 249;
                        u8 minvcm = 0;
                        if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
        u8 regs_save_radio[2];
        u16 regs_save_phy[2];
 
-       s8 offset[4];
+       s32 offset[4];
        u8 core;
        u8 rail;
 
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
        }
 
        for (i = 0; i < 4; i++) {
-               s32 mind = 40;
+               s32 mind = 0x100000;
                u8 minvcm = 0;
                s32 minpoll = 249;
                s32 curr;
index 21a8242..18d3764 100644 (file)
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
        gain0_15 = ((biq1 & 0xf) << 12) |
                   ((tia & 0xf) << 8) |
                   ((lna2 & 0x3) << 6) |
-                  ((lna2 & 0x3) << 4) |
-                  ((lna1 & 0x3) << 2) |
-                  ((lna1 & 0x3) << 0);
+                  ((lna2 &
+                    0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
 
        mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
        mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
        }
 
        mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
-       mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
-       mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
 
 }
 
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
        return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
 }
 
-static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
-                                     u16 tia_gain, u16 lna2_gain)
-{
-       u32 i_thresh_l, q_thresh_l;
-       u32 i_thresh_h, q_thresh_h;
-       struct lcnphy_iq_est iq_est_h, iq_est_l;
-
-       wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
-                                              lna2_gain, 0);
-
-       wlc_lcnphy_rx_gain_override_enable(pi, true);
-       wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
-       udelay(500);
-       write_radio_reg(pi, RADIO_2064_REG112, 0);
-       if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
-               return false;
-
-       wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
-       udelay(500);
-       write_radio_reg(pi, RADIO_2064_REG112, 0);
-       if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
-               return false;
-
-       i_thresh_l = (iq_est_l.i_pwr << 1);
-       i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
-
-       q_thresh_l = (iq_est_l.q_pwr << 1);
-       q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
-       if ((iq_est_h.i_pwr > i_thresh_l) &&
-           (iq_est_h.i_pwr < i_thresh_h) &&
-           (iq_est_h.q_pwr > q_thresh_l) &&
-           (iq_est_h.q_pwr < q_thresh_h))
-               return true;
-
-       return false;
-}
-
 static bool
 wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
                     const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
            RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
            rfoverride3_old, rfoverride3val_old, rfoverride4_old,
            rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
-       int tia_gain, lna2_gain, biq1_gain;
-       bool set_gain;
+       int tia_gain;
+       u32 received_power, rx_pwr_threshold;
        u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
        u16 values_to_save[11];
        s16 *ptr;
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
                goto cal_done;
        }
 
-       WARN_ON(module != 1);
-       tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
-       wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
-
-       for (i = 0; i < 11; i++)
-               values_to_save[i] =
-                       read_radio_reg(pi, rxiq_cal_rf_reg[i]);
-       Core1TxControl_old = read_phy_reg(pi, 0x631);
-
-       or_phy_reg(pi, 0x631, 0x0015);
-
-       RFOverride0_old = read_phy_reg(pi, 0x44c);
-       RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
-       rfoverride2_old = read_phy_reg(pi, 0x4b0);
-       rfoverride2val_old = read_phy_reg(pi, 0x4b1);
-       rfoverride3_old = read_phy_reg(pi, 0x4f9);
-       rfoverride3val_old = read_phy_reg(pi, 0x4fa);
-       rfoverride4_old = read_phy_reg(pi, 0x938);
-       rfoverride4val_old = read_phy_reg(pi, 0x939);
-       afectrlovr_old = read_phy_reg(pi, 0x43b);
-       afectrlovrval_old = read_phy_reg(pi, 0x43c);
-       old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
-       old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
-       tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
-       if (tx_gain_override_old) {
-               wlc_lcnphy_get_tx_gain(pi, &old_gains);
-               tx_gain_index_old = pi_lcn->lcnphy_current_index;
-       }
-
-       wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+       if (module == 1) {
 
-       mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+               tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+               wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
 
-       mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+               for (i = 0; i < 11; i++)
+                       values_to_save[i] =
+                               read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+               Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+               or_phy_reg(pi, 0x631, 0x0015);
+
+               RFOverride0_old = read_phy_reg(pi, 0x44c);
+               RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+               rfoverride2_old = read_phy_reg(pi, 0x4b0);
+               rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+               rfoverride3_old = read_phy_reg(pi, 0x4f9);
+               rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+               rfoverride4_old = read_phy_reg(pi, 0x938);
+               rfoverride4val_old = read_phy_reg(pi, 0x939);
+               afectrlovr_old = read_phy_reg(pi, 0x43b);
+               afectrlovrval_old = read_phy_reg(pi, 0x43c);
+               old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+               old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
+
+               tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+               if (tx_gain_override_old) {
+                       wlc_lcnphy_get_tx_gain(pi, &old_gains);
+                       tx_gain_index_old = pi_lcn->lcnphy_current_index;
+               }
 
-       write_radio_reg(pi, RADIO_2064_REG116, 0x06);
-       write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
-       write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
-       write_radio_reg(pi, RADIO_2064_REG098, 0x03);
-       write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
-       mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
-       write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
-       write_radio_reg(pi, RADIO_2064_REG114, 0x01);
-       write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
-       write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
-       mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
-       mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
-       mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
-       mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
-       mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
-       mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
-       mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+               wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
 
-       mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+               mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
 
-       write_phy_reg(pi, 0x6da, 0xffff);
-       or_phy_reg(pi, 0x6db, 0x3);
+               mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
 
-       wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
-       set_gain = false;
-
-       lna2_gain = 3;
-       while ((lna2_gain >= 0) && !set_gain) {
-               tia_gain = 4;
-
-               while ((tia_gain >= 0) && !set_gain) {
-                       biq1_gain = 6;
-
-                       while ((biq1_gain >= 0) && !set_gain) {
-                               set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
-                                                                    (u16)
-                                                                    biq1_gain,
-                                                                    (u16)
-                                                                    tia_gain,
-                                                                    (u16)
-                                                                    lna2_gain);
-                               biq1_gain -= 1;
-                       }
+               write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+               write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+               write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+               write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+               write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+               mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+               write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+               write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+               write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+               write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+               mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+               mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+               mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+               mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+               mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+               mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+               mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+
+               mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+               wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
+               write_phy_reg(pi, 0x6da, 0xffff);
+               or_phy_reg(pi, 0x6db, 0x3);
+               wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+               wlc_lcnphy_rx_gain_override_enable(pi, true);
+
+               tia_gain = 8;
+               rx_pwr_threshold = 950;
+               while (tia_gain > 0) {
                        tia_gain -= 1;
+                       wlc_lcnphy_set_rx_gain_by_distribution(pi,
+                                                              0, 0, 2, 2,
+                                                              (u16)
+                                                              tia_gain, 1, 0);
+                       udelay(500);
+
+                       received_power =
+                               wlc_lcnphy_measure_digital_power(pi, 2000);
+                       if (received_power < rx_pwr_threshold)
+                               break;
                }
-               lna2_gain -= 1;
-       }
+               result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
 
-       if (set_gain)
-               result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
-       else
-               result = false;
+               wlc_lcnphy_stop_tx_tone(pi);
 
-       wlc_lcnphy_stop_tx_tone(pi);
+               write_phy_reg(pi, 0x631, Core1TxControl_old);
 
-       write_phy_reg(pi, 0x631, Core1TxControl_old);
-
-       write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
-       write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
-       write_phy_reg(pi, 0x4b0, rfoverride2_old);
-       write_phy_reg(pi, 0x4b1, rfoverride2val_old);
-       write_phy_reg(pi, 0x4f9, rfoverride3_old);
-       write_phy_reg(pi, 0x4fa, rfoverride3val_old);
-       write_phy_reg(pi, 0x938, rfoverride4_old);
-       write_phy_reg(pi, 0x939, rfoverride4val_old);
-       write_phy_reg(pi, 0x43b, afectrlovr_old);
-       write_phy_reg(pi, 0x43c, afectrlovrval_old);
-       write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
-       write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+               write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+               write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+               write_phy_reg(pi, 0x4b0, rfoverride2_old);
+               write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+               write_phy_reg(pi, 0x4f9, rfoverride3_old);
+               write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+               write_phy_reg(pi, 0x938, rfoverride4_old);
+               write_phy_reg(pi, 0x939, rfoverride4val_old);
+               write_phy_reg(pi, 0x43b, afectrlovr_old);
+               write_phy_reg(pi, 0x43c, afectrlovrval_old);
+               write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+               write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
 
-       wlc_lcnphy_clear_trsw_override(pi);
+               wlc_lcnphy_clear_trsw_override(pi);
 
-       mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+               mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
 
-       for (i = 0; i < 11; i++)
-               write_radio_reg(pi, rxiq_cal_rf_reg[i],
-                               values_to_save[i]);
+               for (i = 0; i < 11; i++)
+                       write_radio_reg(pi, rxiq_cal_rf_reg[i],
+                                       values_to_save[i]);
 
-       if (tx_gain_override_old)
-               wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
-       else
-               wlc_lcnphy_disable_tx_gain_override(pi);
+               if (tx_gain_override_old)
+                       wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+               else
+                       wlc_lcnphy_disable_tx_gain_override(pi);
 
-       wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
-       wlc_lcnphy_rx_gain_override_enable(pi, false);
+               wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+               wlc_lcnphy_rx_gain_override_enable(pi, false);
+       }
 
 cal_done:
        kfree(ptr);
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
                write_radio_reg(pi, RADIO_2064_REG038, 3);
                write_radio_reg(pi, RADIO_2064_REG091, 7);
        }
-
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
-                       0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
-
-               write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
-               write_radio_reg(pi, RADIO_2064_REG091, 0x3);
-               write_radio_reg(pi, RADIO_2064_REG038, 0x3);
-
-               write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
-       }
 }
 
 static int
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
                } else {
                        mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
                        mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-                       mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
-                       mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
-                       mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
-                       mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
-                       mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
-                       mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
-                       mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
                }
        } else {
                mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
                    (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
 
        mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
-       mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
 }
 
 static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 {
        struct phytbl_info tab;
        u32 rfseq, ind;
-       u8 tssi_sel;
 
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
        mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
 
-       if (pi->sh->boardflags & BFL_FEM) {
-               tssi_sel = 0x1;
-               wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
-       } else {
-               tssi_sel = 0xe;
-               wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
-       }
+       wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
        mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
 
        mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
        mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
 
        if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
-               mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
+               mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
                mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
        } else {
-               mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
                mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
                mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
        }
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
        mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
 
-       mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
-       mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
-       mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-
        wlc_lcnphy_pwrctrl_rssiparams(pi);
 }
 
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
                read_radio_reg(pi, RADIO_2064_REG007) & 1;
        u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
        u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
-       u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
-
        idleTssi = read_phy_reg(pi, 0x4ab);
        suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
                         MCTL_EN_MAC));
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
        mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
        mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
        wlc_lcnphy_tssi_setup(pi);
-
-       mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
-       mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
-
-       wlc_lcnphy_set_bbmult(pi, 0x0);
-
        wlc_phy_do_dummy_tx(pi, true, OFF);
        idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
                    >> 0);
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
 
        mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
 
-       wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
        wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
        wlc_lcnphy_set_tx_gain(pi, &old_gains);
        wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
                        wlc_lcnphy_write_table(pi, &tab);
                        tab.tbl_offset++;
                }
-               mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
-               mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
-               mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
-               mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
-               mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
 
                mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
 
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
        target_gains.pad_gain = 21;
        target_gains.dac_gain = 0;
        wlc_lcnphy_set_tx_gain(pi, &target_gains);
+       wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
 
        if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
 
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
                                        lcnphy_recal ? LCNPHY_CAL_RECAL :
                                        LCNPHY_CAL_FULL), false);
        } else {
-               wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
                wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
        }
 
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
        if (CHSPEC_IS5G(pi->radio_chanspec))
                pa_gain = 0x70;
        else
-               pa_gain = 0x60;
+               pa_gain = 0x70;
 
        if (pi->sh->boardflags & BFL_FEM)
                pa_gain = 0x10;
-
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
        tab.tbl_len = 1;
        tab.tbl_ptr = &val;
 
        for (j = 0; j < 128; j++) {
-               if (pi->sh->boardflags & BFL_FEM)
-                       gm_gain = gain_table[j].gm;
-               else
-                       gm_gain = 15;
-
+               gm_gain = gain_table[j].gm;
                val = (((u32) pa_gain << 24) |
                       (gain_table[j].pad << 16) |
                       (gain_table[j].pga << 8) | gm_gain);
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
 
        write_phy_reg(pi, 0x4ea, 0x4688);
 
-       if (pi->sh->boardflags & BFL_FEM)
-               mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
-       else
-               mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
+       mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
 
        mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
 
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
        wlc_lcnphy_rcal(pi);
 
        wlc_lcnphy_rc_cal(pi);
-
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
-               write_radio_reg(pi, RADIO_2064_REG033, 0x19);
-               write_radio_reg(pi, RADIO_2064_REG039, 0xe);
-       }
-
 }
 
 static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
                wlc_lcnphy_write_table(pi, &tab);
        }
 
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
-               tab.tbl_width = 16;
-               tab.tbl_ptr = &val;
-               tab.tbl_len = 1;
+       tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+       tab.tbl_width = 16;
+       tab.tbl_ptr = &val;
+       tab.tbl_len = 1;
 
-               val = 150;
-               tab.tbl_offset = 0;
-               wlc_lcnphy_write_table(pi, &tab);
+       val = 114;
+       tab.tbl_offset = 0;
+       wlc_lcnphy_write_table(pi, &tab);
 
-               val = 220;
-               tab.tbl_offset = 1;
-               wlc_lcnphy_write_table(pi, &tab);
-       }
+       val = 130;
+       tab.tbl_offset = 1;
+       wlc_lcnphy_write_table(pi, &tab);
+
+       val = 6;
+       tab.tbl_offset = 8;
+       wlc_lcnphy_write_table(pi, &tab);
 
        if (CHSPEC_IS2G(pi->radio_chanspec)) {
                if (pi->sh->boardflags & BFL_FEM)
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
                wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
 
        mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
-       wlc_lcnphy_tssi_setup(pi);
 }
 
 void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
        if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
                return false;
 
-       if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+       if ((pi->sh->boardflags & BFL_FEM) &&
+           (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
                if (pi_lcn->lcnphy_tempsense_option == 3) {
                        pi->hwpwrctrl = true;
                        pi->hwpwrctrl_capable = true;
index b7e95ac..622c01c 100644 (file)
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
 };
 
 static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
 };
 
 static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
index e8324b5..6c7493c 100644 (file)
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
        int rate_idx;
        int i;
        u32 rate;
-       u8 use_green = il4965_rs_use_green(il, sta);
+       u8 use_green;
        u8 active_tbl = 0;
        u8 valid_tx_ant;
        struct il_station_priv *sta_priv;
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
        if (!sta || !lq_sta)
                return;
 
+       use_green = il4965_rs_use_green(il, sta);
        sta_priv = (void *)sta->drv_priv;
 
        i = lq_sta->last_txrate_idx;
index 86ea5f4..44ca0e5 100644 (file)
@@ -1261,6 +1261,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                return -EIO;
        }
 
+       /*
+        * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
+        * in iwl_down but cancel the workers only later.
+        */
+       if (!priv->ucode_loaded) {
+               IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
+               return -EIO;
+       }
+
        /*
         * Synchronous commands from this op-mode must hold
         * the mutex, this ensures we don't try to send two
index 736fe9b..1a4ac92 100644 (file)
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
                return -EIO;
        }
 
+       priv->ucode_loaded = true;
+
        if (ucode_type != IWL_UCODE_WOWLAN) {
                /* delay a bit to give rfkill time to run */
                msleep(5);
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
                return ret;
        }
 
-       priv->ucode_loaded = true;
-
        return 0;
 }
 
index 17bedc5..12c4f31 100644 (file)
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans_pcie->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans_pcie->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        if (hw_rfkill && !run_in_rfkill)
                return -ERFKILL;
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
 
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
        iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans_pcie->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans_pcie->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
        return 0;
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
                 * op_mode.
                 */
                hw_rfkill = iwl_is_rfkill_set(trans);
+               if (hw_rfkill)
+                       set_bit(STATUS_RFKILL, &trans_pcie->status);
+               else
+                       clear_bit(STATUS_RFKILL, &trans_pcie->status);
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        }
 }
index 8595c16..cb5c679 100644 (file)
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy = 0;
 
-               if (!cmd->len)
+               if (!cmd->len[i])
                        continue;
 
                /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
index 5c395e2..feb2046 100644 (file)
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                }
                memcpy(adapter->upld_buf, skb->data,
                       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+               skb_push(skb, INTF_HEADER_LEN);
                if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
                                           PCI_DMA_FROMDEVICE))
                        return -1;
index 5ac9c93..e9b9c83 100644 (file)
@@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
        case EQC_WR_PROHIBIT:
                spin_lock_irqsave(&bdev->lock, flags);
                if (bdev->state != SCM_WR_PROHIBIT)
-                       pr_info("%lu: Write access to the SCM increment is suspended\n",
+                       pr_info("%lx: Write access to the SCM increment is suspended\n",
                                (unsigned long) bdev->scmdev->address);
                bdev->state = SCM_WR_PROHIBIT;
                spin_unlock_irqrestore(&bdev->lock, flags);
@@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
 
        spin_lock_irqsave(&bdev->lock, flags);
        if (bdev->state == SCM_WR_PROHIBIT)
-               pr_info("%lu: Write access to the SCM increment is restored\n",
+               pr_info("%lx: Write access to the SCM increment is restored\n",
                        (unsigned long) bdev->scmdev->address);
        bdev->state = SCM_OPER;
        spin_unlock_irqrestore(&bdev->lock, flags);
@@ -463,12 +463,15 @@ static int __init scm_blk_init(void)
                goto out;
 
        scm_major = ret;
-       if (scm_alloc_rqs(nr_requests))
+       ret = scm_alloc_rqs(nr_requests);
+       if (ret)
                goto out_unreg;
 
        scm_debug = debug_register("scm_log", 16, 1, 16);
-       if (!scm_debug)
+       if (!scm_debug) {
+               ret = -ENOMEM;
                goto out_free;
+       }
 
        debug_register_view(scm_debug, &debug_hex_ascii_view);
        debug_set_level(scm_debug, 2);
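
The scm_blk_init() hunk just above repairs two error paths: the return code of scm_alloc_rqs() is now propagated instead of being discarded, and a NULL return from debug_register() is translated into -ENOMEM before unwinding. A sketch of that init/unwind shape with hypothetical helpers; my_register() and friends are stand-ins, not s390 APIs.

#include <errno.h>
#include <stddef.h>

/* Hypothetical helpers standing in for the real registration calls. */
int my_register(void);
int my_alloc(void);
void *my_open_log(void);
void my_free(void);
void my_unregister(void);

static int my_init(void)
{
	int ret;

	ret = my_register();
	if (ret)
		goto out;

	ret = my_alloc();		/* propagate the helper's error code */
	if (ret)
		goto out_unregister;

	if (!my_open_log()) {		/* NULL-returning API: pick an errno */
		ret = -ENOMEM;
		goto out_free;
	}
	return 0;

out_free:
	my_free();
out_unregister:
	my_unregister();
out:
	return ret;
}
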
index 5f6180d..c98cf52 100644 (file)
@@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event)
 
        switch (event) {
        case SCM_CHANGE:
-               pr_info("%lu: The capabilities of the SCM increment changed\n",
+               pr_info("%lx: The capabilities of the SCM increment changed\n",
                        (unsigned long) scmdev->address);
                SCM_LOG(2, "State changed");
                SCM_LOG_STATE(2, scmdev);
index b907dba..cee69da 100644 (file)
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
        int i, rc;
 
        /* Check if the tty3270 is already there. */
-       view = raw3270_find_view(&tty3270_fn, tty->index);
+       view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
        if (!IS_ERR(view)) {
                tp = container_of(view, struct tty3270, view);
                tty->driver_data = tp;
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
                tp->inattr = TF_INPUT;
                return tty_port_install(&tp->port, driver, tty);
        }
-       if (tty3270_max_index < tty->index)
-               tty3270_max_index = tty->index;
+       if (tty3270_max_index < tty->index + 1)
+               tty3270_max_index = tty->index + 1;
 
        /* Allocate tty3270 structure on first open. */
        tp = tty3270_alloc_view();
        if (IS_ERR(tp))
                return PTR_ERR(tp);
 
-       rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
+       rc = raw3270_add_view(&tp->view, &tty3270_fn,
+                             tty->index + RAW3270_FIRSTMINOR);
        if (rc) {
                tty3270_free_view(tp);
                return rc;
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = {
 
 void tty3270_create_cb(int minor)
 {
-       tty_register_device(tty3270_driver, minor, NULL);
+       tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
 }
 
 void tty3270_destroy_cb(int minor)
 {
-       tty_unregister_device(tty3270_driver, minor);
+       tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
 }
 
 struct raw3270_notifier tty3270_notifier =
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void)
        driver->driver_name = "tty3270";
        driver->name = "3270/tty";
        driver->major = IBM_TTY3270_MAJOR;
-       driver->minor_start = 0;
+       driver->minor_start = RAW3270_FIRSTMINOR;
+       driver->name_base = RAW3270_FIRSTMINOR;
        driver->type = TTY_DRIVER_TYPE_SYSTEM;
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
index 2daf4b0..90bc7bd 100644 (file)
@@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
        fc_exch_init(lport);
        fc_rport_init(lport);
        fc_disc_init(lport);
+       fc_disc_config(lport, lport);
        return 0;
 }
 
@@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev,
        }
 
        ctlr = bnx2fc_to_ctlr(interface);
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
        interface->vlan_id = vlan_id;
 
        interface->timer_work_queue =
@@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev,
                goto ifput_err;
        }
 
-       lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
+       lport = bnx2fc_if_create(interface, &cdev->dev, 0);
        if (!lport) {
                printk(KERN_ERR PFX "Failed to create interface (%s)\n",
                        netdev->name);
@@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev,
        /* Make this master N_port */
        ctlr->lp = lport;
 
-       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
-
        if (link_state == BNX2FC_CREATE_LINK_UP)
                cdev->enabled = FCOE_CTLR_ENABLED;
        else
index b5d92fc..9bfdc9a 100644 (file)
@@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
        struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
-       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
 
        rtnl_lock();
        if (!fcoe->removed)
@@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        /* tear-down the FCoE controller */
        fcoe_ctlr_destroy(fip);
        scsi_host_put(fip->lp->host);
-       fcoe_ctlr_device_delete(ctlr_dev);
        dev_put(netdev);
        module_put(THIS_MODULE);
 }
@@ -2194,6 +2192,8 @@ out_nodev:
  */
 static void fcoe_destroy_work(struct work_struct *work)
 {
+       struct fcoe_ctlr_device *cdev;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
        struct fcoe_interface *fcoe;
        struct Scsi_Host *shost;
@@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work)
        mutex_lock(&fcoe_config_mutex);
 
        fcoe = port->priv;
+       ctlr = fcoe_to_ctlr(fcoe);
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
        fcoe_if_destroy(port->lport);
        fcoe_interface_cleanup(fcoe);
 
        mutex_unlock(&fcoe_config_mutex);
+
+       fcoe_ctlr_device_delete(cdev);
 }
 
 /**
@@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
                rc = -EIO;
                rtnl_unlock();
                fcoe_interface_cleanup(fcoe);
-               goto out_nortnl;
+               mutex_unlock(&fcoe_config_mutex);
+               fcoe_ctlr_device_delete(ctlr_dev);
+               goto out;
        }
 
        /* Make this the "master" N_Port */
@@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
 
 out_nodev:
        rtnl_unlock();
-out_nortnl:
        mutex_unlock(&fcoe_config_mutex);
+out:
        return rc;
 }
 
index 08c3bc3..a762472 100644 (file)
@@ -2814,6 +2814,47 @@ unlock:
                fc_lport_set_local_id(fip->lp, new_port_id);
 }
 
+/**
+ * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode
+ * @lport: The local port to be (re)configured
+ * @fip:   The FCoE controller whose mode is changing
+ * @fip_mode: The new fip mode
+ *
+ * Note that we shouldn't be changing the libfc discovery settings
+ * (fc_disc_config) while an lport is going through the libfc state
+ * machine. The mode can only be changed when a fcoe_ctlr device is
+ * disabled, so that should ensure that this routine is only called
+ * when nothing is happening.
+ */
+void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
+                       enum fip_state fip_mode)
+{
+       void *priv;
+
+       WARN_ON(lport->state != LPORT_ST_RESET &&
+               lport->state != LPORT_ST_DISABLED);
+
+       if (fip_mode == FIP_MODE_VN2VN) {
+               lport->rport_priv_size = sizeof(struct fcoe_rport);
+               lport->point_to_multipoint = 1;
+               lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
+               lport->tt.disc_start = fcoe_ctlr_disc_start;
+               lport->tt.disc_stop = fcoe_ctlr_disc_stop;
+               lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
+               priv = fip;
+       } else {
+               lport->rport_priv_size = 0;
+               lport->point_to_multipoint = 0;
+               lport->tt.disc_recv_req = NULL;
+               lport->tt.disc_start = NULL;
+               lport->tt.disc_stop = NULL;
+               lport->tt.disc_stop_final = NULL;
+               priv = lport;
+       }
+
+       fc_disc_config(lport, priv);
+}
+
 /**
  * fcoe_libfc_config() - Sets up libfc related properties for local port
  * @lport:    The local port to configure libfc for
@@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
        fc_exch_init(lport);
        fc_elsct_init(lport);
        fc_lport_init(lport);
-       if (fip->mode == FIP_MODE_VN2VN)
-               lport->rport_priv_size = sizeof(struct fcoe_rport);
        fc_rport_init(lport);
-       if (fip->mode == FIP_MODE_VN2VN) {
-               lport->point_to_multipoint = 1;
-               lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
-               lport->tt.disc_start = fcoe_ctlr_disc_start;
-               lport->tt.disc_stop = fcoe_ctlr_disc_stop;
-               lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
-               mutex_init(&lport->disc.disc_mutex);
-               INIT_LIST_HEAD(&lport->disc.rports);
-               lport->disc.priv = fip;
-       } else {
-               fc_disc_init(lport);
-       }
+       fc_disc_init(lport);
+       fcoe_ctlr_mode_set(lport, fip, fip->mode);
        return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
@@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected);
 void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
 {
        struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       struct fc_lport *lport = ctlr->lp;
 
        mutex_lock(&ctlr->ctlr_mutex);
        switch (ctlr_dev->mode) {
@@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
        }
 
        mutex_unlock(&ctlr->ctlr_mutex);
+
+       fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);
 }
 EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
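
To make the new fcoe_ctlr_mode_set() flow above easier to follow, here is a stand-alone sketch of the priv selection it performs before calling fc_disc_config(): VN2VN hands the FIP controller to the discovery layer, fabric mode hands the lport itself. Only the FIP_MODE_VN2VN check mirrors the hunk; the types below are user-space stand-ins, not the libfc/fcoe definitions.

#include <stdio.h>

/* User-space stand-ins for the kernel types used in the hunk above. */
enum fip_state { FIP_MODE_FABRIC, FIP_MODE_VN2VN };
struct fc_lport { const char *name; };
struct fcoe_ctlr { const char *name; };

/* Mirrors the decision in fcoe_ctlr_mode_set(). */
static void *disc_priv_for_mode(struct fc_lport *lport, struct fcoe_ctlr *fip,
				enum fip_state mode)
{
	return mode == FIP_MODE_VN2VN ? (void *)fip : (void *)lport;
}

int main(void)
{
	struct fc_lport lport = { "lport0" };
	struct fcoe_ctlr fip = { "fip0" };

	printf("VN2VN priv:  %p (the fcoe_ctlr)\n",
	       disc_priv_for_mode(&lport, &fip, FIP_MODE_VN2VN));
	printf("fabric priv: %p (the lport)\n",
	       disc_priv_for_mode(&lport, &fip, FIP_MODE_FABRIC));
	return 0;
}
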
index 8e561e6..880a906 100644 (file)
@@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport)
 }
 
 /**
- * fc_disc_init() - Initialize the discovery layer for a local port
- * @lport: The local port that needs the discovery layer to be initialized
+ * fc_disc_config() - Configure the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be configured
+ * @priv: Private data structure for users of the discovery layer
  */
-int fc_disc_init(struct fc_lport *lport)
+void fc_disc_config(struct fc_lport *lport, void *priv)
 {
-       struct fc_disc *disc;
+       struct fc_disc *disc = &lport->disc;
 
        if (!lport->tt.disc_start)
                lport->tt.disc_start = fc_disc_start;
@@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport)
                lport->tt.disc_recv_req = fc_disc_recv_req;
 
        disc = &lport->disc;
+
+       disc->priv = priv;
+}
+EXPORT_SYMBOL(fc_disc_config);
+
+/**
+ * fc_disc_init() - Initialize the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be initialized
+ */
+void fc_disc_init(struct fc_lport *lport)
+{
+       struct fc_disc *disc = &lport->disc;
+
        INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
        mutex_init(&disc->disc_mutex);
        INIT_LIST_HEAD(&disc->rports);
-
-       disc->priv = lport;
-
-       return 0;
 }
 EXPORT_SYMBOL(fc_disc_init);
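
The split above separates one-time discovery initialization from reconfiguration, which is what lets fcoe_ctlr_set_fip_mode() switch modes on a disabled controller without re-initializing locks or pending work. A hedged user-space sketch of that shape (stand-in structures, not the libfc ones):

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the libfc discovery state touched by the hunk above. */
struct disc {
	bool  initialized;   /* work queue, mutex, rport list set up once */
	void *priv;          /* lport in fabric mode, fcoe_ctlr in VN2VN  */
};

/* One-time setup, as fc_disc_init() now does. */
static void disc_init(struct disc *d)
{
	d->initialized = true;
	d->priv = NULL;
}

/* Re-runnable configuration, as fc_disc_config() now does: only the
 * callbacks and the private pointer change on a mode switch. */
static void disc_config(struct disc *d, void *priv)
{
	d->priv = priv;
}

int main(void)
{
	struct disc d;
	int fabric_priv, vn2vn_priv;

	disc_init(&d);
	disc_config(&d, &fabric_priv);   /* fcoe_libfc_config() path         */
	disc_config(&d, &vn2vn_priv);    /* later fcoe_ctlr_set_fip_mode()   */
	printf("initialized=%d priv=%p\n", d.initialized, d.priv);
	return 0;
}
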
index f80eee7..2be0de9 100644 (file)
@@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers"
 
 config SPI_ALTERA
        tristate "Altera SPI Controller"
+       depends on GENERIC_HARDIRQS
        select SPI_BITBANG
        help
          This is the driver for the Altera SPI Controller.
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA
 
 config SPI_PXA2XX
        tristate "PXA2xx SSP SPI master"
-       depends on ARCH_PXA || PCI || ACPI
+       depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS
        select PXA_SSP if ARCH_PXA
        help
          This enables using a PXA2xx or Sodaville SSP port as a SPI master
index 4c1546f..3e0e3f0 100644 (file)
@@ -21,6 +21,8 @@ source "drivers/gpu/vga/Kconfig"
 
 source "drivers/gpu/drm/Kconfig"
 
+source "drivers/gpu/host1x/Kconfig"
+
 config VGASTATE
        tristate
        default n
index 3cd6759..a92783e 100644 (file)
@@ -1228,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc)
 finished:
 
        fbcon_free_font(p, free_font);
+       if (free_font)
+               vc->vc_font.data = NULL;
 
        if (!con_is_bound(&fb_con))
                fbcon_exit();
index ab23c9b..4017833 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (C) 2012 Avionic Design GmbH
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #include <linux/bitops.h>
index 5ea7cb9..296e5c5 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
 
 obj-$(CONFIG_OMAP2_DSS) += dss/
-obj-$(CONFIG_FB_OMAP2) += omapfb/
 obj-y += displays/
+obj-$(CONFIG_FB_OMAP2) += omapfb/
index 72699f8..d7f69c0 100644 (file)
 #include <linux/sched.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 #define MIPID_CMD_READ_DISP_ID         0x04
 #define MIPID_CMD_READ_RED             0x06
@@ -336,8 +338,6 @@ static int acx565akm_bl_update_status(struct backlight_device *dev)
        r = 0;
        if (md->has_bc)
                acx565akm_set_brightness(md, level);
-       else if (md->dssdev->set_backlight)
-               r = md->dssdev->set_backlight(md->dssdev, level);
        else
                r = -ENODEV;
 
@@ -352,7 +352,7 @@ static int acx565akm_bl_get_intensity(struct backlight_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", __func__);
 
-       if (!md->has_bc && md->dssdev->set_backlight == NULL)
+       if (!md->has_bc)
                return -ENODEV;
 
        if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
@@ -496,21 +496,38 @@ static struct omap_video_timings acx_panel_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
+static struct panel_acx565akm_data *get_panel_data(struct omap_dss_device *dssdev)
+{
+       return (struct panel_acx565akm_data *) dssdev->data;
+}
+
 static int acx_panel_probe(struct omap_dss_device *dssdev)
 {
        int r;
        struct acx565akm_device *md = &acx_dev;
+       struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
        struct backlight_device *bldev;
        int max_brightness, brightness;
        struct backlight_properties props;
 
        dev_dbg(&dssdev->dev, "%s\n", __func__);
 
+       if (!panel_data)
+               return -EINVAL;
+
        /* FIXME AC bias ? */
        dssdev->panel.timings = acx_panel_timings;
 
-       if (dssdev->platform_enable)
-               dssdev->platform_enable(dssdev);
+       if (gpio_is_valid(panel_data->reset_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, panel_data->reset_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd reset");
+               if (r)
+                       return r;
+       }
+
+       if (gpio_is_valid(panel_data->reset_gpio))
+               gpio_set_value(panel_data->reset_gpio, 1);
+
        /*
         * After reset we have to wait 5 msec before the first
         * command can be sent.
@@ -522,8 +539,9 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
        r = panel_detect(md);
        if (r) {
                dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
-               if (!md->enabled && dssdev->platform_disable)
-                       dssdev->platform_disable(dssdev);
+               if (!md->enabled && gpio_is_valid(panel_data->reset_gpio))
+                       gpio_set_value(panel_data->reset_gpio, 0);
+
                return r;
        }
 
@@ -532,8 +550,8 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
        mutex_unlock(&acx_dev.mutex);
 
        if (!md->enabled) {
-               if (dssdev->platform_disable)
-                       dssdev->platform_disable(dssdev);
+               if (gpio_is_valid(panel_data->reset_gpio))
+                       gpio_set_value(panel_data->reset_gpio, 0);
        }
 
        /*------- Backlight control --------*/
@@ -557,15 +575,10 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
                md->cabc_mode = get_hw_cabc_mode(md);
        }
 
-       if (md->has_bc)
-               max_brightness = 255;
-       else
-               max_brightness = dssdev->max_backlight_level;
+       max_brightness = 255;
 
        if (md->has_bc)
                brightness = acx565akm_get_actual_brightness(md);
-       else if (dssdev->get_backlight)
-               brightness = dssdev->get_backlight(dssdev);
        else
                brightness = 0;
 
@@ -591,6 +604,7 @@ static void acx_panel_remove(struct omap_dss_device *dssdev)
 static int acx_panel_power_on(struct omap_dss_device *dssdev)
 {
        struct acx565akm_device *md = &acx_dev;
+       struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
        int r;
 
        dev_dbg(&dssdev->dev, "%s\n", __func__);
@@ -612,11 +626,8 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
        /*FIXME tweak me */
        msleep(50);
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto fail;
-       }
+       if (gpio_is_valid(panel_data->reset_gpio))
+               gpio_set_value(panel_data->reset_gpio, 1);
 
        if (md->enabled) {
                dev_dbg(&md->spi->dev, "panel already enabled\n");
@@ -645,8 +656,7 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
        mutex_unlock(&md->mutex);
 
        return acx565akm_bl_update_status(md->bl_dev);
-fail:
-       omapdss_sdi_display_disable(dssdev);
+
 fail_unlock:
        mutex_unlock(&md->mutex);
        return r;
@@ -655,6 +665,7 @@ fail_unlock:
 static void acx_panel_power_off(struct omap_dss_device *dssdev)
 {
        struct acx565akm_device *md = &acx_dev;
+       struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
 
        dev_dbg(&dssdev->dev, "%s\n", __func__);
 
@@ -678,8 +689,8 @@ static void acx_panel_power_off(struct omap_dss_device *dssdev)
         */
        msleep(50);
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       if (gpio_is_valid(panel_data->reset_gpio))
+               gpio_set_value(panel_data->reset_gpio, 0);
 
        /* FIXME need to tweak this delay */
        msleep(100);
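
The acx565akm conversion above is the same pattern repeated in the panel drivers that follow: the board-supplied platform_enable()/platform_disable() hooks are replaced by an optional reset GPIO taken from panel platform data, claimed with devm_gpio_request_one() and toggled directly. A compact user-space sketch of that flow (the gpio_* helpers below are stand-ins for the kernel gpiolib calls used in the hunks, and the GPIO number is hypothetical):

#include <stdio.h>

/* Stand-ins for the gpiolib helpers used by the panel hunks above. */
static int gpio_is_valid(int gpio)
{
	return gpio >= 0;
}

static int gpio_request_out(int gpio, int value)
{
	printf("request gpio %d, initial value %d\n", gpio, value);
	return 0;
}

static void gpio_set_value(int gpio, int value)
{
	printf("set gpio %d = %d\n", gpio, value);
}

/* Panel platform data now carries the GPIO instead of callbacks. */
struct panel_data { int reset_gpio; };

static int panel_probe(struct panel_data *pd)
{
	/* Optional GPIO: only claim it when the board actually wired one. */
	if (gpio_is_valid(pd->reset_gpio))
		return gpio_request_out(pd->reset_gpio, 0); /* start in reset */
	return 0;
}

static void panel_power(struct panel_data *pd, int on)
{
	/* Replaces dssdev->platform_enable()/platform_disable(). */
	if (gpio_is_valid(pd->reset_gpio))
		gpio_set_value(pd->reset_gpio, on);
}

int main(void)
{
	struct panel_data pd = { .reset_gpio = 170 };   /* hypothetical line */

	if (panel_probe(&pd))
		return 1;
	panel_power(&pd, 1);    /* power on  */
	panel_power(&pd, 0);    /* power off */
	return 0;
}
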
index c904f42..97363f7 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/gpio.h>
 #include <video/omapdss.h>
 
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 struct panel_config {
        struct omap_video_timings timings;
@@ -533,7 +534,7 @@ static inline struct panel_generic_dpi_data
 
 static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
 {
-       int r;
+       int r, i;
        struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
        struct panel_config *panel_config = drv_data->panel_config;
@@ -552,15 +553,13 @@ static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
        if (panel_config->power_on_delay)
                msleep(panel_config->power_on_delay);
 
-       if (panel_data->platform_enable) {
-               r = panel_data->platform_enable(dssdev);
-               if (r)
-                       goto err1;
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 0 : 1);
        }
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
+
 err0:
        return r;
 }
@@ -570,12 +569,15 @@ static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
        struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
        struct panel_config *panel_config = drv_data->panel_config;
+       int i;
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (panel_data->platform_disable)
-               panel_data->platform_disable(dssdev);
+       for (i = panel_data->num_gpios - 1; i >= 0; --i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 1 : 0);
+       }
 
        /* wait couple of vsyncs after disabling the LCD */
        if (panel_config->power_off_delay)
@@ -589,7 +591,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
        struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct panel_config *panel_config = NULL;
        struct panel_drv_data *drv_data = NULL;
-       int i;
+       int i, r;
 
        dev_dbg(&dssdev->dev, "probe\n");
 
@@ -606,9 +608,18 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
        if (!panel_config)
                return -EINVAL;
 
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ?
+                               GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+                               "panel gpio");
+               if (r)
+                       return r;
+       }
+
        dssdev->panel.timings = panel_config->timings;
 
-       drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+       drv_data = devm_kzalloc(&dssdev->dev, sizeof(*drv_data), GFP_KERNEL);
        if (!drv_data)
                return -ENOMEM;
 
@@ -624,12 +635,8 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
 
 static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
-
        dev_dbg(&dssdev->dev, "remove\n");
 
-       kfree(drv_data);
-
        dev_set_drvdata(&dssdev->dev, NULL);
 }
 
index 6e5abe8..4ea6548 100644 (file)
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/mutex.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 struct lb035q02_data {
        struct mutex lock;
@@ -48,9 +50,16 @@ static struct omap_video_timings lb035q02_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
+static inline struct panel_generic_dpi_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+       return (struct panel_generic_dpi_data *) dssdev->data;
+}
+
 static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
 {
-       int r;
+       struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+       int r, i;
 
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
                return 0;
@@ -62,54 +71,65 @@ static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
        if (r)
                goto err0;
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 0 : 1);
        }
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
+
 err0:
        return r;
 }
 
 static void lb035q02_panel_power_off(struct omap_dss_device *dssdev)
 {
+       struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+       int i;
+
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       for (i = panel_data->num_gpios - 1; i >= 0; --i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 1 : 0);
+       }
 
        omapdss_dpi_display_disable(dssdev);
 }
 
 static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
 {
+       struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct lb035q02_data *ld;
-       int r;
+       int r, i;
+
+       if (!panel_data)
+               return -EINVAL;
 
        dssdev->panel.timings = lb035q02_timings;
 
-       ld = kzalloc(sizeof(*ld), GFP_KERNEL);
-       if (!ld) {
-               r = -ENOMEM;
-               goto err;
+       ld = devm_kzalloc(&dssdev->dev, sizeof(*ld), GFP_KERNEL);
+       if (!ld)
+               return -ENOMEM;
+
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ?
+                               GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+                               "panel gpio");
+               if (r)
+                       return r;
        }
+
        mutex_init(&ld->lock);
        dev_set_drvdata(&dssdev->dev, ld);
+
        return 0;
-err:
-       return r;
 }
 
 static void lb035q02_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
-
-       kfree(ld);
 }
 
 static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
index dd12947..f94ead6 100644 (file)
@@ -5,11 +5,10 @@
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
-#include <linux/backlight.h>
 #include <linux/fb.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-n8x0.h>
+#include <video/omap-panel-data.h>
 
 #define BLIZZARD_REV_CODE                      0x00
 #define BLIZZARD_CONFIG                        0x02
@@ -69,7 +68,6 @@ static struct panel_drv_data {
 
        struct omap_dss_device *dssdev;
        struct spi_device *spidev;
-       struct backlight_device *bldev;
 
        int blizzard_ver;
 } s_drv_data;
@@ -297,12 +295,6 @@ static int n8x0_panel_power_on(struct omap_dss_device *dssdev)
 
        gpio_direction_output(bdata->ctrl_pwrdown, 1);
 
-       if (bdata->platform_enable) {
-               r = bdata->platform_enable(dssdev);
-               if (r)
-                       goto err_plat_en;
-       }
-
        omapdss_rfbi_set_size(dssdev, dssdev->panel.timings.x_res,
                dssdev->panel.timings.y_res);
        omapdss_rfbi_set_pixel_size(dssdev, dssdev->ctrl.pixel_size);
@@ -375,9 +367,6 @@ err_inv_panel:
 err_inv_chip:
        omapdss_rfbi_display_disable(dssdev);
 err_rfbi_en:
-       if (bdata->platform_disable)
-               bdata->platform_disable(dssdev);
-err_plat_en:
        gpio_direction_output(bdata->ctrl_pwrdown, 0);
        return r;
 }
@@ -394,9 +383,6 @@ static void n8x0_panel_power_off(struct omap_dss_device *dssdev)
        send_display_off(spi);
        send_sleep_in(spi);
 
-       if (bdata->platform_disable)
-               bdata->platform_disable(dssdev);
-
        /*
         * HACK: we should turn off the panel here, but there is some problem
         * with the initialization sequence, and we fail to init the panel if we
@@ -424,54 +410,10 @@ static const struct rfbi_timings n8x0_panel_timings = {
        .cs_pulse_width = 0,
 };
 
-static int n8x0_bl_update_status(struct backlight_device *dev)
-{
-       struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
-       struct panel_n8x0_data *bdata = get_board_data(dssdev);
-       struct panel_drv_data *ddata = get_drv_data(dssdev);
-       int r;
-       int level;
-
-       mutex_lock(&ddata->lock);
-
-       if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
-                       dev->props.power == FB_BLANK_UNBLANK)
-               level = dev->props.brightness;
-       else
-               level = 0;
-
-       dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
-
-       if (!bdata->set_backlight)
-               r = -EINVAL;
-       else
-               r = bdata->set_backlight(dssdev, level);
-
-       mutex_unlock(&ddata->lock);
-
-       return r;
-}
-
-static int n8x0_bl_get_intensity(struct backlight_device *dev)
-{
-       if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
-                       dev->props.power == FB_BLANK_UNBLANK)
-               return dev->props.brightness;
-
-       return 0;
-}
-
-static const struct backlight_ops n8x0_bl_ops = {
-       .get_brightness = n8x0_bl_get_intensity,
-       .update_status  = n8x0_bl_update_status,
-};
-
 static int n8x0_panel_probe(struct omap_dss_device *dssdev)
 {
        struct panel_n8x0_data *bdata = get_board_data(dssdev);
        struct panel_drv_data *ddata;
-       struct backlight_device *bldev;
-       struct backlight_properties props;
        int r;
 
        dev_dbg(&dssdev->dev, "probe\n");
@@ -491,40 +433,27 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev)
        dssdev->ctrl.rfbi_timings = n8x0_panel_timings;
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
 
-       memset(&props, 0, sizeof(props));
-       props.max_brightness = 127;
-       props.type = BACKLIGHT_PLATFORM;
-       bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
-                       dssdev, &n8x0_bl_ops, &props);
-       if (IS_ERR(bldev)) {
-               r = PTR_ERR(bldev);
-               dev_err(&dssdev->dev, "register backlight failed\n");
-               return r;
+       if (gpio_is_valid(bdata->panel_reset)) {
+               r = devm_gpio_request_one(&dssdev->dev, bdata->panel_reset,
+                               GPIOF_OUT_INIT_LOW, "PANEL RESET");
+               if (r)
+                       return r;
        }
 
-       ddata->bldev = bldev;
-
-       bldev->props.fb_blank = FB_BLANK_UNBLANK;
-       bldev->props.power = FB_BLANK_UNBLANK;
-       bldev->props.brightness = 127;
-
-       n8x0_bl_update_status(bldev);
+       if (gpio_is_valid(bdata->ctrl_pwrdown)) {
+               r = devm_gpio_request_one(&dssdev->dev, bdata->ctrl_pwrdown,
+                               GPIOF_OUT_INIT_LOW, "PANEL PWRDOWN");
+               if (r)
+                       return r;
+       }
 
        return 0;
 }
 
 static void n8x0_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct panel_drv_data *ddata = get_drv_data(dssdev);
-       struct backlight_device *bldev;
-
        dev_dbg(&dssdev->dev, "remove\n");
 
-       bldev = ddata->bldev;
-       bldev->props.power = FB_BLANK_POWERDOWN;
-       n8x0_bl_update_status(bldev);
-       backlight_device_unregister(bldev);
-
        dev_set_drvdata(&dssdev->dev, NULL);
 }
 
index c4e9c2b..20c3cd9 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
-#include <linux/backlight.h>
 #include <linux/fb.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 #define LCD_XRES               800
 #define LCD_YRES               480
  */
 #define LCD_PIXEL_CLOCK                23800
 
-struct nec_8048_data {
-       struct backlight_device *bl;
-};
-
 static const struct {
        unsigned char addr;
        unsigned char dat;
@@ -84,93 +81,47 @@ static struct omap_video_timings nec_8048_panel_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
 };
 
-static int nec_8048_bl_update_status(struct backlight_device *bl)
-{
-       struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
-       int level;
-
-       if (!dssdev->set_backlight)
-               return -EINVAL;
-
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               level = bl->props.brightness;
-       else
-               level = 0;
-
-       return dssdev->set_backlight(dssdev, level);
-}
-
-static int nec_8048_bl_get_brightness(struct backlight_device *bl)
+static inline struct panel_nec_nl8048_data
+*get_panel_data(const struct omap_dss_device *dssdev)
 {
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               return bl->props.brightness;
-
-       return 0;
+       return (struct panel_nec_nl8048_data *) dssdev->data;
 }
 
-static const struct backlight_ops nec_8048_bl_ops = {
-       .get_brightness = nec_8048_bl_get_brightness,
-       .update_status  = nec_8048_bl_update_status,
-};
-
 static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
 {
-       struct backlight_device *bl;
-       struct nec_8048_data *necd;
-       struct backlight_properties props;
+       struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
        int r;
 
-       dssdev->panel.timings = nec_8048_panel_timings;
-
-       necd = kzalloc(sizeof(*necd), GFP_KERNEL);
-       if (!necd)
-               return -ENOMEM;
-
-       dev_set_drvdata(&dssdev->dev, necd);
+       if (!pd)
+               return -EINVAL;
 
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.max_brightness = 255;
+       dssdev->panel.timings = nec_8048_panel_timings;
 
-       bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev,
-                       &nec_8048_bl_ops, &props);
-       if (IS_ERR(bl)) {
-               r = PTR_ERR(bl);
-               kfree(necd);
-               return r;
+       if (gpio_is_valid(pd->qvga_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->qvga_gpio,
+                               GPIOF_OUT_INIT_HIGH, "lcd QVGA");
+               if (r)
+                       return r;
        }
-       necd->bl = bl;
-
-       bl->props.fb_blank = FB_BLANK_UNBLANK;
-       bl->props.power = FB_BLANK_UNBLANK;
-       bl->props.max_brightness = dssdev->max_backlight_level;
-       bl->props.brightness = dssdev->max_backlight_level;
 
-       r = nec_8048_bl_update_status(bl);
-       if (r < 0)
-               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+       if (gpio_is_valid(pd->res_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->res_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd RES");
+               if (r)
+                       return r;
+       }
 
        return 0;
 }
 
 static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = necd->bl;
-
-       bl->props.power = FB_BLANK_POWERDOWN;
-       nec_8048_bl_update_status(bl);
-       backlight_device_unregister(bl);
-
-       kfree(necd);
 }
 
 static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
 {
+       struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
        int r;
-       struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = necd->bl;
 
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
                return 0;
@@ -182,36 +133,24 @@ static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
        if (r)
                goto err0;
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
-
-       r = nec_8048_bl_update_status(bl);
-       if (r < 0)
-               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+       if (gpio_is_valid(pd->res_gpio))
+               gpio_set_value_cansleep(pd->res_gpio, 1);
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
+
 err0:
        return r;
 }
 
 static void nec_8048_panel_power_off(struct omap_dss_device *dssdev)
 {
-       struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = necd->bl;
+       struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       bl->props.brightness = 0;
-       nec_8048_bl_update_status(bl);
-
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       if (gpio_is_valid(pd->res_gpio))
+               gpio_set_value_cansleep(pd->res_gpio, 0);
 
        omapdss_dpi_display_disable(dssdev);
 }
@@ -303,16 +242,22 @@ static int nec_8048_spi_remove(struct spi_device *spi)
        return 0;
 }
 
-static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+
+static int nec_8048_spi_suspend(struct device *dev)
 {
+       struct spi_device *spi = to_spi_device(dev);
+
        nec_8048_spi_send(spi, 2, 0x01);
        mdelay(40);
 
        return 0;
 }
 
-static int nec_8048_spi_resume(struct spi_device *spi)
+static int nec_8048_spi_resume(struct device *dev)
 {
+       struct spi_device *spi = to_spi_device(dev);
+
        /* reinitialize the panel */
        spi_setup(spi);
        nec_8048_spi_send(spi, 2, 0x00);
@@ -321,14 +266,20 @@ static int nec_8048_spi_resume(struct spi_device *spi)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(nec_8048_spi_pm_ops, nec_8048_spi_suspend,
+               nec_8048_spi_resume);
+#define NEC_8048_SPI_PM_OPS (&nec_8048_spi_pm_ops)
+#else
+#define NEC_8048_SPI_PM_OPS NULL
+#endif
+
 static struct spi_driver nec_8048_spi_driver = {
        .probe          = nec_8048_spi_probe,
        .remove         = nec_8048_spi_remove,
-       .suspend        = nec_8048_spi_suspend,
-       .resume         = nec_8048_spi_resume,
        .driver         = {
                .name   = "nec_8048_spi",
                .owner  = THIS_MODULE,
+               .pm     = NEC_8048_SPI_PM_OPS,
        },
 };
 
index 1b94018..62f2db0 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/gpio.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-picodlp.h>
+#include <video/omap-panel-data.h>
 
 #include "panel-picodlp.h"
 
@@ -354,12 +354,6 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
        struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
        struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       return r;
-       }
-
        gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
        msleep(1);
        gpio_set_value(picodlp_pdata->pwrgood_gpio, 1);
@@ -398,9 +392,6 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
 err:
        omapdss_dpi_display_disable(dssdev);
 err1:
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        return r;
 }
 
@@ -412,9 +403,6 @@ static void picodlp_panel_power_off(struct omap_dss_device *dssdev)
 
        gpio_set_value(picodlp_pdata->emu_done_gpio, 0);
        gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
-
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
 }
 
 static int picodlp_panel_probe(struct omap_dss_device *dssdev)
@@ -423,11 +411,14 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
        struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
        struct i2c_adapter *adapter;
        struct i2c_client *picodlp_i2c_client;
-       int r = 0, picodlp_adapter_id;
+       int r, picodlp_adapter_id;
 
        dssdev->panel.timings = pico_ls_timings;
 
-       picod =  kzalloc(sizeof(struct picodlp_data), GFP_KERNEL);
+       if (!picodlp_pdata)
+               return -EINVAL;
+
+       picod = devm_kzalloc(&dssdev->dev, sizeof(*picod), GFP_KERNEL);
        if (!picod)
                return -ENOMEM;
 
@@ -438,25 +429,37 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
        adapter = i2c_get_adapter(picodlp_adapter_id);
        if (!adapter) {
                dev_err(&dssdev->dev, "can't get i2c adapter\n");
-               r = -ENODEV;
-               goto err;
+               return -ENODEV;
        }
 
        picodlp_i2c_client = i2c_new_device(adapter, &picodlp_i2c_board_info);
        if (!picodlp_i2c_client) {
                dev_err(&dssdev->dev, "can't add i2c device::"
                                         " picodlp_i2c_client is NULL\n");
-               r = -ENODEV;
-               goto err;
+               return -ENODEV;
        }
 
        picod->picodlp_i2c_client = picodlp_i2c_client;
 
        dev_set_drvdata(&dssdev->dev, picod);
-       return r;
-err:
-       kfree(picod);
-       return r;
+
+       if (gpio_is_valid(picodlp_pdata->emu_done_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev,
+                               picodlp_pdata->emu_done_gpio,
+                               GPIOF_IN, "DLP EMU DONE");
+               if (r)
+                       return r;
+       }
+
+       if (gpio_is_valid(picodlp_pdata->pwrgood_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev,
+                               picodlp_pdata->pwrgood_gpio,
+                               GPIOF_OUT_INIT_LOW, "DLP PWRGOOD");
+               if (r)
+                       return r;
+       }
+
+       return 0;
 }
 
 static void picodlp_panel_remove(struct omap_dss_device *dssdev)
index cada8c6..74cb0eb 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/backlight.h>
 #include <linux/fb.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
-
-struct sharp_data {
-       struct backlight_device *bl;
-};
+#include <video/omap-panel-data.h>
 
 static struct omap_video_timings sharp_ls_timings = {
        .x_res = 480,
@@ -52,91 +49,67 @@ static struct omap_video_timings sharp_ls_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
-static int sharp_ls_bl_update_status(struct backlight_device *bl)
+static inline struct panel_sharp_ls037v7dw01_data
+*get_panel_data(const struct omap_dss_device *dssdev)
 {
-       struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
-       int level;
-
-       if (!dssdev->set_backlight)
-               return -EINVAL;
-
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               level = bl->props.brightness;
-       else
-               level = 0;
-
-       return dssdev->set_backlight(dssdev, level);
+       return (struct panel_sharp_ls037v7dw01_data *) dssdev->data;
 }
 
-static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
-{
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               return bl->props.brightness;
-
-       return 0;
-}
-
-static const struct backlight_ops sharp_ls_bl_ops = {
-       .get_brightness = sharp_ls_bl_get_brightness,
-       .update_status  = sharp_ls_bl_update_status,
-};
-
-
-
 static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
 {
-       struct backlight_properties props;
-       struct backlight_device *bl;
-       struct sharp_data *sd;
+       struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
        int r;
 
+       if (!pd)
+               return -EINVAL;
+
        dssdev->panel.timings = sharp_ls_timings;
 
-       sd = kzalloc(sizeof(*sd), GFP_KERNEL);
-       if (!sd)
-               return -ENOMEM;
+       if (gpio_is_valid(pd->mo_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->mo_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd MO");
+               if (r)
+                       return r;
+       }
 
-       dev_set_drvdata(&dssdev->dev, sd);
+       if (gpio_is_valid(pd->lr_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->lr_gpio,
+                               GPIOF_OUT_INIT_HIGH, "lcd LR");
+               if (r)
+                       return r;
+       }
 
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.max_brightness = dssdev->max_backlight_level;
-       props.type = BACKLIGHT_RAW;
+       if (gpio_is_valid(pd->ud_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->ud_gpio,
+                               GPIOF_OUT_INIT_HIGH, "lcd UD");
+               if (r)
+                       return r;
+       }
 
-       bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
-                       &sharp_ls_bl_ops, &props);
-       if (IS_ERR(bl)) {
-               r = PTR_ERR(bl);
-               kfree(sd);
-               return r;
+       if (gpio_is_valid(pd->resb_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->resb_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd RESB");
+               if (r)
+                       return r;
        }
-       sd->bl = bl;
 
-       bl->props.fb_blank = FB_BLANK_UNBLANK;
-       bl->props.power = FB_BLANK_UNBLANK;
-       bl->props.brightness = dssdev->max_backlight_level;
-       r = sharp_ls_bl_update_status(bl);
-       if (r < 0)
-               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+       if (gpio_is_valid(pd->ini_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->ini_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd INI");
+               if (r)
+                       return r;
+       }
 
        return 0;
 }
 
 static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = sd->bl;
-
-       bl->props.power = FB_BLANK_POWERDOWN;
-       sharp_ls_bl_update_status(bl);
-       backlight_device_unregister(bl);
-
-       kfree(sd);
 }
 
 static int sharp_ls_power_on(struct omap_dss_device *dssdev)
 {
+       struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
        int r = 0;
 
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
@@ -152,26 +125,29 @@ static int sharp_ls_power_on(struct omap_dss_device *dssdev)
        /* wait couple of vsyncs until enabling the LCD */
        msleep(50);
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
+       if (gpio_is_valid(pd->resb_gpio))
+               gpio_set_value_cansleep(pd->resb_gpio, 1);
+
+       if (gpio_is_valid(pd->ini_gpio))
+               gpio_set_value_cansleep(pd->ini_gpio, 1);
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
 err0:
        return r;
 }
 
 static void sharp_ls_power_off(struct omap_dss_device *dssdev)
 {
+       struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
+
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       if (gpio_is_valid(pd->ini_gpio))
+               gpio_set_value_cansleep(pd->ini_gpio, 0);
+
+       if (gpio_is_valid(pd->resb_gpio))
+               gpio_set_value_cansleep(pd->resb_gpio, 0);
 
        /* wait at least 5 vsyncs after disabling the LCD */
 
index a32407a..c4f78bd 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/mutex.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-nokia-dsi.h>
+#include <video/omap-panel-data.h>
 #include <video/mipi_display.h>
 
 /* DSI Virtual channel. Hardcoded for now. */
@@ -54,61 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
 
 static int taal_panel_reset(struct omap_dss_device *dssdev);
 
-/**
- * struct panel_config - panel configuration
- * @name: panel name
- * @type: panel type
- * @timings: panel resolution
- * @sleep: various panel specific delays, passed to msleep() if non-zero
- * @reset_sequence: reset sequence timings, passed to udelay() if non-zero
- * @regulators: array of panel regulators
- * @num_regulators: number of regulators in the array
- */
-struct panel_config {
-       const char *name;
-       int type;
-
-       struct omap_video_timings timings;
-
-       struct {
-               unsigned int sleep_in;
-               unsigned int sleep_out;
-               unsigned int hw_reset;
-               unsigned int enable_te;
-       } sleep;
-
-       struct {
-               unsigned int high;
-               unsigned int low;
-       } reset_sequence;
-
-};
-
-enum {
-       PANEL_TAAL,
-};
-
-static struct panel_config panel_configs[] = {
-       {
-               .name           = "taal",
-               .type           = PANEL_TAAL,
-               .timings        = {
-                       .x_res          = 864,
-                       .y_res          = 480,
-               },
-               .sleep          = {
-                       .sleep_in       = 5,
-                       .sleep_out      = 5,
-                       .hw_reset       = 5,
-                       .enable_te      = 100, /* possible panel bug */
-               },
-               .reset_sequence = {
-                       .high           = 10,
-                       .low            = 10,
-               },
-       },
-};
-
 struct taal_data {
        struct mutex lock;
 
@@ -121,9 +66,6 @@ struct taal_data {
 
        struct omap_dss_device *dssdev;
 
-       /* panel specific HW info */
-       struct panel_config *panel_config;
-
        /* panel HW configuration from DT or platform data */
        int reset_gpio;
        int ext_te_gpio;
@@ -134,8 +76,6 @@ struct taal_data {
 
        /* runtime variables */
        bool enabled;
-       u8 rotate;
-       bool mirror;
 
        bool te_enabled;
 
@@ -221,8 +161,7 @@ static int taal_sleep_in(struct taal_data *td)
 
        hw_guard_start(td, 120);
 
-       if (td->panel_config->sleep.sleep_in)
-               msleep(td->panel_config->sleep.sleep_in);
+       msleep(5);
 
        return 0;
 }
@@ -239,8 +178,7 @@ static int taal_sleep_out(struct taal_data *td)
 
        hw_guard_start(td, 120);
 
-       if (td->panel_config->sleep.sleep_out)
-               msleep(td->panel_config->sleep.sleep_out);
+       msleep(5);
 
        return 0;
 }
@@ -262,49 +200,6 @@ static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3)
        return 0;
 }
 
-static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
-{
-       int r;
-       u8 mode;
-       int b5, b6, b7;
-
-       r = taal_dcs_read_1(td, MIPI_DCS_GET_ADDRESS_MODE, &mode);
-       if (r)
-               return r;
-
-       switch (rotate) {
-       default:
-       case 0:
-               b7 = 0;
-               b6 = 0;
-               b5 = 0;
-               break;
-       case 1:
-               b7 = 0;
-               b6 = 1;
-               b5 = 1;
-               break;
-       case 2:
-               b7 = 1;
-               b6 = 1;
-               b5 = 0;
-               break;
-       case 3:
-               b7 = 1;
-               b6 = 0;
-               b5 = 1;
-               break;
-       }
-
-       if (mirror)
-               b6 = !b6;
-
-       mode &= ~((1<<7) | (1<<6) | (1<<5));
-       mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
-
-       return taal_dcs_write_1(td, MIPI_DCS_SET_ADDRESS_MODE, mode);
-}
-
 static int taal_set_update_window(struct taal_data *td,
                u16 x, u16 y, u16 w, u16 h)
 {
@@ -515,15 +410,8 @@ static const struct backlight_ops taal_bl_ops = {
 static void taal_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-
-       if (td->rotate == 0 || td->rotate == 2) {
-               *xres = dssdev->panel.timings.x_res;
-               *yres = dssdev->panel.timings.y_res;
-       } else {
-               *yres = dssdev->panel.timings.x_res;
-               *xres = dssdev->panel.timings.y_res;
-       }
+       *xres = dssdev->panel.timings.x_res;
+       *yres = dssdev->panel.timings.y_res;
 }
 
 static ssize_t taal_num_errors_show(struct device *dev,
@@ -845,17 +733,14 @@ static void taal_hw_reset(struct omap_dss_device *dssdev)
                return;
 
        gpio_set_value(td->reset_gpio, 1);
-       if (td->panel_config->reset_sequence.high)
-               udelay(td->panel_config->reset_sequence.high);
+       udelay(10);
        /* reset the panel */
        gpio_set_value(td->reset_gpio, 0);
        /* assert reset */
-       if (td->panel_config->reset_sequence.low)
-               udelay(td->panel_config->reset_sequence.low);
+       udelay(10);
        gpio_set_value(td->reset_gpio, 1);
        /* wait after releasing reset */
-       if (td->panel_config->sleep.hw_reset)
-               msleep(td->panel_config->sleep.hw_reset);
+       msleep(5);
 }
 
 static void taal_probe_pdata(struct taal_data *td,
@@ -881,8 +766,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
        struct backlight_properties props;
        struct taal_data *td;
        struct backlight_device *bldev = NULL;
-       int r, i;
-       const char *panel_name;
+       int r;
 
        dev_dbg(&dssdev->dev, "probe\n");
 
@@ -897,26 +781,13 @@ static int taal_probe(struct omap_dss_device *dssdev)
                const struct nokia_dsi_panel_data *pdata = dssdev->data;
 
                taal_probe_pdata(td, pdata);
-
-               panel_name = pdata->name;
        } else {
                return -ENODEV;
        }
 
-       if (panel_name == NULL)
-               return -EINVAL;
-
-       for (i = 0; i < ARRAY_SIZE(panel_configs); i++) {
-               if (strcmp(panel_name, panel_configs[i].name) == 0) {
-                       td->panel_config = &panel_configs[i];
-                       break;
-               }
-       }
-
-       if (!td->panel_config)
-               return -EINVAL;
-
-       dssdev->panel.timings = td->panel_config->timings;
+       dssdev->panel.timings.x_res = 864;
+       dssdev->panel.timings.y_res = 480;
+       dssdev->panel.timings.pixel_clock = DIV_ROUND_UP(864 * 480 * 60, 1000);
        dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
                OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
@@ -1049,6 +920,15 @@ static int taal_power_on(struct omap_dss_device *dssdev)
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        u8 id1, id2, id3;
        int r;
+       struct omap_dss_dsi_config dsi_config = {
+               .mode = OMAP_DSS_DSI_CMD_MODE,
+               .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
+               .timings = &dssdev->panel.timings,
+               .hs_clk_min = 150000000,
+               .hs_clk_max = 300000000,
+               .lp_clk_min = 7000000,
+               .lp_clk_max = 10000000,
+       };
 
        r = omapdss_dsi_configure_pins(dssdev, &td->pin_config);
        if (r) {
@@ -1056,14 +936,9 @@ static int taal_power_on(struct omap_dss_device *dssdev)
                goto err0;
        };
 
-       omapdss_dsi_set_size(dssdev, dssdev->panel.timings.x_res,
-               dssdev->panel.timings.y_res);
-       omapdss_dsi_set_pixel_format(dssdev, OMAP_DSS_DSI_FMT_RGB888);
-       omapdss_dsi_set_operation_mode(dssdev, OMAP_DSS_DSI_CMD_MODE);
-
-       r = omapdss_dsi_set_clocks(dssdev, 216000000, 10000000);
+       r = omapdss_dsi_set_config(dssdev, &dsi_config);
        if (r) {
-               dev_err(&dssdev->dev, "failed to set HS and LP clocks\n");
+               dev_err(&dssdev->dev, "failed to configure DSI\n");
                goto err0;
        }
 
@@ -1086,8 +961,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
                goto err;
 
        /* on early Taal revisions CABC is broken */
-       if (td->panel_config->type == PANEL_TAAL &&
-               (id2 == 0x00 || id2 == 0xff || id2 == 0x81))
+       if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
                td->cabc_broken = true;
 
        r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff);
@@ -1104,10 +978,6 @@ static int taal_power_on(struct omap_dss_device *dssdev)
        if (r)
                goto err;
 
-       r = taal_set_addr_mode(td, td->rotate, td->mirror);
-       if (r)
-               goto err;
-
        if (!td->cabc_broken) {
                r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode);
                if (r)
@@ -1129,8 +999,8 @@ static int taal_power_on(struct omap_dss_device *dssdev)
        td->enabled = 1;
 
        if (!td->intro_printed) {
-               dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n",
-                       td->panel_config->name, id1, id2, id3);
+               dev_info(&dssdev->dev, "panel revision %02x.%02x.%02x\n",
+                       id1, id2, id3);
                if (td->cabc_broken)
                        dev_info(&dssdev->dev,
                                        "old Taal version, CABC disabled\n");
@@ -1311,8 +1181,8 @@ static int taal_update(struct omap_dss_device *dssdev,
 
        /* XXX no need to send this every frame, but dsi break if not done */
        r = taal_set_update_window(td, 0, 0,
-                       td->panel_config->timings.x_res,
-                       td->panel_config->timings.y_res);
+                       dssdev->panel.timings.x_res,
+                       dssdev->panel.timings.y_res);
        if (r)
                goto err;
 
@@ -1365,8 +1235,8 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
        if (!gpio_is_valid(td->ext_te_gpio))
                omapdss_dsi_enable_te(dssdev, enable);
 
-       if (td->panel_config->sleep.enable_te)
-               msleep(td->panel_config->sleep.enable_te);
+       /* possible panel bug */
+       msleep(100);
 
        return r;
 }
@@ -1419,112 +1289,6 @@ static int taal_get_te(struct omap_dss_device *dssdev)
        return r;
 }
 
-static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       u16 dw, dh;
-       int r;
-
-       dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
-
-       mutex_lock(&td->lock);
-
-       if (td->rotate == rotate)
-               goto end;
-
-       dsi_bus_lock(dssdev);
-
-       if (td->enabled) {
-               r = taal_wake_up(dssdev);
-               if (r)
-                       goto err;
-
-               r = taal_set_addr_mode(td, rotate, td->mirror);
-               if (r)
-                       goto err;
-       }
-
-       if (rotate == 0 || rotate == 2) {
-               dw = dssdev->panel.timings.x_res;
-               dh = dssdev->panel.timings.y_res;
-       } else {
-               dw = dssdev->panel.timings.y_res;
-               dh = dssdev->panel.timings.x_res;
-       }
-
-       omapdss_dsi_set_size(dssdev, dw, dh);
-
-       td->rotate = rotate;
-
-       dsi_bus_unlock(dssdev);
-end:
-       mutex_unlock(&td->lock);
-       return 0;
-err:
-       dsi_bus_unlock(dssdev);
-       mutex_unlock(&td->lock);
-       return r;
-}
-
-static u8 taal_get_rotate(struct omap_dss_device *dssdev)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       int r;
-
-       mutex_lock(&td->lock);
-       r = td->rotate;
-       mutex_unlock(&td->lock);
-
-       return r;
-}
-
-static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       int r;
-
-       dev_dbg(&dssdev->dev, "mirror %d\n", enable);
-
-       mutex_lock(&td->lock);
-
-       if (td->mirror == enable)
-               goto end;
-
-       dsi_bus_lock(dssdev);
-       if (td->enabled) {
-               r = taal_wake_up(dssdev);
-               if (r)
-                       goto err;
-
-               r = taal_set_addr_mode(td, td->rotate, enable);
-               if (r)
-                       goto err;
-       }
-
-       td->mirror = enable;
-
-       dsi_bus_unlock(dssdev);
-end:
-       mutex_unlock(&td->lock);
-       return 0;
-err:
-       dsi_bus_unlock(dssdev);
-       mutex_unlock(&td->lock);
-       return r;
-}
-
-static bool taal_get_mirror(struct omap_dss_device *dssdev)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       int r;
-
-       mutex_lock(&td->lock);
-       r = td->mirror;
-       mutex_unlock(&td->lock);
-
-       return r;
-}
-
 static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
 {
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
@@ -1758,10 +1522,6 @@ static struct omap_dss_driver taal_driver = {
        .enable_te      = taal_enable_te,
        .get_te         = taal_get_te,
 
-       .set_rotate     = taal_rotate,
-       .get_rotate     = taal_get_rotate,
-       .set_mirror     = taal_mirror,
-       .get_mirror     = taal_get_mirror,
        .run_test       = taal_run_test,
        .memory_read    = taal_memory_read,
 
index 8281baa..46039c4 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/gpio.h>
 #include <drm/drm_edid.h>
 
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 static const struct omap_video_timings tfp410_default_timings = {
        .x_res          = 640,
@@ -135,7 +135,7 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
                if (!adapter) {
                        dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
                                        i2c_bus_num);
-                       return -EINVAL;
+                       return -EPROBE_DEFER;
                }
 
                ddata->i2c_adapter = adapter;
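
The tfp410 hunk above turns a missing DDC I2C adapter from a hard -EINVAL into -EPROBE_DEFER, so the driver core retries the probe once the adapter driver has bound. A minimal sketch of that pattern, not part of the patch, with an invented foo_probe() and bus number:

#include <linux/i2c.h>
#include <linux/platform_device.h>

/* Hypothetical probe: defer instead of failing when a dependency is absent. */
static int foo_probe(struct platform_device *pdev)
{
	struct i2c_adapter *adapter = i2c_get_adapter(2);	/* example bus */

	if (!adapter) {
		/*
		 * The adapter driver may simply not be bound yet; ask the
		 * driver core to call foo_probe() again later.
		 */
		return -EPROBE_DEFER;
	}

	/* ... use the adapter ... */
	i2c_put_adapter(adapter);
	return 0;
}
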
index 048c983..abf2bc4 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 #define TPO_R02_MODE(x)                ((x) & 7)
 #define TPO_R02_MODE_800x480   7
@@ -278,9 +279,14 @@ static const struct omap_video_timings tpo_td043_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
+static inline struct panel_tpo_td043_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+       return (struct panel_tpo_td043_data *) dssdev->data;
+}
+
 static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 {
-       int nreset_gpio = tpo_td043->nreset_gpio;
        int r;
 
        if (tpo_td043->powered_on)
@@ -293,8 +299,8 @@ static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
        /* wait for panel to stabilize */
        msleep(160);
 
-       if (gpio_is_valid(nreset_gpio))
-               gpio_set_value(nreset_gpio, 1);
+       if (gpio_is_valid(tpo_td043->nreset_gpio))
+               gpio_set_value(tpo_td043->nreset_gpio, 1);
 
        tpo_td043_write(tpo_td043->spi, 2,
                        TPO_R02_MODE(tpo_td043->mode) | TPO_R02_NCLK_RISING);
@@ -311,16 +317,14 @@ static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 
 static void tpo_td043_power_off(struct tpo_td043_device *tpo_td043)
 {
-       int nreset_gpio = tpo_td043->nreset_gpio;
-
        if (!tpo_td043->powered_on)
                return;
 
        tpo_td043_write(tpo_td043->spi, 3,
                        TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
 
-       if (gpio_is_valid(nreset_gpio))
-               gpio_set_value(nreset_gpio, 0);
+       if (gpio_is_valid(tpo_td043->nreset_gpio))
+               gpio_set_value(tpo_td043->nreset_gpio, 0);
 
        /* wait for at least 2 vsyncs before cutting off power */
        msleep(50);
@@ -347,12 +351,6 @@ static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
        if (r)
                goto err0;
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
-
        /*
         * If we are resuming from system suspend, SPI clocks might not be
         * enabled yet, so we'll program the LCD from SPI PM resume callback.
@@ -379,9 +377,6 @@ static void tpo_td043_disable_dss(struct omap_dss_device *dssdev)
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        omapdss_dpi_display_disable(dssdev);
 
        if (!tpo_td043->spi_suspended)
@@ -407,7 +402,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
 static int tpo_td043_probe(struct omap_dss_device *dssdev)
 {
        struct tpo_td043_device *tpo_td043 = g_tpo_td043;
-       int nreset_gpio = dssdev->reset_gpio;
+       struct panel_tpo_td043_data *pdata = get_panel_data(dssdev);
        int ret = 0;
 
        dev_dbg(&dssdev->dev, "probe\n");
@@ -417,6 +412,11 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
                return -ENODEV;
        }
 
+       if (!pdata)
+               return -EINVAL;
+
+       tpo_td043->nreset_gpio = pdata->nreset_gpio;
+
        dssdev->panel.timings = tpo_td043_timings;
        dssdev->ctrl.pixel_size = 24;
 
@@ -430,9 +430,10 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
                goto fail_regulator;
        }
 
-       if (gpio_is_valid(nreset_gpio)) {
-               ret = gpio_request_one(nreset_gpio, GPIOF_OUT_INIT_LOW,
-                                       "lcd reset");
+       if (gpio_is_valid(tpo_td043->nreset_gpio)) {
+               ret = devm_gpio_request_one(&dssdev->dev,
+                               tpo_td043->nreset_gpio, GPIOF_OUT_INIT_LOW,
+                               "lcd reset");
                if (ret < 0) {
                        dev_err(&dssdev->dev, "couldn't request reset GPIO\n");
                        goto fail_gpio_req;
@@ -457,14 +458,11 @@ fail_regulator:
 static void tpo_td043_remove(struct omap_dss_device *dssdev)
 {
        struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
-       int nreset_gpio = dssdev->reset_gpio;
 
        dev_dbg(&dssdev->dev, "remove\n");
 
        sysfs_remove_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
        regulator_put(tpo_td043->vcc_reg);
-       if (gpio_is_valid(nreset_gpio))
-               gpio_free(nreset_gpio);
 }
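
Switching to devm_gpio_request_one() ties the reset GPIO's lifetime to the device, which is why the explicit gpio_free() can disappear from tpo_td043_remove() above. A rough sketch of the managed pattern, with an invented helper name:

#include <linux/device.h>
#include <linux/gpio.h>

/* Hypothetical helper: request a reset GPIO as a managed (devm_*) resource. */
static int bar_request_reset_gpio(struct device *dev, int gpio)
{
	int ret;

	if (!gpio_is_valid(gpio))
		return 0;	/* optional GPIO, nothing to do */

	/*
	 * Freed automatically when the device is unbound, so no gpio_free()
	 * is needed in the remove path.
	 */
	ret = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW, "lcd reset");
	if (ret < 0)
		dev_err(dev, "couldn't request reset GPIO %d\n", gpio);

	return ret;
}
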
 
 static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
@@ -527,7 +525,6 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        tpo_td043->spi = spi;
-       tpo_td043->nreset_gpio = dssdev->reset_gpio;
        dev_set_drvdata(&spi->dev, tpo_td043);
        g_tpo_td043 = tpo_td043;
 
index d446bdf..a4b356a 100644 (file)
@@ -435,20 +435,27 @@ static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_man
 static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
 {
        unsigned long timeout = msecs_to_jiffies(500);
-       struct omap_dss_device *dssdev = mgr->get_device(mgr);
        u32 irq;
        int r;
 
+       if (mgr->output == NULL)
+               return -ENODEV;
+
        r = dispc_runtime_get();
        if (r)
                return r;
 
-       if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+       switch (mgr->output->id) {
+       case OMAP_DSS_OUTPUT_VENC:
                irq = DISPC_IRQ_EVSYNC_ODD;
-       else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+               break;
+       case OMAP_DSS_OUTPUT_HDMI:
                irq = DISPC_IRQ_EVSYNC_EVEN;
-       else
+               break;
+       default:
                irq = dispc_mgr_get_vsync_irq(mgr->id);
+               break;
+       }
 
        r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
 
index f8779d4..60cc6fe 100644 (file)
@@ -181,10 +181,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
        d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
                        write, &dss_debug_fops);
 
-       if (IS_ERR(d))
-               return PTR_ERR(d);
-
-       return 0;
+       return PTR_RET(d);
 }
 #else /* CONFIG_OMAP2_DSS_DEBUGFS */
 static inline int dss_initialize_debugfs(void)
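
PTR_RET() collapses the removed IS_ERR()/PTR_ERR() pair into a single call: it yields the encoded errno for an error pointer and 0 otherwise. Its behaviour amounts to the following open-coded form, shown only as illustration (the real helper lives in <linux/err.h>):

#include <linux/err.h>

/* Open-coded equivalent of PTR_RET(), for illustration only. */
static inline int example_ptr_ret(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}
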
index 05ff2b9..b33b016 100644 (file)
@@ -69,6 +69,8 @@ struct dispc_features {
        u8 mgr_height_start;
        u16 mgr_width_max;
        u16 mgr_height_max;
+       unsigned long max_lcd_pclk;
+       unsigned long max_tv_pclk;
        int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
                const struct omap_video_timings *mgr_timings,
                u16 width, u16 height, u16 out_width, u16 out_height,
@@ -85,6 +87,9 @@ struct dispc_features {
 
        /* no DISPC_IRQ_FRAMEDONETV on this SoC */
        bool no_framedone_tv:1;
+
+       /* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
+       bool mstandby_workaround:1;
 };
 
 #define DISPC_MAX_NR_FIFOS 5
@@ -97,6 +102,8 @@ static struct {
 
        int irq;
 
+       unsigned long core_clk_rate;
+
        u32 fifo_size[DISPC_MAX_NR_FIFOS];
        /* maps which plane is using a fifo. fifo-id -> plane-id */
        int fifo_assignment[DISPC_MAX_NR_FIFOS];
@@ -1584,6 +1591,7 @@ static void dispc_ovl_set_scaling(enum omap_plane plane,
 }
 
 static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
+               enum omap_dss_rotation_type rotation_type,
                bool mirroring, enum omap_color_mode color_mode)
 {
        bool row_repeat = false;
@@ -1634,6 +1642,15 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
        if (dss_has_feature(FEAT_ROWREPEATENABLE))
                REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
                        row_repeat ? 1 : 0, 18, 18);
+
+       if (color_mode == OMAP_DSS_COLOR_NV12) {
+               bool doublestride = (rotation_type == OMAP_DSS_ROT_TILER) &&
+                                       (rotation == OMAP_DSS_ROT_0 ||
+                                       rotation == OMAP_DSS_ROT_180);
+               /* DOUBLESTRIDE */
+               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
+       }
+
 }
 
 static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -2512,7 +2529,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
                dispc_ovl_set_vid_color_conv(plane, cconv);
        }
 
-       dispc_ovl_set_rotation_attrs(plane, rotation, mirror, color_mode);
+       dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, mirror,
+                       color_mode);
 
        dispc_ovl_set_zorder(plane, caps, zorder);
        dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
@@ -2823,6 +2841,15 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
        return true;
 }
 
+static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
+               unsigned long pclk)
+{
+       if (dss_mgr_is_lcd(channel))
+               return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+       else
+               return pclk <= dispc.feat->max_tv_pclk ? true : false;
+}
+
 bool dispc_mgr_timings_ok(enum omap_channel channel,
                const struct omap_video_timings *timings)
 {
@@ -2830,11 +2857,13 @@ bool dispc_mgr_timings_ok(enum omap_channel channel,
 
        timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
 
-       if (dss_mgr_is_lcd(channel))
-               timings_ok =  timings_ok && _dispc_lcd_timings_ok(timings->hsw,
-                                               timings->hfp, timings->hbp,
-                                               timings->vsw, timings->vfp,
-                                               timings->vbp);
+       timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixel_clock * 1000);
+
+       if (dss_mgr_is_lcd(channel)) {
+               timings_ok &= _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+                               timings->hbp, timings->vsw, timings->vfp,
+                               timings->vbp);
+       }
 
        return timings_ok;
 }
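
dispc_mgr_timings_ok() now also rejects modes whose pixel clock exceeds the per-SoC max_lcd_pclk / max_tv_pclk limits added to dispc_features; omap_video_timings keeps pixel_clock in kHz, hence the * 1000 before comparing against limits given in Hz. A standalone illustration of that arithmetic, borrowing the OMAP4 LCD limit from the feature tables further down and an arbitrary 1080p-class mode:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the pixel clock limit check; 170 MHz mirrors the OMAP4 LCD
 * entry below, the 148500 kHz mode is just an illustrative input. */
int main(void)
{
	unsigned long max_lcd_pclk = 170000000;		/* Hz */
	unsigned long pixel_clock_khz = 148500;		/* e.g. a 1080p60 mode */
	unsigned long pclk = pixel_clock_khz * 1000;	/* kHz -> Hz */
	bool ok = pclk <= max_lcd_pclk;

	printf("%lu Hz %s the LCD limit\n", pclk, ok ? "fits" : "exceeds");
	return 0;
}
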
@@ -2951,6 +2980,10 @@ static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
 
        dispc_write_reg(DISPC_DIVISORo(channel),
                        FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+
+       if (dss_has_feature(FEAT_CORE_CLK_DIV) == false &&
+                       channel == OMAP_DSS_CHANNEL_LCD)
+               dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
 }
 
 static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -3056,15 +3089,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
 
 unsigned long dispc_core_clk_rate(void)
 {
-       int lcd;
-       unsigned long fclk = dispc_fclk_rate();
-
-       if (dss_has_feature(FEAT_CORE_CLK_DIV))
-               lcd = REG_GET(DISPC_DIVISOR, 23, 16);
-       else
-               lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
-
-       return fclk / lcd;
+       return dispc.core_clk_rate;
 }
 
 static unsigned long dispc_plane_pclk_rate(enum omap_plane plane)
@@ -3313,67 +3338,79 @@ static void dispc_dump_regs(struct seq_file *s)
 #undef DUMPREG
 }
 
-/* with fck as input clock rate, find dispc dividers that produce req_pck */
-void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
+/* calculate clock rates using dividers in cinfo */
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
                struct dispc_clock_info *cinfo)
 {
-       u16 pcd_min, pcd_max;
-       unsigned long best_pck;
-       u16 best_ld, cur_ld;
-       u16 best_pd, cur_pd;
+       if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
+               return -EINVAL;
+       if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
+               return -EINVAL;
 
-       pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
-       pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
+       cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
+       cinfo->pck = cinfo->lck / cinfo->pck_div;
 
-       best_pck = 0;
-       best_ld = 0;
-       best_pd = 0;
+       return 0;
+}
 
-       for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
-               unsigned long lck = fck / cur_ld;
+bool dispc_div_calc(unsigned long dispc,
+               unsigned long pck_min, unsigned long pck_max,
+               dispc_div_calc_func func, void *data)
+{
+       int lckd, lckd_start, lckd_stop;
+       int pckd, pckd_start, pckd_stop;
+       unsigned long pck, lck;
+       unsigned long lck_max;
+       unsigned long pckd_hw_min, pckd_hw_max;
+       unsigned min_fck_per_pck;
+       unsigned long fck;
 
-               for (cur_pd = pcd_min; cur_pd <= pcd_max; ++cur_pd) {
-                       unsigned long pck = lck / cur_pd;
-                       long old_delta = abs(best_pck - req_pck);
-                       long new_delta = abs(pck - req_pck);
+#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK
+       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+#else
+       min_fck_per_pck = 0;
+#endif
 
-                       if (best_pck == 0 || new_delta < old_delta) {
-                               best_pck = pck;
-                               best_ld = cur_ld;
-                               best_pd = cur_pd;
+       pckd_hw_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
+       pckd_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
 
-                               if (pck == req_pck)
-                                       goto found;
-                       }
+       lck_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
 
-                       if (pck < req_pck)
-                               break;
-               }
+       pck_min = pck_min ? pck_min : 1;
+       pck_max = pck_max ? pck_max : ULONG_MAX;
 
-               if (lck / pcd_min < req_pck)
-                       break;
-       }
+       lckd_start = max(DIV_ROUND_UP(dispc, lck_max), 1ul);
+       lckd_stop = min(dispc / pck_min, 255ul);
 
-found:
-       cinfo->lck_div = best_ld;
-       cinfo->pck_div = best_pd;
-       cinfo->lck = fck / cinfo->lck_div;
-       cinfo->pck = cinfo->lck / cinfo->pck_div;
-}
+       for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) {
+               lck = dispc / lckd;
 
-/* calculate clock rates using dividers in cinfo */
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
-               struct dispc_clock_info *cinfo)
-{
-       if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
-               return -EINVAL;
-       if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
-               return -EINVAL;
+               pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min);
+               pckd_stop = min(lck / pck_min, pckd_hw_max);
 
-       cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
-       cinfo->pck = cinfo->lck / cinfo->pck_div;
+               for (pckd = pckd_start; pckd <= pckd_stop; ++pckd) {
+                       pck = lck / pckd;
 
-       return 0;
+                       /*
+                        * For OMAP2/3 the DISPC fclk is the same as LCD's logic
+                        * clock, which means we're configuring DISPC fclk here
+                        * also. Thus we need to use the calculated lck. For
+                        * OMAP4+ the DISPC fclk is a separate clock.
+                        */
+                       if (dss_has_feature(FEAT_CORE_CLK_DIV))
+                               fck = dispc_core_clk_rate();
+                       else
+                               fck = lck;
+
+                       if (fck < pck * min_fck_per_pck)
+                               continue;
+
+                       if (func(lckd, pckd, lck, pck, data))
+                               return true;
+               }
+       }
+
+       return false;
 }
 
 void dispc_mgr_set_clock_div(enum omap_channel channel,
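
dispc_div_calc() above replaces the old best-match divider search with an exhaustive walk over lck/pck divider pairs, handing each candidate to a caller-supplied callback that decides when to stop. A hedged sketch of a caller built on the new API (helper names and the tolerance window are invented; struct dispc_clock_info and the prototypes are assumed to come from the omapdss-internal dss.h):

/* Hypothetical user of the new dispc_div_calc() API. */
struct my_div_result {
	struct dispc_clock_info cinfo;
};

static bool my_div_cb(int lckd, int pckd, unsigned long lck,
		unsigned long pck, void *data)
{
	struct my_div_result *res = data;

	/* Accept the first pair inside the window given to dispc_div_calc() */
	res->cinfo.lck_div = lckd;
	res->cinfo.pck_div = pckd;
	res->cinfo.lck = lck;
	res->cinfo.pck = pck;

	return true;	/* returning true stops the search */
}

static int my_find_divs(unsigned long fck, unsigned long target_pck,
		struct dispc_clock_info *out)
{
	struct my_div_result res;

	/* allow roughly +/- 0.1% around the requested pixel clock */
	if (!dispc_div_calc(fck, target_pck - target_pck / 1000,
			target_pck + target_pck / 1000, my_div_cb, &res))
		return -EINVAL;

	*out = res.cinfo;
	return 0;
}
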
@@ -3451,6 +3488,8 @@ static void _omap_dispc_initial_config(void)
                l = FLD_MOD(l, 1, 0, 0);
                l = FLD_MOD(l, 1, 23, 16);
                dispc_write_reg(DISPC_DIVISOR, l);
+
+               dispc.core_clk_rate = dispc_fclk_rate();
        }
 
        /* FUNCGATED */
@@ -3466,6 +3505,9 @@ static void _omap_dispc_initial_config(void)
        dispc_configure_burst_sizes();
 
        dispc_ovl_enable_zorder_planes();
+
+       if (dispc.feat->mstandby_workaround)
+               REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
 }
 
 static const struct dispc_features omap24xx_dispc_feats __initconst = {
@@ -3479,6 +3521,7 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       66500000,
        .calc_scaling           =       dispc_ovl_calc_scaling_24xx,
        .calc_core_clk          =       calc_core_clk_24xx,
        .num_fifos              =       3,
@@ -3496,6 +3539,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       173000000,
+       .max_tv_pclk            =       59000000,
        .calc_scaling           =       dispc_ovl_calc_scaling_34xx,
        .calc_core_clk          =       calc_core_clk_34xx,
        .num_fifos              =       3,
@@ -3513,6 +3558,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       173000000,
+       .max_tv_pclk            =       59000000,
        .calc_scaling           =       dispc_ovl_calc_scaling_34xx,
        .calc_core_clk          =       calc_core_clk_34xx,
        .num_fifos              =       3,
@@ -3530,6 +3577,8 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       170000000,
+       .max_tv_pclk            =       185625000,
        .calc_scaling           =       dispc_ovl_calc_scaling_44xx,
        .calc_core_clk          =       calc_core_clk_44xx,
        .num_fifos              =       5,
@@ -3547,10 +3596,13 @@ static const struct dispc_features omap54xx_dispc_feats __initconst = {
        .mgr_height_start       =       27,
        .mgr_width_max          =       4096,
        .mgr_height_max         =       4096,
+       .max_lcd_pclk           =       170000000,
+       .max_tv_pclk            =       186000000,
        .calc_scaling           =       dispc_ovl_calc_scaling_44xx,
        .calc_core_clk          =       calc_core_clk_44xx,
        .num_fifos              =       5,
        .gfx_fifo_workaround    =       true,
+       .mstandby_workaround    =       true,
 };
 
 static int __init dispc_init_features(struct platform_device *pdev)
index 222363c..de4863d 100644 (file)
@@ -39,6 +39,7 @@
 #define DISPC_GLOBAL_BUFFER            0x0800
 #define DISPC_CONTROL3                  0x0848
 #define DISPC_CONFIG3                   0x084C
+#define DISPC_MSTANDBY_CTRL            0x0858
 
 /* DISPC overlay registers */
 #define DISPC_OVL_BA0(n)               (DISPC_OVL_BASE(n) + \
index 4af136a..757b57f 100644 (file)
@@ -63,15 +63,29 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
        case OMAPDSS_VER_OMAP3630:
        case OMAPDSS_VER_AM35xx:
                return NULL;
-       default:
-               break;
-       }
 
-       switch (channel) {
-       case OMAP_DSS_CHANNEL_LCD:
-               return dsi_get_dsidev_from_id(0);
-       case OMAP_DSS_CHANNEL_LCD2:
-               return dsi_get_dsidev_from_id(1);
+       case OMAPDSS_VER_OMAP4430_ES1:
+       case OMAPDSS_VER_OMAP4430_ES2:
+       case OMAPDSS_VER_OMAP4:
+               switch (channel) {
+               case OMAP_DSS_CHANNEL_LCD:
+                       return dsi_get_dsidev_from_id(0);
+               case OMAP_DSS_CHANNEL_LCD2:
+                       return dsi_get_dsidev_from_id(1);
+               default:
+                       return NULL;
+               }
+
+       case OMAPDSS_VER_OMAP5:
+               switch (channel) {
+               case OMAP_DSS_CHANNEL_LCD:
+                       return dsi_get_dsidev_from_id(0);
+               case OMAP_DSS_CHANNEL_LCD3:
+                       return dsi_get_dsidev_from_id(1);
+               default:
+                       return NULL;
+               }
+
        default:
                return NULL;
        }
@@ -91,75 +105,211 @@ static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
        }
 }
 
-static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
+struct dpi_clk_calc_ctx {
+       struct platform_device *dsidev;
+
+       /* inputs */
+
+       unsigned long pck_min, pck_max;
+
+       /* outputs */
+
+       struct dsi_clock_info dsi_cinfo;
+       struct dss_clock_info dss_cinfo;
+       struct dispc_clock_info dispc_cinfo;
+};
+
+static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       /*
+        * Odd dividers give us an uneven duty cycle, causing problems when
+        * the signal is level shifted. So skip all odd dividers when the
+        * pixel clock is on the higher side.
+        */
+       if (ctx->pck_min >= 1000000) {
+               if (lckd > 1 && lckd % 2 != 0)
+                       return false;
+
+               if (pckd > 1 && pckd % 2 != 0)
+                       return false;
+       }
+
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
+
+       return true;
+}
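
The filter above exists because dividing a clock by an odd value yields a non-50% duty cycle, which external level shifters on some boards cannot handle at higher pixel clock rates; below about 1 MHz the asymmetry is treated as harmless. The acceptance test reduces to the following, with an illustrative main():

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the divider filter above: reject odd dividers > 1 when the
 * requested pixel clock is in the "fast" range. */
static bool divider_acceptable(int div, unsigned long pck_min)
{
	if (pck_min >= 1000000 && div > 1 && div % 2 != 0)
		return false;
	return true;
}

int main(void)
{
	printf("div 3 at 25 MHz:  %d\n", divider_acceptable(3, 25000000)); /* 0 */
	printf("div 3 at 500 kHz: %d\n", divider_acceptable(3, 500000));   /* 1 */
	printf("div 4 at 25 MHz:  %d\n", divider_acceptable(4, 25000000)); /* 1 */
	return 0;
}
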
+
+
+static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+               void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       /*
+        * Odd dividers give us an uneven duty cycle, causing problems when
+        * the signal is level shifted. So skip all odd dividers when the
+        * pixel clock is on the higher side.
+        */
+       if (regm_dispc > 1 && regm_dispc % 2 != 0 && ctx->pck_min >= 1000000)
+               return false;
+
+       ctx->dsi_cinfo.regm_dispc = regm_dispc;
+       ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+
+       return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
+                       dpi_calc_dispc_cb, ctx);
+}
+
+
+static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
+               unsigned long pll,
+               void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       ctx->dsi_cinfo.regn = regn;
+       ctx->dsi_cinfo.regm = regm;
+       ctx->dsi_cinfo.fint = fint;
+       ctx->dsi_cinfo.clkin4ddr = pll;
+
+       return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
+                       dpi_calc_hsdiv_cb, ctx);
+}
+
+static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       ctx->dss_cinfo.fck = fck;
+       ctx->dss_cinfo.fck_div = fckd;
+
+       return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
+                       dpi_calc_dispc_cb, ctx);
+}
+
+static bool dpi_dsi_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+{
+       unsigned long clkin;
+       unsigned long pll_min, pll_max;
+
+       clkin = dsi_get_pll_clkin(dpi.dsidev);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->dsidev = dpi.dsidev;
+       ctx->pck_min = pck - 1000;
+       ctx->pck_max = pck + 1000;
+       ctx->dsi_cinfo.clkin = clkin;
+
+       pll_min = 0;
+       pll_max = 0;
+
+       return dsi_pll_calc(dpi.dsidev, clkin,
+                       pll_min, pll_max,
+                       dpi_calc_pll_cb, ctx);
+}
+
+static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+{
+       int i;
+
+       /*
+        * DSS fck gives us very few possibilities, so finding a good pixel
+        * clock may not be possible. We try multiple times to find the clock,
+        * each time widening the pixel clock range we look for, up to
+        * +/- ~15MHz.
+        */
+
+       for (i = 0; i < 25; ++i) {
+               bool ok;
+
+               memset(ctx, 0, sizeof(*ctx));
+               if (pck > 1000 * i * i * i)
+                       ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
+               else
+                       ctx->pck_min = 0;
+               ctx->pck_max = pck + 1000 * i * i * i;
+
+               ok = dss_div_calc(ctx->pck_min, dpi_calc_dss_cb, ctx);
+               if (ok)
+                       return ok;
+       }
+
+       return false;
+}
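
The retry loop above widens the acceptable pixel clock window cubically: on iteration i it is +/- 1000 * i^3 Hz, so it grows slowly at first (0, 1 kHz, 8 kHz, 27 kHz, ...) and reaches about +/- 13.8 MHz on the last iteration (i = 24), which is where the "+/- ~15MHz" in the comment comes from. A quick standalone check of those numbers:

#include <stdio.h>

/* Print the pixel clock search window used on each retry of the DSS fck
 * based calculation above (i runs 0..24, window = 1000 * i^3 Hz). */
int main(void)
{
	for (int i = 0; i < 25; ++i) {
		unsigned long delta = 1000UL * i * i * i;

		printf("retry %2d: +/- %lu Hz\n", i, delta);
	}
	return 0;
}
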
+
+
+
+static int dpi_set_dsi_clk(enum omap_channel channel,
                unsigned long pck_req, unsigned long *fck, int *lck_div,
                int *pck_div)
 {
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
-       struct dsi_clock_info dsi_cinfo;
-       struct dispc_clock_info dispc_cinfo;
+       struct dpi_clk_calc_ctx ctx;
        int r;
+       bool ok;
 
-       r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo,
-                       &dispc_cinfo);
-       if (r)
-               return r;
+       ok = dpi_dsi_clk_calc(pck_req, &ctx);
+       if (!ok)
+               return -EINVAL;
 
-       r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo);
+       r = dsi_pll_set_clock_div(dpi.dsidev, &ctx.dsi_cinfo);
        if (r)
                return r;
 
-       dss_select_lcd_clk_source(mgr->id,
-                       dpi_get_alt_clk_src(mgr->id));
+       dss_select_lcd_clk_source(channel,
+                       dpi_get_alt_clk_src(channel));
 
-       dpi.mgr_config.clock_info = dispc_cinfo;
+       dpi.mgr_config.clock_info = ctx.dispc_cinfo;
 
-       *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
-       *lck_div = dispc_cinfo.lck_div;
-       *pck_div = dispc_cinfo.pck_div;
+       *fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+       *lck_div = ctx.dispc_cinfo.lck_div;
+       *pck_div = ctx.dispc_cinfo.pck_div;
 
        return 0;
 }
 
-static int dpi_set_dispc_clk(struct omap_dss_device *dssdev,
-               unsigned long pck_req, unsigned long *fck, int *lck_div,
-               int *pck_div)
+static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
+               int *lck_div, int *pck_div)
 {
-       struct dss_clock_info dss_cinfo;
-       struct dispc_clock_info dispc_cinfo;
+       struct dpi_clk_calc_ctx ctx;
        int r;
+       bool ok;
 
-       r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo);
-       if (r)
-               return r;
+       ok = dpi_dss_clk_calc(pck_req, &ctx);
+       if (!ok)
+               return -EINVAL;
 
-       r = dss_set_clock_div(&dss_cinfo);
+       r = dss_set_clock_div(&ctx.dss_cinfo);
        if (r)
                return r;
 
-       dpi.mgr_config.clock_info = dispc_cinfo;
+       dpi.mgr_config.clock_info = ctx.dispc_cinfo;
 
-       *fck = dss_cinfo.fck;
-       *lck_div = dispc_cinfo.lck_div;
-       *pck_div = dispc_cinfo.pck_div;
+       *fck = ctx.dss_cinfo.fck;
+       *lck_div = ctx.dispc_cinfo.lck_div;
+       *pck_div = ctx.dispc_cinfo.pck_div;
 
        return 0;
 }
 
-static int dpi_set_mode(struct omap_dss_device *dssdev)
+static int dpi_set_mode(struct omap_overlay_manager *mgr)
 {
        struct omap_video_timings *t = &dpi.timings;
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
        int lck_div = 0, pck_div = 0;
        unsigned long fck = 0;
        unsigned long pck;
        int r = 0;
 
        if (dpi.dsidev)
-               r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck,
+               r = dpi_set_dsi_clk(mgr->id, t->pixel_clock * 1000, &fck,
                                &lck_div, &pck_div);
        else
-               r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck,
+               r = dpi_set_dispc_clk(t->pixel_clock * 1000, &fck,
                                &lck_div, &pck_div);
        if (r)
                return r;
@@ -179,10 +329,8 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
+static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr)
 {
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
-
        dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
 
        dpi.mgr_config.stallmode = false;
@@ -197,7 +345,7 @@ static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
 
 int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
 {
-       struct omap_dss_output *out = dssdev->output;
+       struct omap_dss_output *out = &dpi.output;
        int r;
 
        mutex_lock(&dpi.lock);
@@ -230,7 +378,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
        if (r)
                goto err_get_dispc;
 
-       r = dss_dpi_select_source(dssdev->channel);
+       r = dss_dpi_select_source(out->manager->id);
        if (r)
                goto err_src_sel;
 
@@ -244,11 +392,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
                        goto err_dsi_pll_init;
        }
 
-       r = dpi_set_mode(dssdev);
+       r = dpi_set_mode(out->manager);
        if (r)
                goto err_set_mode;
 
-       dpi_config_lcd_manager(dssdev);
+       dpi_config_lcd_manager(out->manager);
 
        mdelay(2);
 
@@ -285,7 +433,7 @@ EXPORT_SYMBOL(omapdss_dpi_display_enable);
 
 void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
 {
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dpi.output.manager;
 
        mutex_lock(&dpi.lock);
 
@@ -324,12 +472,12 @@ EXPORT_SYMBOL(omapdss_dpi_set_timings);
 int dpi_check_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
-       int r;
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dpi.output.manager;
        int lck_div, pck_div;
        unsigned long fck;
        unsigned long pck;
-       struct dispc_clock_info dispc_cinfo;
+       struct dpi_clk_calc_ctx ctx;
+       bool ok;
 
        if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
                return -EINVAL;
@@ -338,28 +486,21 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
                return -EINVAL;
 
        if (dpi.dsidev) {
-               struct dsi_clock_info dsi_cinfo;
-               r = dsi_pll_calc_clock_div_pck(dpi.dsidev,
-                               timings->pixel_clock * 1000,
-                               &dsi_cinfo, &dispc_cinfo);
+               ok = dpi_dsi_clk_calc(timings->pixel_clock * 1000, &ctx);
+               if (!ok)
+                       return -EINVAL;
 
-               if (r)
-                       return r;
-
-               fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+               fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
        } else {
-               struct dss_clock_info dss_cinfo;
-               r = dss_calc_clock_div(timings->pixel_clock * 1000,
-                               &dss_cinfo, &dispc_cinfo);
+               ok = dpi_dss_clk_calc(timings->pixel_clock * 1000, &ctx);
+               if (!ok)
+                       return -EINVAL;
 
-               if (r)
-                       return r;
-
-               fck = dss_cinfo.fck;
+               fck = ctx.dss_cinfo.fck;
        }
 
-       lck_div = dispc_cinfo.lck_div;
-       pck_div = dispc_cinfo.pck_div;
+       lck_div = ctx.dispc_cinfo.lck_div;
+       pck_div = ctx.dispc_cinfo.pck_div;
 
        pck = fck / lck_div / pck_div / 1000;
 
@@ -379,7 +520,7 @@ void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
 }
 EXPORT_SYMBOL(omapdss_dpi_set_data_lines);
 
-static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
+static int dpi_verify_dsi_pll(struct platform_device *dsidev)
 {
        int r;
 
@@ -401,7 +542,37 @@ static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
        return 0;
 }
 
-static int __init dpi_init_display(struct omap_dss_device *dssdev)
+/*
+ * Return a hardcoded channel for the DPI output. This should work for
+ * current use cases, but this can be later expanded to either resolve
+ * the channel in some more dynamic manner, or get the channel as a user
+ * parameter.
+ */
+static enum omap_channel dpi_get_channel(void)
+{
+       switch (omapdss_get_version()) {
+       case OMAPDSS_VER_OMAP24xx:
+       case OMAPDSS_VER_OMAP34xx_ES1:
+       case OMAPDSS_VER_OMAP34xx_ES3:
+       case OMAPDSS_VER_OMAP3630:
+       case OMAPDSS_VER_AM35xx:
+               return OMAP_DSS_CHANNEL_LCD;
+
+       case OMAPDSS_VER_OMAP4430_ES1:
+       case OMAPDSS_VER_OMAP4430_ES2:
+       case OMAPDSS_VER_OMAP4:
+               return OMAP_DSS_CHANNEL_LCD2;
+
+       case OMAPDSS_VER_OMAP5:
+               return OMAP_DSS_CHANNEL_LCD3;
+
+       default:
+               DSSWARN("unsupported DSS version\n");
+               return OMAP_DSS_CHANNEL_LCD;
+       }
+}
+
+static int dpi_init_display(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev;
 
@@ -421,12 +592,7 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
                dpi.vdds_dsi_reg = vdds_dsi;
        }
 
-       /*
-        * XXX We shouldn't need dssdev->channel for this. The dsi pll clock
-        * source for DPI is SoC integration detail, not something that should
-        * be configured in the dssdev
-        */
-       dsidev = dpi_get_dsidev(dssdev->channel);
+       dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
 
        if (dsidev && dpi_verify_dsi_pll(dsidev)) {
                dsidev = NULL;
@@ -441,7 +607,7 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *dpi_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        const char *def_disp_name = omapdss_get_default_display_name();
@@ -469,7 +635,7 @@ static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *p
        return def_dssdev;
 }
 
-static void __init dpi_probe_pdata(struct platform_device *dpidev)
+static int dpi_probe_pdata(struct platform_device *dpidev)
 {
        struct omap_dss_device *plat_dssdev;
        struct omap_dss_device *dssdev;
@@ -478,11 +644,11 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
        plat_dssdev = dpi_find_dssdev(dpidev);
 
        if (!plat_dssdev)
-               return;
+               return 0;
 
        dssdev = dss_alloc_and_init_device(&dpidev->dev);
        if (!dssdev)
-               return;
+               return -ENOMEM;
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
@@ -490,7 +656,7 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = omapdss_output_set_device(&dpi.output, dssdev);
@@ -498,7 +664,7 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
                DSSERR("failed to connect output to new device: %s\n",
                                dssdev->name);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = dss_add_device(dssdev);
@@ -506,17 +672,21 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
                DSSERR("device %s register failed: %d\n", dssdev->name, r);
                omapdss_output_unset_device(&dpi.output);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
+
+       return 0;
 }
 
-static void __init dpi_init_output(struct platform_device *pdev)
+static void dpi_init_output(struct platform_device *pdev)
 {
        struct omap_dss_output *out = &dpi.output;
 
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_DPI;
        out->type = OMAP_DISPLAY_TYPE_DPI;
+       out->name = "dpi.0";
+       out->dispc_channel = dpi_get_channel();
 
        dss_register_output(out);
 }
@@ -528,13 +698,19 @@ static void __exit dpi_uninit_output(struct platform_device *pdev)
        dss_unregister_output(out);
 }
 
-static int __init omap_dpi_probe(struct platform_device *pdev)
+static int omap_dpi_probe(struct platform_device *pdev)
 {
+       int r;
+
        mutex_init(&dpi.lock);
 
        dpi_init_output(pdev);
 
-       dpi_probe_pdata(pdev);
+       r = dpi_probe_pdata(pdev);
+       if (r) {
+               dpi_uninit_output(pdev);
+               return r;
+       }
 
        return 0;
 }
@@ -549,6 +725,7 @@ static int __exit omap_dpi_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver omap_dpi_driver = {
+       .probe          = omap_dpi_probe,
        .remove         = __exit_p(omap_dpi_remove),
        .driver         = {
                .name   = "omapdss_dpi",
@@ -558,7 +735,7 @@ static struct platform_driver omap_dpi_driver = {
 
 int __init dpi_init_platform_driver(void)
 {
-       return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
+       return platform_driver_register(&omap_dpi_driver);
 }
 
 void __exit dpi_uninit_platform_driver(void)
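
Dropping platform_driver_probe() in favour of platform_driver_register(), together with removing the __init annotations earlier in this file's diff, is what makes the -EPROBE_DEFER changes usable: platform_driver_probe() treats a failed probe as final and never retries, and __init code would already have been freed by the time a deferred probe ran. A generic, hedged sketch of the registered-driver shape (driver name and callbacks are invented):

#include <linux/module.h>
#include <linux/platform_device.h>

static int baz_probe(struct platform_device *pdev)
{
	/*
	 * A probe registered this way may return -EPROBE_DEFER when a
	 * dependency is missing and will be retried by the driver core;
	 * platform_driver_probe() would have treated that as a final failure.
	 */
	return 0;
}

static int baz_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver baz_driver = {
	.probe	= baz_probe,
	.remove	= baz_remove,
	.driver	= {
		.name	= "baz",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(baz_driver);
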
index 28d41d1..a73dedc 100644 (file)
@@ -200,6 +200,11 @@ struct dsi_reg { u16 idx; };
 
 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
 
+static int dsi_display_init_dispc(struct platform_device *dsidev,
+       struct omap_overlay_manager *mgr);
+static void dsi_display_uninit_dispc(struct platform_device *dsidev,
+       struct omap_overlay_manager *mgr);
+
 #define DSI_MAX_NR_ISRS                2
 #define DSI_MAX_NR_LANES       5
 
@@ -250,6 +255,24 @@ struct dsi_isr_tables {
        struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
 };
 
+struct dsi_clk_calc_ctx {
+       struct platform_device *dsidev;
+
+       /* inputs */
+
+       const struct omap_dss_dsi_config *config;
+
+       unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+       /* outputs */
+
+       struct dsi_clock_info dsi_cinfo;
+       struct dispc_clock_info dispc_cinfo;
+
+       struct omap_video_timings dispc_vm;
+       struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
 struct dsi_data {
        struct platform_device *pdev;
        void __iomem    *base;
@@ -261,6 +284,9 @@ struct dsi_data {
        struct clk *dss_clk;
        struct clk *sys_clk;
 
+       struct dispc_clock_info user_dispc_cinfo;
+       struct dsi_clock_info user_dsi_cinfo;
+
        struct dsi_clock_info current_cinfo;
 
        bool vdds_dsi_enabled;
@@ -324,6 +350,7 @@ struct dsi_data {
        unsigned long lpdiv_max;
 
        unsigned num_lanes_supported;
+       unsigned line_buffer_size;
 
        struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
        unsigned num_lanes_used;
@@ -1192,15 +1219,33 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
        return r;
 }
 
-static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
+static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
+               unsigned long lp_clk_min, unsigned long lp_clk_max)
+{
+       unsigned long dsi_fclk = cinfo->dsi_pll_hsdiv_dsi_clk;
+       unsigned lp_clk_div;
+       unsigned long lp_clk;
+
+       lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
+       lp_clk = dsi_fclk / 2 / lp_clk_div;
+
+       if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
+               return -EINVAL;
+
+       cinfo->lp_clk_div = lp_clk_div;
+       cinfo->lp_clk = lp_clk;
+
+       return 0;
+}
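
dsi_lp_clock_calc() above picks the LP clock divider by rounding up, so the resulting LP clock (dsi_fclk / 2 / lp_clk_div) can never exceed lp_clk_max, and then checks that it did not fall below lp_clk_min. A worked example with made-up but plausible rates:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Worked example of the LP clock divider math above; the input rates are
 * illustrative, not taken from a real board. */
int main(void)
{
	unsigned long dsi_fclk = 160000000;	/* Hz */
	unsigned long lp_clk_min = 7000000;	/* Hz */
	unsigned long lp_clk_max = 10000000;	/* Hz */

	unsigned long lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2); /* 8 */
	unsigned long lp_clk = dsi_fclk / 2 / lp_clk_div;		   /* 10 MHz */

	printf("lp_clk_div = %lu, lp_clk = %lu Hz (%s range)\n",
	       lp_clk_div, lp_clk,
	       (lp_clk >= lp_clk_min && lp_clk <= lp_clk_max) ? "in" : "out of");
	return 0;
}
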
+
+static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        unsigned long dsi_fclk;
        unsigned lp_clk_div;
        unsigned long lp_clk;
 
-       lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
+       lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div;
 
        if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
                return -EINVAL;
@@ -1272,6 +1317,75 @@ static int dsi_pll_power(struct platform_device *dsidev,
        return 0;
 }
 
+unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       return clk_get_rate(dsi->sys_clk);
+}
+
+bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
+               unsigned long out_min, dsi_hsdiv_calc_func func, void *data)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       int regm, regm_start, regm_stop;
+       unsigned long out_max;
+       unsigned long out;
+
+       out_min = out_min ? out_min : 1;
+       out_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+       regm_start = max(DIV_ROUND_UP(pll, out_max), 1ul);
+       regm_stop = min(pll / out_min, dsi->regm_dispc_max);
+
+       for (regm = regm_start; regm <= regm_stop; ++regm) {
+               out = pll / regm;
+
+               if (func(regm, out, data))
+                       return true;
+       }
+
+       return false;
+}
+
+bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
+               unsigned long pll_min, unsigned long pll_max,
+               dsi_pll_calc_func func, void *data)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       int regn, regn_start, regn_stop;
+       int regm, regm_start, regm_stop;
+       unsigned long fint, pll;
+       const unsigned long pll_hw_max = 1800000000;
+       unsigned long fint_hw_min, fint_hw_max;
+
+       fint_hw_min = dsi->fint_min;
+       fint_hw_max = dsi->fint_max;
+
+       regn_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+       regn_stop = min(clkin / fint_hw_min, dsi->regn_max);
+
+       pll_max = pll_max ? pll_max : ULONG_MAX;
+
+       for (regn = regn_start; regn <= regn_stop; ++regn) {
+               fint = clkin / regn;
+
+               regm_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+                               1ul);
+               regm_stop = min3(pll_max / fint / 2,
+                               pll_hw_max / fint / 2,
+                               dsi->regm_max);
+
+               for (regm = regm_start; regm <= regm_stop; ++regm) {
+                       pll = 2 * regm * fint;
+
+                       if (func(regn, regm, fint, pll, data))
+                               return true;
+               }
+       }
+
+       return false;
+}
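
dsi_pll_calc() above iterates regn (pre-divider) and regm (multiplier) so that fint = clkin / regn stays inside the PHY's allowed band and pll = 2 * regm * fint stays under the 1.8 GHz pll_hw_max ceiling, passing every candidate to the callback. One hand-checked candidate, using a typical 38.4 MHz sys_clk and the 0.75-2.1 MHz fint window quoted in the removed code further down:

#include <stdio.h>

/* One hand-checked candidate from the PLL search above; the fint window and
 * the 1.8 GHz cap are taken from this diff, the regn/regm pick is arbitrary. */
int main(void)
{
	unsigned long clkin = 38400000;		/* Hz, typical sys_clk */
	int regn = 20, regm = 250;

	unsigned long fint = clkin / regn;	/* 1.92 MHz: inside 0.75-2.1 MHz */
	unsigned long pll = 2UL * regm * fint;	/* 960 MHz: below the 1.8 GHz cap */

	printf("regn=%d regm=%d -> fint=%lu Hz, pll=%lu Hz\n",
	       regn, regm, fint, pll);
	return 0;
}
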
+
 /* calculate clock rates using dividers in cinfo */
 static int dsi_calc_clock_rates(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo)
@@ -1316,192 +1430,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev,
        return 0;
 }
 
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
-               unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct dsi_clock_info cur, best;
-       struct dispc_clock_info best_dispc;
-       int min_fck_per_pck;
-       int match = 0;
-       unsigned long dss_sys_clk, max_dss_fck;
-
-       dss_sys_clk = clk_get_rate(dsi->sys_clk);
-
-       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
-       if (req_pck == dsi->cache_req_pck &&
-                       dsi->cache_cinfo.clkin == dss_sys_clk) {
-               DSSDBG("DSI clock info found from cache\n");
-               *dsi_cinfo = dsi->cache_cinfo;
-               dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk,
-                       dispc_cinfo);
-               return 0;
-       }
-
-       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
-       if (min_fck_per_pck &&
-               req_pck * min_fck_per_pck > max_dss_fck) {
-               DSSERR("Requested pixel clock not possible with the current "
-                               "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
-                               "the constraint off.\n");
-               min_fck_per_pck = 0;
-       }
-
-       DSSDBG("dsi_pll_calc\n");
-
-retry:
-       memset(&best, 0, sizeof(best));
-       memset(&best_dispc, 0, sizeof(best_dispc));
-
-       memset(&cur, 0, sizeof(cur));
-       cur.clkin = dss_sys_clk;
-
-       /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
-       /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
-       for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-               cur.fint = cur.clkin / cur.regn;
-
-               if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
-                       continue;
-
-               /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
-               for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
-                       unsigned long a, b;
-
-                       a = 2 * cur.regm * (cur.clkin/1000);
-                       b = cur.regn;
-                       cur.clkin4ddr = a / b * 1000;
-
-                       if (cur.clkin4ddr > 1800 * 1000 * 1000)
-                               break;
-
-                       /* dsi_pll_hsdiv_dispc_clk(MHz) =
-                        * DSIPHY(MHz) / regm_dispc  < 173MHz/186Mhz */
-                       for (cur.regm_dispc = 1; cur.regm_dispc <
-                                       dsi->regm_dispc_max; ++cur.regm_dispc) {
-                               struct dispc_clock_info cur_dispc;
-                               cur.dsi_pll_hsdiv_dispc_clk =
-                                       cur.clkin4ddr / cur.regm_dispc;
-
-                               if (cur.regm_dispc > 1 &&
-                                               cur.regm_dispc % 2 != 0 &&
-                                               req_pck >= 1000000)
-                                       continue;
-
-                               /* this will narrow down the search a bit,
-                                * but still give pixclocks below what was
-                                * requested */
-                               if (cur.dsi_pll_hsdiv_dispc_clk  < req_pck)
-                                       break;
-
-                               if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
-                                       continue;
-
-                               if (min_fck_per_pck &&
-                                       cur.dsi_pll_hsdiv_dispc_clk <
-                                               req_pck * min_fck_per_pck)
-                                       continue;
-
-                               match = 1;
-
-                               dispc_find_clk_divs(req_pck,
-                                               cur.dsi_pll_hsdiv_dispc_clk,
-                                               &cur_dispc);
-
-                               if (abs(cur_dispc.pck - req_pck) <
-                                               abs(best_dispc.pck - req_pck)) {
-                                       best = cur;
-                                       best_dispc = cur_dispc;
-
-                                       if (cur_dispc.pck == req_pck)
-                                               goto found;
-                               }
-                       }
-               }
-       }
-found:
-       if (!match) {
-               if (min_fck_per_pck) {
-                       DSSERR("Could not find suitable clock settings.\n"
-                                       "Turning FCK/PCK constraint off and"
-                                       "trying again.\n");
-                       min_fck_per_pck = 0;
-                       goto retry;
-               }
-
-               DSSERR("Could not find suitable clock settings.\n");
-
-               return -EINVAL;
-       }
-
-       /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
-       best.regm_dsi = 0;
-       best.dsi_pll_hsdiv_dsi_clk = 0;
-
-       if (dsi_cinfo)
-               *dsi_cinfo = best;
-       if (dispc_cinfo)
-               *dispc_cinfo = best_dispc;
-
-       dsi->cache_req_pck = req_pck;
-       dsi->cache_clk_freq = 0;
-       dsi->cache_cinfo = best;
-
-       return 0;
-}
-
-static int dsi_pll_calc_ddrfreq(struct platform_device *dsidev,
-               unsigned long req_clkin4ddr, struct dsi_clock_info *cinfo)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct dsi_clock_info cur, best;
-
-       DSSDBG("dsi_pll_calc_ddrfreq\n");
-
-       memset(&best, 0, sizeof(best));
-       memset(&cur, 0, sizeof(cur));
-
-       cur.clkin = clk_get_rate(dsi->sys_clk);
-
-       for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-               cur.fint = cur.clkin / cur.regn;
-
-               if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
-                       continue;
-
-               /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
-               for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
-                       unsigned long a, b;
-
-                       a = 2 * cur.regm * (cur.clkin/1000);
-                       b = cur.regn;
-                       cur.clkin4ddr = a / b * 1000;
-
-                       if (cur.clkin4ddr > 1800 * 1000 * 1000)
-                               break;
-
-                       if (abs(cur.clkin4ddr - req_clkin4ddr) <
-                                       abs(best.clkin4ddr - req_clkin4ddr)) {
-                               best = cur;
-                               DSSDBG("best %ld\n", best.clkin4ddr);
-                       }
-
-                       if (cur.clkin4ddr == req_clkin4ddr)
-                               goto found;
-               }
-       }
-found:
-       if (cinfo)
-               *cinfo = best;
-
-       return 0;
-}
-
-static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
-               struct dsi_clock_info *cinfo)
+static void dsi_pll_calc_dsi_fck(struct dsi_clock_info *cinfo)
 {
        unsigned long max_dsi_fck;
 
@@ -1511,90 +1440,6 @@ static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
        cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
 }
 
-static int dsi_pll_calc_dispc_fck(struct platform_device *dsidev,
-               unsigned long req_pck, struct dsi_clock_info *cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       unsigned regm_dispc, best_regm_dispc;
-       unsigned long dispc_clk, best_dispc_clk;
-       int min_fck_per_pck;
-       unsigned long max_dss_fck;
-       struct dispc_clock_info best_dispc;
-       bool match;
-
-       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
-       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
-       if (min_fck_per_pck &&
-                       req_pck * min_fck_per_pck > max_dss_fck) {
-               DSSERR("Requested pixel clock not possible with the current "
-                               "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
-                               "the constraint off.\n");
-               min_fck_per_pck = 0;
-       }
-
-retry:
-       best_regm_dispc = 0;
-       best_dispc_clk = 0;
-       memset(&best_dispc, 0, sizeof(best_dispc));
-       match = false;
-
-       for (regm_dispc = 1; regm_dispc < dsi->regm_dispc_max; ++regm_dispc) {
-               struct dispc_clock_info cur_dispc;
-
-               dispc_clk = cinfo->clkin4ddr / regm_dispc;
-
-               /* this will narrow down the search a bit,
-                * but still give pixclocks below what was
-                * requested */
-               if (dispc_clk  < req_pck)
-                       break;
-
-               if (dispc_clk > max_dss_fck)
-                       continue;
-
-               if (min_fck_per_pck && dispc_clk < req_pck * min_fck_per_pck)
-                       continue;
-
-               match = true;
-
-               dispc_find_clk_divs(req_pck, dispc_clk, &cur_dispc);
-
-               if (abs(cur_dispc.pck - req_pck) <
-                               abs(best_dispc.pck - req_pck)) {
-                       best_regm_dispc = regm_dispc;
-                       best_dispc_clk = dispc_clk;
-                       best_dispc = cur_dispc;
-
-                       if (cur_dispc.pck == req_pck)
-                               goto found;
-               }
-       }
-
-       if (!match) {
-               if (min_fck_per_pck) {
-                       DSSERR("Could not find suitable clock settings.\n"
-                                       "Turning FCK/PCK constraint off and"
-                                       "trying again.\n");
-                       min_fck_per_pck = 0;
-                       goto retry;
-               }
-
-               DSSERR("Could not find suitable clock settings.\n");
-
-               return -EINVAL;
-       }
-found:
-       cinfo->regm_dispc = best_regm_dispc;
-       cinfo->dsi_pll_hsdiv_dispc_clk = best_dispc_clk;
-
-       *dispc_cinfo = best_dispc;
-
-       return 0;
-}
-
 int dsi_pll_set_clock_div(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo)
 {
@@ -2783,6 +2628,7 @@ static int dsi_vc_enable(struct platform_device *dsidev, int channel,
 
 static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
 {
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        u32 r;
 
        DSSDBG("Initial config of virtual channel %d", channel);
@@ -2807,6 +2653,8 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
        r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
 
        dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
+
+       dsi->vc[channel].source = DSI_VC_SOURCE_L4;
 }
 
 static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
@@ -3777,13 +3625,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
 
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                int bpp = dsi_get_pixel_size(dsi->pix_fmt);
-               unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
                struct omap_video_timings *timings = &dsi->timings;
                /*
                 * Don't use line buffers if width is greater than the video
                 * port's line buffer size
                 */
-               if (line_buf_size <= timings->x_res * bpp / 8)
+               if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
                        num_line_buffers = 0;
                else
                        num_line_buffers = 2;
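
The hunk above reuses the dsi->line_buffer_size field instead of calling dsi_get_line_buf_size() on every configuration, and the buffering decision itself is a comparison of that size against the bytes one output line needs (x_res * bpp / 8). A self-contained illustration with made-up sizes:

#include <stdio.h>

/* Worked example of the line-buffer decision above, with invented sizes. */
int main(void)
{
	unsigned x_res = 1280, bpp = 24;	/* hypothetical RGB888 mode */
	unsigned line_buffer_size = 1024;	/* hypothetical VP buffer, bytes */
	unsigned line_bytes = x_res * bpp / 8;	/* 3840 bytes per line */
	int num_line_buffers = (line_buffer_size <= line_bytes) ? 0 : 2;

	printf("line needs %u bytes -> %d line buffers\n",
	       line_bytes, num_line_buffers);
	return 0;
}
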
@@ -3799,18 +3646,22 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
 static void dsi_config_vp_sync_events(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       bool vsync_end = dsi->vm_timings.vp_vsync_end;
-       bool hsync_end = dsi->vm_timings.vp_hsync_end;
+       bool sync_end;
        u32 r;
 
+       if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
+               sync_end = true;
+       else
+               sync_end = false;
+
        r = dsi_read_reg(dsidev, DSI_CTRL);
        r = FLD_MOD(r, 1, 9, 9);                /* VP_DE_POL */
        r = FLD_MOD(r, 1, 10, 10);              /* VP_HSYNC_POL */
        r = FLD_MOD(r, 1, 11, 11);              /* VP_VSYNC_POL */
        r = FLD_MOD(r, 1, 15, 15);              /* VP_VSYNC_START */
-       r = FLD_MOD(r, vsync_end, 16, 16);      /* VP_VSYNC_END */
+       r = FLD_MOD(r, sync_end, 16, 16);       /* VP_VSYNC_END */
        r = FLD_MOD(r, 1, 17, 17);              /* VP_HSYNC_START */
-       r = FLD_MOD(r, hsync_end, 18, 18);      /* VP_HSYNC_END */
+       r = FLD_MOD(r, sync_end, 18, 18);       /* VP_HSYNC_END */
        dsi_write_reg(dsidev, DSI_CTRL, r);
 }
 
@@ -3897,9 +3748,8 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
        return max(lp_inter, 0);
 }
 
-static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        int blanking_mode;
        int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
@@ -3910,7 +3760,7 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
        struct omap_video_timings *timings = &dsi->timings;
        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
        int ndl = dsi->num_lanes_used - 1;
-       int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+       int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1;
        int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
        int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
        int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
@@ -4015,9 +3865,8 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
        dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
 }
 
-static int dsi_proto_config(struct omap_dss_device *dssdev)
+static int dsi_proto_config(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        u32 r;
        int buswidth = 0;
@@ -4075,7 +3924,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                dsi_config_vp_sync_events(dsidev);
                dsi_config_blanking_modes(dsidev);
-               dsi_config_cmd_mode_interleaving(dssdev);
+               dsi_config_cmd_mode_interleaving(dsidev);
        }
 
        dsi_vc_initial_config(dsidev, 0);
@@ -4159,11 +4008,12 @@ static void dsi_proto_timings(struct platform_device *dsidev)
                int vfp = dsi->vm_timings.vfp;
                int vbp = dsi->vm_timings.vbp;
                int window_sync = dsi->vm_timings.window_sync;
-               bool hsync_end = dsi->vm_timings.vp_hsync_end;
+               bool hsync_end;
                struct omap_video_timings *timings = &dsi->timings;
                int bpp = dsi_get_pixel_size(dsi->pix_fmt);
                int tl, t_he, width_bytes;
 
+               hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
                t_he = hsync_end ?
                        ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
 
@@ -4266,82 +4116,26 @@ int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
 }
 EXPORT_SYMBOL(omapdss_dsi_configure_pins);
 
-int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
-               unsigned long ddr_clk, unsigned long lp_clk)
-{
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct dsi_clock_info cinfo;
-       struct dispc_clock_info dispc_cinfo;
-       unsigned lp_clk_div;
-       unsigned long dsi_fclk;
-       int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
-       unsigned long pck;
-       int r;
-
-       DSSDBG("Setting DSI clocks: ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
-
-       mutex_lock(&dsi->lock);
-
-       /* Calculate PLL output clock */
-       r = dsi_pll_calc_ddrfreq(dsidev, ddr_clk * 4, &cinfo);
-       if (r)
-               goto err;
-
-       /* Calculate PLL's DSI clock */
-       dsi_pll_calc_dsi_fck(dsidev, &cinfo);
-
-       /* Calculate PLL's DISPC clock and pck & lck divs */
-       pck = cinfo.clkin4ddr / 16 * (dsi->num_lanes_used - 1) * 8 / bpp;
-       DSSDBG("finding dispc dividers for pck %lu\n", pck);
-       r = dsi_pll_calc_dispc_fck(dsidev, pck, &cinfo, &dispc_cinfo);
-       if (r)
-               goto err;
-
-       /* Calculate LP clock */
-       dsi_fclk = cinfo.dsi_pll_hsdiv_dsi_clk;
-       lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk * 2);
-
-       dssdev->clocks.dsi.regn = cinfo.regn;
-       dssdev->clocks.dsi.regm = cinfo.regm;
-       dssdev->clocks.dsi.regm_dispc = cinfo.regm_dispc;
-       dssdev->clocks.dsi.regm_dsi = cinfo.regm_dsi;
-
-       dssdev->clocks.dsi.lp_clk_div = lp_clk_div;
-
-       dssdev->clocks.dispc.channel.lck_div = dispc_cinfo.lck_div;
-       dssdev->clocks.dispc.channel.pck_div = dispc_cinfo.pck_div;
-
-       dssdev->clocks.dispc.dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK;
-
-       dssdev->clocks.dispc.channel.lcd_clk_src =
-               dsi->module_id == 0 ?
-               OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
-               OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
-
-       dssdev->clocks.dsi.dsi_fclk_src =
-               dsi->module_id == 0 ?
-               OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
-               OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI;
-
-       mutex_unlock(&dsi->lock);
-       return 0;
-err:
-       mutex_unlock(&dsi->lock);
-       return r;
-}
-EXPORT_SYMBOL(omapdss_dsi_set_clocks);
-
 int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dsi->output.manager;
        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+       struct omap_dss_output *out = &dsi->output;
        u8 data_type;
        u16 word_count;
        int r;
 
+       if (out == NULL || out->manager == NULL) {
+               DSSERR("failed to enable display: no output/manager\n");
+               return -ENODEV;
+       }
+
+       r = dsi_display_init_dispc(dsidev, mgr);
+       if (r)
+               goto err_init_dispc;
+
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                switch (dsi->pix_fmt) {
                case OMAP_DSS_DSI_FMT_RGB888:
@@ -4357,8 +4151,8 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
                        data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
                        break;
                default:
-                       BUG();
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto err_pix_fmt;
                };
 
                dsi_if_enable(dsidev, false);
@@ -4377,16 +4171,20 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
        }
 
        r = dss_mgr_enable(mgr);
-       if (r) {
-               if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
-                       dsi_if_enable(dsidev, false);
-                       dsi_vc_enable(dsidev, channel, false);
-               }
-
-               return r;
-       }
+       if (r)
+               goto err_mgr_enable;
 
        return 0;
+
+err_mgr_enable:
+       if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+               dsi_if_enable(dsidev, false);
+               dsi_vc_enable(dsidev, channel, false);
+       }
+err_pix_fmt:
+       dsi_display_uninit_dispc(dsidev, mgr);
+err_init_dispc:
+       return r;
 }
 EXPORT_SYMBOL(dsi_enable_video_output);
 
@@ -4394,7 +4192,7 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dsi->output.manager;
 
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                dsi_if_enable(dsidev, false);
@@ -4408,14 +4206,15 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
        }
 
        dss_mgr_disable(mgr);
+
+       dsi_display_uninit_dispc(dsidev, mgr);
 }
 EXPORT_SYMBOL(dsi_disable_video_output);
 
-static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
+static void dsi_update_screen_dispc(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dsi->output.manager;
        unsigned bytespp;
        unsigned bytespl;
        unsigned bytespf;
@@ -4425,7 +4224,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
        u32 l;
        int r;
        const unsigned channel = dsi->update_channel;
-       const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
+       const unsigned line_buf_size = dsi->line_buffer_size;
        u16 w = dsi->timings.x_res;
        u16 h = dsi->timings.y_res;
 
@@ -4571,7 +4370,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
        dsi->update_bytes = dw * dh *
                dsi_get_pixel_size(dsi->pix_fmt) / 8;
 #endif
-       dsi_update_screen_dispc(dssdev);
+       dsi_update_screen_dispc(dsidev);
 
        return 0;
 }
@@ -4579,18 +4378,17 @@ EXPORT_SYMBOL(omap_dsi_update);
 
 /* Display funcs */
 
-static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
+static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dispc_clock_info dispc_cinfo;
        int r;
-       unsigned long long fck;
+       unsigned long fck;
 
        fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
 
-       dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
-       dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
+       dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
+       dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
 
        r = dispc_calc_clock_rates(fck, &dispc_cinfo);
        if (r) {
@@ -4603,21 +4401,17 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
+static int dsi_display_init_dispc(struct platform_device *dsidev,
+               struct omap_overlay_manager *mgr)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
        int r;
 
-       if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
-               dsi->timings.hsw = 1;
-               dsi->timings.hfp = 1;
-               dsi->timings.hbp = 1;
-               dsi->timings.vsw = 1;
-               dsi->timings.vfp = 0;
-               dsi->timings.vbp = 0;
+       dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
+                       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
+                       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
 
+       if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
                r = dss_mgr_register_framedone_handler(mgr,
                                dsi_framedone_irq_callback, dsidev);
                if (r) {
@@ -4645,7 +4439,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
 
        dss_mgr_set_timings(mgr, &dsi->timings);
 
-       r = dsi_configure_dispc_clocks(dssdev);
+       r = dsi_configure_dispc_clocks(dsidev);
        if (r)
                goto err1;
 
@@ -4662,30 +4456,30 @@ err1:
                dss_mgr_unregister_framedone_handler(mgr,
                                dsi_framedone_irq_callback, dsidev);
 err:
+       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
        return r;
 }
 
-static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
+static void dsi_display_uninit_dispc(struct platform_device *dsidev,
+               struct omap_overlay_manager *mgr)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
 
        if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
                dss_mgr_unregister_framedone_handler(mgr,
                                dsi_framedone_irq_callback, dsidev);
+
+       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
 }
 
-static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
+static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dsi_clock_info cinfo;
        int r;
 
-       cinfo.regn  = dssdev->clocks.dsi.regn;
-       cinfo.regm  = dssdev->clocks.dsi.regm;
-       cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
-       cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
+       cinfo = dsi->user_dsi_cinfo;
+
        r = dsi_calc_clock_rates(dsidev, &cinfo);
        if (r) {
                DSSERR("Failed to calc dsi clocks\n");
@@ -4701,24 +4495,22 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
+static int dsi_display_init_dsi(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
        int r;
 
        r = dsi_pll_init(dsidev, true, true);
        if (r)
                goto err0;
 
-       r = dsi_configure_dsi_clocks(dssdev);
+       r = dsi_configure_dsi_clocks(dsidev);
        if (r)
                goto err1;
 
-       dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
-       dss_select_lcd_clk_source(mgr->id,
-                       dssdev->clocks.dispc.channel.lcd_clk_src);
+       dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
+                       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
+                       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
 
        DSSDBG("PLL OK\n");
 
@@ -4729,12 +4521,12 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
        _dsi_print_reset_status(dsidev);
 
        dsi_proto_timings(dsidev);
-       dsi_set_lp_clk_divisor(dssdev);
+       dsi_set_lp_clk_divisor(dsidev);
 
        if (1)
                _dsi_print_reset_status(dsidev);
 
-       r = dsi_proto_config(dssdev);
+       r = dsi_proto_config(dsidev);
        if (r)
                goto err3;
 
@@ -4751,20 +4543,16 @@ err3:
        dsi_cio_uninit(dsidev);
 err2:
        dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
-       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
-
 err1:
        dsi_pll_uninit(dsidev, true);
 err0:
        return r;
 }
 
-static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
+static void dsi_display_uninit_dsi(struct platform_device *dsidev,
                bool disconnect_lanes, bool enter_ulps)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
 
        if (enter_ulps && !dsi->ulps_enabled)
                dsi_enter_ulps(dsidev);
@@ -4777,7 +4565,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
        dsi_vc_enable(dsidev, 3, 0);
 
        dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
-       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
        dsi_cio_uninit(dsidev);
        dsi_pll_uninit(dsidev, disconnect_lanes);
 }
@@ -4786,7 +4573,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_dss_output *out = dssdev->output;
        int r = 0;
 
        DSSDBG("dsi_display_enable\n");
@@ -4795,12 +4581,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
 
        mutex_lock(&dsi->lock);
 
-       if (out == NULL || out->manager == NULL) {
-               DSSERR("failed to enable display: no output/manager\n");
-               r = -ENODEV;
-               goto err_start_dev;
-       }
-
        r = omap_dss_start_device(dssdev);
        if (r) {
                DSSERR("failed to start device\n");
@@ -4815,11 +4595,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
 
        _dsi_initialize_irq(dsidev);
 
-       r = dsi_display_init_dispc(dssdev);
-       if (r)
-               goto err_init_dispc;
-
-       r = dsi_display_init_dsi(dssdev);
+       r = dsi_display_init_dsi(dsidev);
        if (r)
                goto err_init_dsi;
 
@@ -4828,8 +4604,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
        return 0;
 
 err_init_dsi:
-       dsi_display_uninit_dispc(dssdev);
-err_init_dispc:
        dsi_enable_pll_clock(dsidev, 0);
        dsi_runtime_put(dsidev);
 err_get_dsi:
@@ -4858,9 +4632,7 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
        dsi_sync_vc(dsidev, 2);
        dsi_sync_vc(dsidev, 3);
 
-       dsi_display_uninit_dispc(dssdev);
-
-       dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
+       dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
 
        dsi_runtime_put(dsidev);
        dsi_enable_pll_clock(dsidev, 0);
@@ -4881,77 +4653,579 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
 }
 EXPORT_SYMBOL(omapdss_dsi_enable_te);
 
-void omapdss_dsi_set_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
+#ifdef PRINT_VERBOSE_VM_TIMINGS
+static void print_dsi_vm(const char *str,
+               const struct omap_dss_dsi_videomode_timings *t)
+{
+       unsigned long byteclk = t->hsclk / 4;
+       int bl, wc, pps, tot;
+
+       wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
+       pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
+       bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
+       tot = bl + pps;
+
+#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
+
+       pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
+                       "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
+                       str,
+                       byteclk,
+                       t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
+                       bl, pps, tot,
+                       TO_DSI_T(t->hss),
+                       TO_DSI_T(t->hsa),
+                       TO_DSI_T(t->hse),
+                       TO_DSI_T(t->hbp),
+                       TO_DSI_T(pps),
+                       TO_DSI_T(t->hfp),
+
+                       TO_DSI_T(bl),
+                       TO_DSI_T(pps),
+
+                       TO_DSI_T(tot));
+#undef TO_DSI_T
+}
+
+static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+{
+       unsigned long pck = t->pixel_clock * 1000;
+       int hact, bl, tot;
+
+       hact = t->x_res;
+       bl = t->hsw + t->hbp + t->hfp;
+       tot = hact + bl;
+
+#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
+
+       pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
+                       "%u/%u/%u/%u = %u + %u = %u\n",
+                       str,
+                       pck,
+                       t->hsw, t->hbp, hact, t->hfp,
+                       bl, hact, tot,
+                       TO_DISPC_T(t->hsw),
+                       TO_DISPC_T(t->hbp),
+                       TO_DISPC_T(hact),
+                       TO_DISPC_T(t->hfp),
+                       TO_DISPC_T(bl),
+                       TO_DISPC_T(hact),
+                       TO_DISPC_T(tot));
+#undef TO_DISPC_T
+}
+
+/* note: this is not quite accurate */
+static void print_dsi_dispc_vm(const char *str,
+               const struct omap_dss_dsi_videomode_timings *t)
+{
+       struct omap_video_timings vm = { 0 };
+       unsigned long byteclk = t->hsclk / 4;
+       unsigned long pck;
+       u64 dsi_tput;
+       int dsi_hact, dsi_htot;
+
+       dsi_tput = (u64)byteclk * t->ndl * 8;
+       pck = (u32)div64_u64(dsi_tput, t->bitspp);
+       dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
+       dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
+
+       vm.pixel_clock = pck / 1000;
+       vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+       vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
+       vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
+       vm.x_res = t->hact;
+
+       print_dispc_vm(str, &vm);
+}
+#endif /* PRINT_VERBOSE_VM_TIMINGS */
+
+static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_clk_calc_ctx *ctx = data;
+       struct omap_video_timings *t = &ctx->dispc_vm;
 
-       mutex_lock(&dsi->lock);
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
 
-       dsi->timings = *timings;
+       *t = *ctx->config->timings;
+       t->pixel_clock = pck / 1000;
+       t->x_res = ctx->config->timings->x_res;
+       t->y_res = ctx->config->timings->y_res;
+       t->hsw = t->hfp = t->hbp = t->vsw = 1;
+       t->vfp = t->vbp = 0;
 
-       mutex_unlock(&dsi->lock);
+       return true;
 }
-EXPORT_SYMBOL(omapdss_dsi_set_timings);
 
-void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
+static bool dsi_cm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+               void *data)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_clk_calc_ctx *ctx = data;
 
-       mutex_lock(&dsi->lock);
+       ctx->dsi_cinfo.regm_dispc = regm_dispc;
+       ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
 
-       dsi->timings.x_res = w;
-       dsi->timings.y_res = h;
+       return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
+                       dsi_cm_calc_dispc_cb, ctx);
+}
 
-       mutex_unlock(&dsi->lock);
+static bool dsi_cm_calc_pll_cb(int regn, int regm, unsigned long fint,
+               unsigned long pll, void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+
+       ctx->dsi_cinfo.regn = regn;
+       ctx->dsi_cinfo.regm = regm;
+       ctx->dsi_cinfo.fint = fint;
+       ctx->dsi_cinfo.clkin4ddr = pll;
+
+       return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+                       dsi_cm_calc_hsdiv_cb, ctx);
 }
-EXPORT_SYMBOL(omapdss_dsi_set_size);
 
-void omapdss_dsi_set_pixel_format(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_pixel_format fmt)
+static bool dsi_cm_calc(struct dsi_data *dsi,
+               const struct omap_dss_dsi_config *cfg,
+               struct dsi_clk_calc_ctx *ctx)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       unsigned long clkin;
+       int bitspp, ndl;
+       unsigned long pll_min, pll_max;
+       unsigned long pck, txbyteclk;
 
-       mutex_lock(&dsi->lock);
+       clkin = clk_get_rate(dsi->sys_clk);
+       bitspp = dsi_get_pixel_size(cfg->pixel_format);
+       ndl = dsi->num_lanes_used - 1;
 
-       dsi->pix_fmt = fmt;
+       /*
+        * Here we should calculate minimum txbyteclk to be able to send the
+        * frame in time, and also to handle TE. That's not very simple, though,
+        * especially as we go to LP between each pixel packet due to HW
+        * "feature". So let's just estimate very roughly and multiply by 1.5.
+        */
+       pck = cfg->timings->pixel_clock * 1000;
+       pck = pck * 3 / 2;
+       txbyteclk = pck * bitspp / 8 / ndl;
 
-       mutex_unlock(&dsi->lock);
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->dsidev = dsi->pdev;
+       ctx->config = cfg;
+       ctx->req_pck_min = pck;
+       ctx->req_pck_nom = pck;
+       ctx->req_pck_max = pck * 3 / 2;
+       ctx->dsi_cinfo.clkin = clkin;
+
+       pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
+       pll_max = cfg->hs_clk_max * 4;
+
+       return dsi_pll_calc(dsi->pdev, clkin,
+                       pll_min, pll_max,
+                       dsi_cm_calc_pll_cb, ctx);
 }
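To make the rough 1.5x estimate in dsi_cm_calc() above concrete, here is a worked example as a standalone sketch. It is not part of the patch; the panel numbers (25 MHz pixel clock, RGB888, two data lanes, 150 MHz hs_clk_min) are assumptions.

/*
 * Worked example of dsi_cm_calc()'s rough estimate (illustrative only,
 * not added by the patch).  Assumed panel: 25 MHz pixel clock, RGB888
 * (24 bpp), two data lanes (ndl == 2), hs_clk_min == 150 MHz.
 */
static unsigned long example_cm_pll_min(void)
{
	unsigned long pck = 25000 * 1000;	/* 25,000,000 Hz nominal pck */
	unsigned long hs_clk_min = 150000000;
	unsigned long txbyteclk, pll_min;
	int bitspp = 24, ndl = 2;

	pck = pck * 3 / 2;			/* 1.5x margin: 37,500,000 Hz */
	txbyteclk = pck * bitspp / 8 / ndl;	/* 56,250,000 Hz per lane */

	/* byteclk = clkin4ddr / 16, so the PLL must run at >= 16 * txbyteclk */
	pll_min = txbyteclk * 4 * 4;		/* 900,000,000 Hz */
	if (pll_min < hs_clk_min * 4)		/* vs. 600,000,000 Hz */
		pll_min = hs_clk_min * 4;

	return pll_min;		/* 900 MHz lower bound handed to dsi_pll_calc() */
}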
-EXPORT_SYMBOL(omapdss_dsi_set_pixel_format);
 
-void omapdss_dsi_set_operation_mode(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_mode mode)
+static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+       const struct omap_dss_dsi_config *cfg = ctx->config;
+       int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+       int ndl = dsi->num_lanes_used - 1;
+       unsigned long hsclk = ctx->dsi_cinfo.clkin4ddr / 4;
+       unsigned long byteclk = hsclk / 4;
 
-       mutex_lock(&dsi->lock);
+       unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
+       int xres;
+       int panel_htot, panel_hbl; /* pixels */
+       int dispc_htot, dispc_hbl; /* pixels */
+       int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
+       int hfp, hsa, hbp;
+       const struct omap_video_timings *req_vm;
+       struct omap_video_timings *dispc_vm;
+       struct omap_dss_dsi_videomode_timings *dsi_vm;
+       u64 dsi_tput, dispc_tput;
 
-       dsi->mode = mode;
+       dsi_tput = (u64)byteclk * ndl * 8;
 
-       mutex_unlock(&dsi->lock);
+       req_vm = cfg->timings;
+       req_pck_min = ctx->req_pck_min;
+       req_pck_max = ctx->req_pck_max;
+       req_pck_nom = ctx->req_pck_nom;
+
+       dispc_pck = ctx->dispc_cinfo.pck;
+       dispc_tput = (u64)dispc_pck * bitspp;
+
+       xres = req_vm->x_res;
+
+       panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+       panel_htot = xres + panel_hbl;
+
+       dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
+
+       /*
+        * When there are no line buffers, DISPC and DSI must have the
+        * same tput. Otherwise DISPC tput needs to be higher than DSI's.
+        */
+       if (dsi->line_buffer_size < xres * bitspp / 8) {
+               if (dispc_tput != dsi_tput)
+                       return false;
+       } else {
+               if (dispc_tput < dsi_tput)
+                       return false;
+       }
+
+       /* DSI tput must be over the min requirement */
+       if (dsi_tput < (u64)bitspp * req_pck_min)
+               return false;
+
+       /* In non-burst mode, DSI tput must be below the max requirement. */
+       if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
+               if (dsi_tput > (u64)bitspp * req_pck_max)
+                       return false;
+       }
+
+       hss = DIV_ROUND_UP(4, ndl);
+
+       if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
+               if (ndl == 3 && req_vm->hsw == 0)
+                       hse = 1;
+               else
+                       hse = DIV_ROUND_UP(4, ndl);
+       } else {
+               hse = 0;
+       }
+
+       /* DSI htot to match the panel's nominal pck */
+       dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);
+
+       /* fail if there would be no time for blanking */
+       if (dsi_htot < hss + hse + dsi_hact)
+               return false;
+
+       /* total DSI blanking needed to achieve panel's TL */
+       dsi_hbl = dsi_htot - dsi_hact;
+
+       /* DISPC htot to match the DSI TL */
+       dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);
+
+       /* verify that the DSI and DISPC TLs are the same */
+       if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
+               return false;
+
+       dispc_hbl = dispc_htot - xres;
+
+       /* setup DSI videomode */
+
+       dsi_vm = &ctx->dsi_vm;
+       memset(dsi_vm, 0, sizeof(*dsi_vm));
+
+       dsi_vm->hsclk = hsclk;
+
+       dsi_vm->ndl = ndl;
+       dsi_vm->bitspp = bitspp;
+
+       if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
+               hsa = 0;
+       } else if (ndl == 3 && req_vm->hsw == 0) {
+               hsa = 0;
+       } else {
+               hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+               hsa = max(hsa - hse, 1);
+       }
+
+       hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+       hbp = max(hbp, 1);
+
+       hfp = dsi_hbl - (hss + hsa + hse + hbp);
+       if (hfp < 1) {
+               int t;
+               /* we need to take cycles from hbp */
+
+               t = 1 - hfp;
+               hbp = max(hbp - t, 1);
+               hfp = dsi_hbl - (hss + hsa + hse + hbp);
+
+               if (hfp < 1 && hsa > 0) {
+                       /* we need to take cycles from hsa */
+                       t = 1 - hfp;
+                       hsa = max(hsa - t, 1);
+                       hfp = dsi_hbl - (hss + hsa + hse + hbp);
+               }
+       }
+
+       if (hfp < 1)
+               return false;
+
+       dsi_vm->hss = hss;
+       dsi_vm->hsa = hsa;
+       dsi_vm->hse = hse;
+       dsi_vm->hbp = hbp;
+       dsi_vm->hact = xres;
+       dsi_vm->hfp = hfp;
+
+       dsi_vm->vsa = req_vm->vsw;
+       dsi_vm->vbp = req_vm->vbp;
+       dsi_vm->vact = req_vm->y_res;
+       dsi_vm->vfp = req_vm->vfp;
+
+       dsi_vm->trans_mode = cfg->trans_mode;
+
+       dsi_vm->blanking_mode = 0;
+       dsi_vm->hsa_blanking_mode = 1;
+       dsi_vm->hfp_blanking_mode = 1;
+       dsi_vm->hbp_blanking_mode = 1;
+
+       dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
+       dsi_vm->window_sync = 4;
+
+       /* setup DISPC videomode */
+
+       dispc_vm = &ctx->dispc_vm;
+       *dispc_vm = *req_vm;
+       dispc_vm->pixel_clock = dispc_pck / 1000;
+
+       if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
+               hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+                               req_pck_nom);
+               hsa = max(hsa, 1);
+       } else {
+               hsa = 1;
+       }
+
+       hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+       hbp = max(hbp, 1);
+
+       hfp = dispc_hbl - hsa - hbp;
+       if (hfp < 1) {
+               int t;
+               /* we need to take cycles from hbp */
+
+               t = 1 - hfp;
+               hbp = max(hbp - t, 1);
+               hfp = dispc_hbl - hsa - hbp;
+
+               if (hfp < 1) {
+                       /* we need to take cycles from hsa */
+                       t = 1 - hfp;
+                       hsa = max(hsa - t, 1);
+                       hfp = dispc_hbl - hsa - hbp;
+               }
+       }
+
+       if (hfp < 1)
+               return false;
+
+       dispc_vm->hfp = hfp;
+       dispc_vm->hsw = hsa;
+       dispc_vm->hbp = hbp;
+
+       return true;
 }
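The two hfp < 1 fix-ups in dsi_vm_calc_blanking() follow the same borrow pattern for both the DSI and the DISPC timings. Purely as an illustration (the helper and its name are not part of the patch), the pattern looks like this when factored out; the hsa > 0 check matches the DSI variant, while the DISPC variant shrinks hsa unconditionally.

/*
 * Sketch of the blanking-borrow pattern used in dsi_vm_calc_blanking()
 * (illustrative only, not added by the patch).  Shrink hbp, then hsa,
 * never below 1, until a positive hfp fits into the fixed blanking
 * length 'hbl'.
 */
static int example_fit_hfp(int hbl, int hss, int hse, int *hsa, int *hbp)
{
	int hfp = hbl - (hss + *hsa + hse + *hbp);

	if (hfp < 1) {
		/* take the missing cycles from hbp first */
		*hbp -= 1 - hfp;
		if (*hbp < 1)
			*hbp = 1;
		hfp = hbl - (hss + *hsa + hse + *hbp);
	}

	if (hfp < 1 && *hsa > 0) {
		/* then take them from hsa */
		*hsa -= 1 - hfp;
		if (*hsa < 1)
			*hsa = 1;
		hfp = hbl - (hss + *hsa + hse + *hbp);
	}

	return hfp;	/* caller still has to reject hfp < 1 */
}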
-EXPORT_SYMBOL(omapdss_dsi_set_operation_mode);
 
-void omapdss_dsi_set_videomode_timings(struct omap_dss_device *dssdev,
-               struct omap_dss_dsi_videomode_timings *timings)
+
+static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
+
+       if (dsi_vm_calc_blanking(ctx) == false)
+               return false;
+
+#ifdef PRINT_VERBOSE_VM_TIMINGS
+       print_dispc_vm("dispc", &ctx->dispc_vm);
+       print_dsi_vm("dsi  ", &ctx->dsi_vm);
+       print_dispc_vm("req  ", ctx->config->timings);
+       print_dsi_dispc_vm("act  ", &ctx->dsi_vm);
+#endif
+
+       return true;
+}
+
+static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+               void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+       unsigned long pck_max;
+
+       ctx->dsi_cinfo.regm_dispc = regm_dispc;
+       ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+
+       /*
+        * In burst mode we can let the dispc pck be arbitrarily high, but it
+        * limits our scaling abilities. So for now, don't aim too high.
+        */
+
+       if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
+               pck_max = ctx->req_pck_max + 10000000;
+       else
+               pck_max = ctx->req_pck_max;
+
+       return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
+                       dsi_vm_calc_dispc_cb, ctx);
+}
+
+static bool dsi_vm_calc_pll_cb(int regn, int regm, unsigned long fint,
+               unsigned long pll, void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+
+       ctx->dsi_cinfo.regn = regn;
+       ctx->dsi_cinfo.regm = regm;
+       ctx->dsi_cinfo.fint = fint;
+       ctx->dsi_cinfo.clkin4ddr = pll;
+
+       return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+                       dsi_vm_calc_hsdiv_cb, ctx);
+}
+
+static bool dsi_vm_calc(struct dsi_data *dsi,
+               const struct omap_dss_dsi_config *cfg,
+               struct dsi_clk_calc_ctx *ctx)
+{
+       const struct omap_video_timings *t = cfg->timings;
+       unsigned long clkin;
+       unsigned long pll_min;
+       unsigned long pll_max;
+       int ndl = dsi->num_lanes_used - 1;
+       int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+       unsigned long byteclk_min;
+
+       clkin = clk_get_rate(dsi->sys_clk);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->dsidev = dsi->pdev;
+       ctx->config = cfg;
+
+       ctx->dsi_cinfo.clkin = clkin;
+
+       /* these limits should come from the panel driver */
+       ctx->req_pck_min = t->pixel_clock * 1000 - 1000;
+       ctx->req_pck_nom = t->pixel_clock * 1000;
+       ctx->req_pck_max = t->pixel_clock * 1000 + 1000;
+
+       byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
+       pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
+
+       if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
+               pll_max = cfg->hs_clk_max * 4;
+       } else {
+               unsigned long byteclk_max;
+               byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
+                               ndl * 8);
+
+               pll_max = byteclk_max * 4 * 4;
+       }
+
+       return dsi_pll_calc(dsi->pdev, clkin,
+                       pll_min, pll_max,
+                       dsi_vm_calc_pll_cb, ctx);
+}
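For the video-mode path it may help to see how narrow the resulting PLL window is outside burst mode. A worked sketch with assumed numbers (65,000 kHz pixel clock, RGB888, four lanes, 150 MHz hs_clk_min), not part of the patch:

/*
 * Worked example of dsi_vm_calc()'s PLL limits (illustrative only, not
 * added by the patch).  Assumed panel: 65,000 kHz pixel clock, RGB888
 * (24 bpp), four data lanes (ndl == 3), non-burst mode.
 */
static void example_vm_pll_window(unsigned long *min, unsigned long *max)
{
	unsigned long req_pck_min = 65000 * 1000 - 1000;	/* 64,999,000 Hz */
	unsigned long req_pck_max = 65000 * 1000 + 1000;	/* 65,001,000 Hz */
	unsigned long hs_clk_min = 150000000;
	int bitspp = 24, ndl = 3;
	unsigned long byteclk_min, byteclk_max;

	byteclk_min = (unsigned long long)req_pck_min * bitspp / (ndl * 8);
	byteclk_max = (unsigned long long)req_pck_max * bitspp / (ndl * 8);

	*min = byteclk_min * 4 * 4;		/* 1,039,984,000 Hz */
	if (*min < hs_clk_min * 4)
		*min = hs_clk_min * 4;		/* 600 MHz, not the limit here */

	*max = byteclk_max * 4 * 4;		/* 1,040,016,000 Hz (non-burst) */

	/* only ~32 kHz of slack: the +/- 1 kHz pck window pins the PLL rate */
}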
+
+int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
+               const struct omap_dss_dsi_config *config)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_clk_calc_ctx ctx;
+       bool ok;
+       int r;
 
        mutex_lock(&dsi->lock);
 
-       dsi->vm_timings = *timings;
+       dsi->pix_fmt = config->pixel_format;
+       dsi->mode = config->mode;
+
+       if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
+               ok = dsi_vm_calc(dsi, config, &ctx);
+       else
+               ok = dsi_cm_calc(dsi, config, &ctx);
+
+       if (!ok) {
+               DSSERR("failed to find suitable DSI clock settings\n");
+               r = -EINVAL;
+               goto err;
+       }
+
+       dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
+
+       r = dsi_lp_clock_calc(&ctx.dsi_cinfo, config->lp_clk_min,
+                       config->lp_clk_max);
+       if (r) {
+               DSSERR("failed to find suitable DSI LP clock settings\n");
+               goto err;
+       }
+
+       dsi->user_dsi_cinfo = ctx.dsi_cinfo;
+       dsi->user_dispc_cinfo = ctx.dispc_cinfo;
+
+       dsi->timings = ctx.dispc_vm;
+       dsi->vm_timings = ctx.dsi_vm;
 
        mutex_unlock(&dsi->lock);
+
+       return 0;
+err:
+       mutex_unlock(&dsi->lock);
+
+       return r;
 }
-EXPORT_SYMBOL(omapdss_dsi_set_videomode_timings);
+EXPORT_SYMBOL(omapdss_dsi_set_config);
+
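omapdss_dsi_set_config() takes over from the per-parameter setters removed above (set_timings, set_size, set_pixel_format, set_operation_mode, set_videomode_timings and set_clocks). The following caller is only a sketch of the new interface from a hypothetical panel driver; the clock limits and the panel itself are assumptions, not values from the patch.

/*
 * Hypothetical panel-driver usage of omapdss_dsi_set_config()
 * (illustrative only, not added by the patch).
 */
static int example_panel_configure(struct omap_dss_device *dssdev,
		struct omap_video_timings *timings)
{
	struct omap_dss_dsi_config cfg = {
		.mode = OMAP_DSS_DSI_VIDEO_MODE,
		.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
		.timings = timings,
		.hs_clk_min = 150000000,	/* assumed panel limits */
		.hs_clk_max = 300000000,
		.lp_clk_min = 7000000,
		.lp_clk_max = 10000000,
		.trans_mode = OMAP_DSS_DSI_BURST_MODE,
		.ddr_clk_always_on = false,
	};

	/* picks DSI PLL, DISPC and LP dividers; -EINVAL if nothing fits */
	return omapdss_dsi_set_config(dssdev, &cfg);
}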
+/*
+ * Return a hardcoded channel for the DSI output. This should work for
+ * current use cases, but it can later be expanded either to resolve
+ * the channel more dynamically or to take the channel as a user
+ * parameter.
+ */
+static enum omap_channel dsi_get_channel(int module_id)
+{
+       switch (omapdss_get_version()) {
+       case OMAPDSS_VER_OMAP24xx:
+               DSSWARN("DSI not supported\n");
+               return OMAP_DSS_CHANNEL_LCD;
+
+       case OMAPDSS_VER_OMAP34xx_ES1:
+       case OMAPDSS_VER_OMAP34xx_ES3:
+       case OMAPDSS_VER_OMAP3630:
+       case OMAPDSS_VER_AM35xx:
+               return OMAP_DSS_CHANNEL_LCD;
+
+       case OMAPDSS_VER_OMAP4430_ES1:
+       case OMAPDSS_VER_OMAP4430_ES2:
+       case OMAPDSS_VER_OMAP4:
+               switch (module_id) {
+               case 0:
+                       return OMAP_DSS_CHANNEL_LCD;
+               case 1:
+                       return OMAP_DSS_CHANNEL_LCD2;
+               default:
+                       DSSWARN("unsupported module id\n");
+                       return OMAP_DSS_CHANNEL_LCD;
+               }
+
+       case OMAPDSS_VER_OMAP5:
+               switch (module_id) {
+               case 0:
+                       return OMAP_DSS_CHANNEL_LCD;
+               case 1:
+                       return OMAP_DSS_CHANNEL_LCD3;
+               default:
+                       DSSWARN("unsupported module id\n");
+                       return OMAP_DSS_CHANNEL_LCD;
+               }
 
-static int __init dsi_init_display(struct omap_dss_device *dssdev)
+       default:
+               DSSWARN("unsupported DSS version\n");
+               return OMAP_DSS_CHANNEL_LCD;
+       }
+}
+
+static int dsi_init_display(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev =
                        dsi_get_dsidev_from_id(dssdev->phy.dsi.module);
@@ -5073,7 +5347,7 @@ static int dsi_get_clocks(struct platform_device *dsidev)
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct clk *clk;
 
-       clk = clk_get(&dsidev->dev, "fck");
+       clk = devm_clk_get(&dsidev->dev, "fck");
        if (IS_ERR(clk)) {
                DSSERR("can't get fck\n");
                return PTR_ERR(clk);
@@ -5081,11 +5355,9 @@ static int dsi_get_clocks(struct platform_device *dsidev)
 
        dsi->dss_clk = clk;
 
-       clk = clk_get(&dsidev->dev, "sys_clk");
+       clk = devm_clk_get(&dsidev->dev, "sys_clk");
        if (IS_ERR(clk)) {
                DSSERR("can't get sys_clk\n");
-               clk_put(dsi->dss_clk);
-               dsi->dss_clk = NULL;
                return PTR_ERR(clk);
        }
 
@@ -5094,17 +5366,7 @@ static int dsi_get_clocks(struct platform_device *dsidev)
        return 0;
 }
 
-static void dsi_put_clocks(struct platform_device *dsidev)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
-       if (dsi->dss_clk)
-               clk_put(dsi->dss_clk);
-       if (dsi->sys_clk)
-               clk_put(dsi->sys_clk);
-}
-
-static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *dsi_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
@@ -5136,7 +5398,7 @@ static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *p
        return def_dssdev;
 }
 
-static void __init dsi_probe_pdata(struct platform_device *dsidev)
+static int dsi_probe_pdata(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct omap_dss_device *plat_dssdev;
@@ -5146,11 +5408,11 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
        plat_dssdev = dsi_find_dssdev(dsidev);
 
        if (!plat_dssdev)
-               return;
+               return 0;
 
        dssdev = dss_alloc_and_init_device(&dsidev->dev);
        if (!dssdev)
-               return;
+               return -ENOMEM;
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
@@ -5158,7 +5420,7 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = omapdss_output_set_device(&dsi->output, dssdev);
@@ -5166,7 +5428,7 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
                DSSERR("failed to connect output to new device: %s\n",
                                dssdev->name);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = dss_add_device(dssdev);
@@ -5174,11 +5436,13 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
                DSSERR("device %s register failed: %d\n", dssdev->name, r);
                omapdss_output_unset_device(&dsi->output);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
+
+       return 0;
 }
 
-static void __init dsi_init_output(struct platform_device *dsidev)
+static void dsi_init_output(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct omap_dss_output *out = &dsi->output;
@@ -5188,11 +5452,13 @@ static void __init dsi_init_output(struct platform_device *dsidev)
                        OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
 
        out->type = OMAP_DISPLAY_TYPE_DSI;
+       out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
+       out->dispc_channel = dsi_get_channel(dsi->module_id);
 
        dss_register_output(out);
 }
 
-static void __exit dsi_uninit_output(struct platform_device *dsidev)
+static void dsi_uninit_output(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct omap_dss_output *out = &dsi->output;
@@ -5201,7 +5467,7 @@ static void __exit dsi_uninit_output(struct platform_device *dsidev)
 }
 
 /* DSI1 HW IP initialisation */
-static int __init omap_dsihw_probe(struct platform_device *dsidev)
+static int omap_dsihw_probe(struct platform_device *dsidev)
 {
        u32 rev;
        int r, i;
@@ -5293,9 +5559,17 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
        else
                dsi->num_lanes_supported = 3;
 
+       dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
+
        dsi_init_output(dsidev);
 
-       dsi_probe_pdata(dsidev);
+       r = dsi_probe_pdata(dsidev);
+       if (r) {
+               dsi_runtime_put(dsidev);
+               dsi_uninit_output(dsidev);
+               pm_runtime_disable(&dsidev->dev);
+               return r;
+       }
 
        dsi_runtime_put(dsidev);
 
@@ -5314,7 +5588,6 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
 
 err_runtime_get:
        pm_runtime_disable(&dsidev->dev);
-       dsi_put_clocks(dsidev);
        return r;
 }
 
@@ -5330,8 +5603,6 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
 
        pm_runtime_disable(&dsidev->dev);
 
-       dsi_put_clocks(dsidev);
-
        if (dsi->vdds_dsi_reg != NULL) {
                if (dsi->vdds_dsi_enabled) {
                        regulator_disable(dsi->vdds_dsi_reg);
@@ -5369,6 +5640,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
 };
 
 static struct platform_driver omap_dsihw_driver = {
+       .probe          = omap_dsihw_probe,
        .remove         = __exit_p(omap_dsihw_remove),
        .driver         = {
                .name   = "omapdss_dsi",
@@ -5379,7 +5651,7 @@ static struct platform_driver omap_dsihw_driver = {
 
 int __init dsi_init_platform_driver(void)
 {
-       return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
+       return platform_driver_register(&omap_dsihw_driver);
 }
 
 void __exit dsi_uninit_platform_driver(void)
index 054c2a2..94f66f9 100644
@@ -473,6 +473,47 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
        return 0;
 }
 
+bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
+{
+       int fckd, fckd_start, fckd_stop;
+       unsigned long fck;
+       unsigned long fck_hw_max;
+       unsigned long fckd_hw_max;
+       unsigned long prate;
+       unsigned m;
+
+       if (dss.dpll4_m4_ck == NULL) {
+               /*
+                * TODO: dss1_fclk can be changed on OMAP2, but the available
+                * dividers are not continuous. We just use the pre-set rate for
+                * now.
+                */
+               fck = clk_get_rate(dss.dss_clk);
+               fckd = 1;
+               return func(fckd, fck, data);
+       }
+
+       fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+       fckd_hw_max = dss.feat->fck_div_max;
+
+       m = dss.feat->dss_fck_multiplier;
+       prate = dss_get_dpll4_rate();
+
+       fck_min = fck_min ? fck_min : 1;
+
+       fckd_start = min(prate * m / fck_min, fckd_hw_max);
+       fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul);
+
+       for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
+               fck = prate / fckd * m;
+
+               if (func(fckd, fck, data))
+                       return true;
+       }
+
+       return false;
+}
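dss_div_calc() turns the old best-match search into a callback-driven iterator: it offers fck candidates starting near fck_min and increasing, and stops at the first one the callback accepts. A minimal hypothetical caller is sketched below (the names and the debug message are assumptions, not from the patch); a real user would typically chain into dispc_div_calc() from inside the callback, much as the DSI code does with its own iterators.

/*
 * Minimal hypothetical caller of dss_div_calc() (illustrative only,
 * not added by the patch).  The callback records the first fck/fckd
 * pair offered and stops the iteration.
 */
struct example_dss_ctx {
	int fckd;
	unsigned long fck;
};

static bool example_dss_calc_cb(int fckd, unsigned long fck, void *data)
{
	struct example_dss_ctx *ctx = data;

	ctx->fckd = fckd;
	ctx->fck = fck;

	return true;	/* accept the first candidate, stop iterating */
}

static int example_pick_dss_fck(unsigned long fck_min)
{
	struct example_dss_ctx ctx = { 0 };

	if (!dss_div_calc(fck_min, example_dss_calc_cb, &ctx))
		return -EINVAL;

	DSSDBG("picked fck %lu (div %d)\n", ctx.fck, ctx.fckd);
	return 0;
}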
+
 int dss_set_clock_div(struct dss_clock_info *cinfo)
 {
        if (dss.dpll4_m4_ck) {
@@ -482,7 +523,8 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
                prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
                DSSDBG("dpll4_m4 = %ld\n", prate);
 
-               r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
+               r = clk_set_rate(dss.dpll4_m4_ck,
+                               DIV_ROUND_UP(prate, cinfo->fck_div));
                if (r)
                        return r;
        } else {
@@ -492,7 +534,9 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
 
        dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
 
-       WARN_ONCE(dss.dss_clk_rate != cinfo->fck, "clk rate mismatch");
+       WARN_ONCE(dss.dss_clk_rate != cinfo->fck,
+                       "clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
+                       cinfo->fck);
 
        DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
 
@@ -542,121 +586,6 @@ static int dss_setup_default_clock(void)
        return 0;
 }
 
-int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       unsigned long prate;
-       struct dss_clock_info best_dss;
-       struct dispc_clock_info best_dispc;
-
-       unsigned long fck, max_dss_fck;
-
-       u16 fck_div;
-
-       int match = 0;
-       int min_fck_per_pck;
-
-       prate = dss_get_dpll4_rate();
-
-       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
-       fck = clk_get_rate(dss.dss_clk);
-       if (req_pck == dss.cache_req_pck && prate == dss.cache_prate &&
-               dss.cache_dss_cinfo.fck == fck) {
-               DSSDBG("dispc clock info found from cache.\n");
-               *dss_cinfo = dss.cache_dss_cinfo;
-               *dispc_cinfo = dss.cache_dispc_cinfo;
-               return 0;
-       }
-
-       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
-       if (min_fck_per_pck &&
-               req_pck * min_fck_per_pck > max_dss_fck) {
-               DSSERR("Requested pixel clock not possible with the current "
-                               "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
-                               "the constraint off.\n");
-               min_fck_per_pck = 0;
-       }
-
-retry:
-       memset(&best_dss, 0, sizeof(best_dss));
-       memset(&best_dispc, 0, sizeof(best_dispc));
-
-       if (dss.dpll4_m4_ck == NULL) {
-               struct dispc_clock_info cur_dispc;
-               /* XXX can we change the clock on omap2? */
-               fck = clk_get_rate(dss.dss_clk);
-               fck_div = 1;
-
-               dispc_find_clk_divs(req_pck, fck, &cur_dispc);
-               match = 1;
-
-               best_dss.fck = fck;
-               best_dss.fck_div = fck_div;
-
-               best_dispc = cur_dispc;
-
-               goto found;
-       } else {
-               for (fck_div = dss.feat->fck_div_max; fck_div > 0; --fck_div) {
-                       struct dispc_clock_info cur_dispc;
-
-                       fck = prate / fck_div * dss.feat->dss_fck_multiplier;
-
-                       if (fck > max_dss_fck)
-                               continue;
-
-                       if (min_fck_per_pck &&
-                                       fck < req_pck * min_fck_per_pck)
-                               continue;
-
-                       match = 1;
-
-                       dispc_find_clk_divs(req_pck, fck, &cur_dispc);
-
-                       if (abs(cur_dispc.pck - req_pck) <
-                                       abs(best_dispc.pck - req_pck)) {
-
-                               best_dss.fck = fck;
-                               best_dss.fck_div = fck_div;
-
-                               best_dispc = cur_dispc;
-
-                               if (cur_dispc.pck == req_pck)
-                                       goto found;
-                       }
-               }
-       }
-
-found:
-       if (!match) {
-               if (min_fck_per_pck) {
-                       DSSERR("Could not find suitable clock settings.\n"
-                                       "Turning FCK/PCK constraint off and"
-                                       "trying again.\n");
-                       min_fck_per_pck = 0;
-                       goto retry;
-               }
-
-               DSSERR("Could not find suitable clock settings.\n");
-
-               return -EINVAL;
-       }
-
-       if (dss_cinfo)
-               *dss_cinfo = best_dss;
-       if (dispc_cinfo)
-               *dispc_cinfo = best_dispc;
-
-       dss.cache_req_pck = req_pck;
-       dss.cache_prate = prate;
-       dss.cache_dss_cinfo = best_dss;
-       dss.cache_dispc_cinfo = best_dispc;
-
-       return 0;
-}
-
 void dss_set_venc_output(enum omap_dss_venc_type type)
 {
        int l = 0;
@@ -767,13 +696,11 @@ int dss_dpi_select_source(enum omap_channel channel)
 static int dss_get_clocks(void)
 {
        struct clk *clk;
-       int r;
 
-       clk = clk_get(&dss.pdev->dev, "fck");
+       clk = devm_clk_get(&dss.pdev->dev, "fck");
        if (IS_ERR(clk)) {
                DSSERR("can't get clock fck\n");
-               r = PTR_ERR(clk);
-               goto err;
+               return PTR_ERR(clk);
        }
 
        dss.dss_clk = clk;
@@ -782,8 +709,7 @@ static int dss_get_clocks(void)
                clk = clk_get(NULL, dss.feat->clk_name);
                if (IS_ERR(clk)) {
                        DSSERR("Failed to get %s\n", dss.feat->clk_name);
-                       r = PTR_ERR(clk);
-                       goto err;
+                       return PTR_ERR(clk);
                }
        } else {
                clk = NULL;
@@ -792,21 +718,12 @@ static int dss_get_clocks(void)
        dss.dpll4_m4_ck = clk;
 
        return 0;
-
-err:
-       if (dss.dss_clk)
-               clk_put(dss.dss_clk);
-       if (dss.dpll4_m4_ck)
-               clk_put(dss.dpll4_m4_ck);
-
-       return r;
 }
 
 static void dss_put_clocks(void)
 {
        if (dss.dpll4_m4_ck)
                clk_put(dss.dpll4_m4_ck);
-       clk_put(dss.dss_clk);
 }
 
 static int dss_runtime_get(void)
index 610c8e5..8475893 100644
@@ -268,14 +268,21 @@ void dss_set_dac_pwrdn_bgz(bool enable);
 unsigned long dss_get_dpll4_rate(void);
 int dss_calc_clock_rates(struct dss_clock_info *cinfo);
 int dss_set_clock_div(struct dss_clock_info *cinfo);
-int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
-               struct dispc_clock_info *dispc_cinfo);
+
+typedef bool (*dss_div_calc_func)(int fckd, unsigned long fck, void *data);
+bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data);
 
 /* SDI */
 int sdi_init_platform_driver(void) __init;
 void sdi_uninit_platform_driver(void) __exit;
 
 /* DSI */
+
+typedef bool (*dsi_pll_calc_func)(int regn, int regm, unsigned long fint,
+               unsigned long pll, void *data);
+typedef bool (*dsi_hsdiv_calc_func)(int regm_dispc, unsigned long dispc,
+               void *data);
+
 #ifdef CONFIG_OMAP2_DSS_DSI
 
 struct dentry;
@@ -292,12 +299,17 @@ void dsi_dump_clocks(struct seq_file *s);
 void dsi_irq_handler(void);
 u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
 
+unsigned long dsi_get_pll_clkin(struct platform_device *dsidev);
+
+bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
+               unsigned long out_min, dsi_hsdiv_calc_func func, void *data);
+bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
+               unsigned long pll_min, unsigned long pll_max,
+               dsi_pll_calc_func func, void *data);
+
 unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
 int dsi_pll_set_clock_div(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo);
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
-               unsigned long req_pck, struct dsi_clock_info *cinfo,
-               struct dispc_clock_info *dispc_cinfo);
 int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
                bool enable_hsdiv);
 void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
@@ -328,14 +340,6 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
        WARN("%s: DSI not compiled in\n", __func__);
        return -ENODEV;
 }
-static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
-               unsigned long req_pck,
-               struct dsi_clock_info *dsi_cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       WARN("%s: DSI not compiled in\n", __func__);
-       return -ENODEV;
-}
 static inline int dsi_pll_init(struct platform_device *dsidev,
                bool enable_hsclk, bool enable_hsdiv)
 {
@@ -356,6 +360,27 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
 {
        return NULL;
 }
+
+static inline unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
+{
+       return 0;
+}
+
+static inline bool dsi_hsdiv_calc(struct platform_device *dsidev,
+               unsigned long pll, unsigned long out_min,
+               dsi_hsdiv_calc_func func, void *data)
+{
+       return false;
+}
+
+static inline bool dsi_pll_calc(struct platform_device *dsidev,
+               unsigned long clkin,
+               unsigned long pll_min, unsigned long pll_max,
+               dsi_pll_calc_func func, void *data)
+{
+       return false;
+}
+
 #endif
 
 /* DPI */
@@ -376,11 +401,15 @@ void dispc_enable_fifomerge(bool enable);
 void dispc_enable_gamma_table(bool enable);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
+typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data);
+bool dispc_div_calc(unsigned long dispc,
+               unsigned long pck_min, unsigned long pck_max,
+               dispc_div_calc_func func, void *data);
+
 bool dispc_mgr_timings_ok(enum omap_channel channel,
                const struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
-void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
-               struct dispc_clock_info *cinfo);
 int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
                struct dispc_clock_info *cinfo);
 
index 7f791ae..77dbe0c 100644
@@ -414,7 +414,7 @@ static const char * const omap5_dss_clk_source_names[] = {
 };
 
 static const struct dss_param_range omap2_dss_param_range[] = {
-       [FEAT_PARAM_DSS_FCK]                    = { 0, 173000000 },
+       [FEAT_PARAM_DSS_FCK]                    = { 0, 133000000 },
        [FEAT_PARAM_DSS_PCD]                    = { 2, 255 },
        [FEAT_PARAM_DSIPLL_REGN]                = { 0, 0 },
        [FEAT_PARAM_DSIPLL_REGM]                = { 0, 0 },
@@ -459,15 +459,15 @@ static const struct dss_param_range omap4_dss_param_range[] = {
 };
 
 static const struct dss_param_range omap5_dss_param_range[] = {
-       [FEAT_PARAM_DSS_FCK]                    = { 0, 200000000 },
+       [FEAT_PARAM_DSS_FCK]                    = { 0, 209250000 },
        [FEAT_PARAM_DSS_PCD]                    = { 1, 255 },
        [FEAT_PARAM_DSIPLL_REGN]                = { 0, (1 << 8) - 1 },
        [FEAT_PARAM_DSIPLL_REGM]                = { 0, (1 << 12) - 1 },
        [FEAT_PARAM_DSIPLL_REGM_DISPC]          = { 0, (1 << 5) - 1 },
        [FEAT_PARAM_DSIPLL_REGM_DSI]            = { 0, (1 << 5) - 1 },
-       [FEAT_PARAM_DSIPLL_FINT]                = { 500000, 2500000 },
+       [FEAT_PARAM_DSIPLL_FINT]                = { 150000, 52000000 },
        [FEAT_PARAM_DSIPLL_LPDIV]               = { 0, (1 << 13) - 1 },
-       [FEAT_PARAM_DSI_FCK]                    = { 0, 170000000 },
+       [FEAT_PARAM_DSI_FCK]                    = { 0, 209250000 },
        [FEAT_PARAM_DOWNSCALE]                  = { 1, 4 },
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 2048 },
 };
index 7292364..17f4d55 100644 (file)
@@ -328,7 +328,7 @@ static void hdmi_runtime_put(void)
        WARN_ON(r < 0 && r != -ENOSYS);
 }
 
-static int __init hdmi_init_display(struct omap_dss_device *dssdev)
+static int hdmi_init_display(struct omap_dss_device *dssdev)
 {
        int r;
 
@@ -472,17 +472,12 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
         * Input clock is predivided by N + 1
         * output of which is the reference clk
         */
-       if (dssdev->clocks.hdmi.regn == 0)
-               pi->regn = HDMI_DEFAULT_REGN;
-       else
-               pi->regn = dssdev->clocks.hdmi.regn;
+
+       pi->regn = HDMI_DEFAULT_REGN;
 
        refclk = clkin / pi->regn;
 
-       if (dssdev->clocks.hdmi.regm2 == 0)
-               pi->regm2 = HDMI_DEFAULT_REGM2;
-       else
-               pi->regm2 = dssdev->clocks.hdmi.regm2;
+       pi->regm2 = HDMI_DEFAULT_REGM2;
 
        /*
         * multiplier is pixel_clk/ref_clk
@@ -804,7 +799,7 @@ static int hdmi_get_clocks(struct platform_device *pdev)
 {
        struct clk *clk;
 
-       clk = clk_get(&pdev->dev, "sys_clk");
+       clk = devm_clk_get(&pdev->dev, "sys_clk");
        if (IS_ERR(clk)) {
                DSSERR("can't get sys_clk\n");
                return PTR_ERR(clk);
@@ -815,12 +810,6 @@ static int hdmi_get_clocks(struct platform_device *pdev)
        return 0;
 }
 
-static void hdmi_put_clocks(void)
-{
-       if (hdmi.sys_clk)
-               clk_put(hdmi.sys_clk);
-}
-
 #if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
 int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
 {
@@ -965,7 +954,7 @@ int hdmi_audio_config(struct omap_dss_audio *audio)
 
 #endif
 
-static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *hdmi_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        const char *def_disp_name = omapdss_get_default_display_name();
@@ -993,7 +982,7 @@ static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *
        return def_dssdev;
 }
 
-static void __init hdmi_probe_pdata(struct platform_device *pdev)
+static int hdmi_probe_pdata(struct platform_device *pdev)
 {
        struct omap_dss_device *plat_dssdev;
        struct omap_dss_device *dssdev;
@@ -1003,11 +992,11 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
        plat_dssdev = hdmi_find_dssdev(pdev);
 
        if (!plat_dssdev)
-               return;
+               return 0;
 
        dssdev = dss_alloc_and_init_device(&pdev->dev);
        if (!dssdev)
-               return;
+               return -ENOMEM;
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
@@ -1017,13 +1006,11 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
        hdmi.ls_oe_gpio = priv->ls_oe_gpio;
        hdmi.hpd_gpio = priv->hpd_gpio;
 
-       dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
-
        r = hdmi_init_display(dssdev);
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = omapdss_output_set_device(&hdmi.output, dssdev);
@@ -1031,7 +1018,7 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
                DSSERR("failed to connect output to new device: %s\n",
                                dssdev->name);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = dss_add_device(dssdev);
@@ -1040,17 +1027,21 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
                omapdss_output_unset_device(&hdmi.output);
                hdmi_uninit_display(dssdev);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
+
+       return 0;
 }
 
-static void __init hdmi_init_output(struct platform_device *pdev)
+static void hdmi_init_output(struct platform_device *pdev)
 {
        struct omap_dss_output *out = &hdmi.output;
 
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_HDMI;
        out->type = OMAP_DISPLAY_TYPE_HDMI;
+       out->name = "hdmi.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
 
        dss_register_output(out);
 }
@@ -1063,7 +1054,7 @@ static void __exit hdmi_uninit_output(struct platform_device *pdev)
 }
 
 /* HDMI HW IP initialisation */
-static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
 {
        struct resource *res;
        int r;
@@ -1097,23 +1088,25 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
        hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
        hdmi.ip_data.phy_offset = HDMI_PHY;
 
+       hdmi_init_output(pdev);
+
        r = hdmi_panel_init();
        if (r) {
                DSSERR("can't init panel\n");
-               goto err_panel_init;
+               return r;
        }
 
        dss_debugfs_create_file("hdmi", hdmi_dump_regs);
 
-       hdmi_init_output(pdev);
-
-       hdmi_probe_pdata(pdev);
+       r = hdmi_probe_pdata(pdev);
+       if (r) {
+               hdmi_panel_exit();
+               hdmi_uninit_output(pdev);
+               pm_runtime_disable(&pdev->dev);
+               return r;
+       }
 
        return 0;
-
-err_panel_init:
-       hdmi_put_clocks();
-       return r;
 }
 
 static int __exit hdmi_remove_child(struct device *dev, void *data)
@@ -1135,8 +1128,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
-       hdmi_put_clocks();
-
        return 0;
 }
 
@@ -1168,6 +1159,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
 };
 
 static struct platform_driver omapdss_hdmihw_driver = {
+       .probe          = omapdss_hdmihw_probe,
        .remove         = __exit_p(omapdss_hdmihw_remove),
        .driver         = {
                .name   = "omapdss_hdmi",
@@ -1178,7 +1170,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
 
 int __init hdmi_init_platform_driver(void)
 {
-       return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
+       return platform_driver_register(&omapdss_hdmihw_driver);
 }
 
 void __exit hdmi_uninit_platform_driver(void)
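
The hdmi_compute_pll() hunk above hard-codes HDMI_DEFAULT_REGN/HDMI_DEFAULT_REGM2 instead of taking overrides from board data, and the surrounding comments describe the arithmetic: the input clock is pre-divided down to a reference clock, which is then multiplied toward the target rate. A minimal sketch of that arithmetic, where the EX_REGN constant and the integer approximation are assumptions for illustration, not values taken from this series:

/* Illustrative only: reference clock and multiplier as described in the
 * hdmi_compute_pll() comments.  EX_REGN is a stand-in for HDMI_DEFAULT_REGN;
 * the real code also handles fractional parts. */
#define EX_REGN	16

static unsigned long ex_hdmi_refclk(unsigned long clkin)
{
	return clkin / EX_REGN;			/* pre-divided input clock */
}

static unsigned long ex_hdmi_regm(unsigned long clkin, unsigned long target)
{
	return target / ex_hdmi_refclk(clkin);	/* multiplier = target / refclk */
}
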
index 79dea1a..5214df6 100644 (file)
@@ -113,6 +113,7 @@ struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
 
        return NULL;
 }
+EXPORT_SYMBOL(omap_dss_get_output);
 
 static const struct dss_mgr_ops *dss_mgr_ops;
 
index e903dd3..1a17dd1 100644 (file)
@@ -943,13 +943,13 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL(omapdss_rfbi_display_disable);
 
-static int __init rfbi_init_display(struct omap_dss_device *dssdev)
+static int rfbi_init_display(struct omap_dss_device *dssdev)
 {
        rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
        return 0;
 }
 
-static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *rfbi_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        const char *def_disp_name = omapdss_get_default_display_name();
@@ -977,7 +977,7 @@ static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *
        return def_dssdev;
 }
 
-static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
+static int rfbi_probe_pdata(struct platform_device *rfbidev)
 {
        struct omap_dss_device *plat_dssdev;
        struct omap_dss_device *dssdev;
@@ -986,11 +986,11 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
        plat_dssdev = rfbi_find_dssdev(rfbidev);
 
        if (!plat_dssdev)
-               return;
+               return 0;
 
        dssdev = dss_alloc_and_init_device(&rfbidev->dev);
        if (!dssdev)
-               return;
+               return -ENOMEM;
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
@@ -998,7 +998,7 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = omapdss_output_set_device(&rfbi.output, dssdev);
@@ -1006,7 +1006,7 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
                DSSERR("failed to connect output to new device: %s\n",
                                dssdev->name);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = dss_add_device(dssdev);
@@ -1014,17 +1014,21 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
                DSSERR("device %s register failed: %d\n", dssdev->name, r);
                omapdss_output_unset_device(&rfbi.output);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
+
+       return 0;
 }
 
-static void __init rfbi_init_output(struct platform_device *pdev)
+static void rfbi_init_output(struct platform_device *pdev)
 {
        struct omap_dss_output *out = &rfbi.output;
 
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_DBI;
        out->type = OMAP_DISPLAY_TYPE_DBI;
+       out->name = "rfbi.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
 
        dss_register_output(out);
 }
@@ -1037,7 +1041,7 @@ static void __exit rfbi_uninit_output(struct platform_device *pdev)
 }
 
 /* RFBI HW IP initialisation */
-static int __init omap_rfbihw_probe(struct platform_device *pdev)
+static int omap_rfbihw_probe(struct platform_device *pdev)
 {
        u32 rev;
        struct resource *rfbi_mem;
@@ -1089,7 +1093,12 @@ static int __init omap_rfbihw_probe(struct platform_device *pdev)
 
        rfbi_init_output(pdev);
 
-       rfbi_probe_pdata(pdev);
+       r = rfbi_probe_pdata(pdev);
+       if (r) {
+               rfbi_uninit_output(pdev);
+               pm_runtime_disable(&pdev->dev);
+               return r;
+       }
 
        return 0;
 
@@ -1133,6 +1142,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
 };
 
 static struct platform_driver omap_rfbihw_driver = {
+       .probe          = omap_rfbihw_probe,
        .remove         = __exit_p(omap_rfbihw_remove),
        .driver         = {
                .name   = "omapdss_rfbi",
@@ -1143,7 +1153,7 @@ static struct platform_driver omap_rfbihw_driver = {
 
 int __init rfbi_init_platform_driver(void)
 {
-       return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
+       return platform_driver_register(&omap_rfbihw_driver);
 }
 
 void __exit rfbi_uninit_platform_driver(void)
index 62b5374..0bcd302 100644 (file)
@@ -41,6 +41,72 @@ static struct {
        struct omap_dss_output output;
 } sdi;
 
+struct sdi_clk_calc_ctx {
+       unsigned long pck_min, pck_max;
+
+       struct dss_clock_info dss_cinfo;
+       struct dispc_clock_info dispc_cinfo;
+};
+
+static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
+{
+       struct sdi_clk_calc_ctx *ctx = data;
+
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
+
+       return true;
+}
+
+static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+{
+       struct sdi_clk_calc_ctx *ctx = data;
+
+       ctx->dss_cinfo.fck = fck;
+       ctx->dss_cinfo.fck_div = fckd;
+
+       return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
+                       dpi_calc_dispc_cb, ctx);
+}
+
+static int sdi_calc_clock_div(unsigned long pclk,
+               struct dss_clock_info *dss_cinfo,
+               struct dispc_clock_info *dispc_cinfo)
+{
+       int i;
+       struct sdi_clk_calc_ctx ctx;
+
+       /*
+        * DSS fclk gives us very few possibilities, so finding a good pixel
+        * clock may not be possible. We try multiple times to find the clock,
+        * each time widening the pixel clock range we look for, up to
+        * +/- 1MHz.
+        */
+
+       for (i = 0; i < 10; ++i) {
+               bool ok;
+
+               memset(&ctx, 0, sizeof(ctx));
+               if (pclk > 1000 * i * i * i)
+                       ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
+               else
+                       ctx.pck_min = 0;
+               ctx.pck_max = pclk + 1000 * i * i * i;
+
+               ok = dss_div_calc(ctx.pck_min, dpi_calc_dss_cb, &ctx);
+               if (ok) {
+                       *dss_cinfo = ctx.dss_cinfo;
+                       *dispc_cinfo = ctx.dispc_cinfo;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
 static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
 {
        struct omap_overlay_manager *mgr = dssdev->output->manager;
@@ -88,7 +154,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
        t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
        t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
 
-       r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
+       r = sdi_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
        if (r)
                goto err_calc_clock_div;
 
@@ -182,7 +248,7 @@ void omapdss_sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
 }
 EXPORT_SYMBOL(omapdss_sdi_set_datapairs);
 
-static int __init sdi_init_display(struct omap_dss_device *dssdev)
+static int sdi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("SDI init\n");
 
@@ -202,7 +268,7 @@ static int __init sdi_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *sdi_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        const char *def_disp_name = omapdss_get_default_display_name();
@@ -230,7 +296,7 @@ static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *p
        return def_dssdev;
 }
 
-static void __init sdi_probe_pdata(struct platform_device *sdidev)
+static int sdi_probe_pdata(struct platform_device *sdidev)
 {
        struct omap_dss_device *plat_dssdev;
        struct omap_dss_device *dssdev;
@@ -239,11 +305,11 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
        plat_dssdev = sdi_find_dssdev(sdidev);
 
        if (!plat_dssdev)
-               return;
+               return 0;
 
        dssdev = dss_alloc_and_init_device(&sdidev->dev);
        if (!dssdev)
-               return;
+               return -ENOMEM;
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
@@ -251,7 +317,7 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = omapdss_output_set_device(&sdi.output, dssdev);
@@ -259,7 +325,7 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
                DSSERR("failed to connect output to new device: %s\n",
                                dssdev->name);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = dss_add_device(dssdev);
@@ -267,17 +333,21 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
                DSSERR("device %s register failed: %d\n", dssdev->name, r);
                omapdss_output_unset_device(&sdi.output);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
+
+       return 0;
 }
 
-static void __init sdi_init_output(struct platform_device *pdev)
+static void sdi_init_output(struct platform_device *pdev)
 {
        struct omap_dss_output *out = &sdi.output;
 
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_SDI;
        out->type = OMAP_DISPLAY_TYPE_SDI;
+       out->name = "sdi.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
 
        dss_register_output(out);
 }
@@ -289,11 +359,17 @@ static void __exit sdi_uninit_output(struct platform_device *pdev)
        dss_unregister_output(out);
 }
 
-static int __init omap_sdi_probe(struct platform_device *pdev)
+static int omap_sdi_probe(struct platform_device *pdev)
 {
+       int r;
+
        sdi_init_output(pdev);
 
-       sdi_probe_pdata(pdev);
+       r = sdi_probe_pdata(pdev);
+       if (r) {
+               sdi_uninit_output(pdev);
+               return r;
+       }
 
        return 0;
 }
@@ -308,6 +384,7 @@ static int __exit omap_sdi_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver omap_sdi_driver = {
+       .probe          = omap_sdi_probe,
        .remove         = __exit_p(omap_sdi_remove),
        .driver         = {
                .name   = "omapdss_sdi",
@@ -317,7 +394,7 @@ static struct platform_driver omap_sdi_driver = {
 
 int __init sdi_init_platform_driver(void)
 {
-       return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+       return platform_driver_register(&omap_sdi_driver);
 }
 
 void __exit sdi_uninit_platform_driver(void)
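
The new sdi_calc_clock_div() above retries the divider search with a pixel-clock window that widens by 1000 * i * i * i Hz per attempt, i.e. from an exact match at i = 0 up to roughly +/- 729 kHz at i = 9. A standalone sketch of just that window computation (the function name and the printed output are illustrative, not driver code):

#include <stdio.h>

/* Print the [pck_min, pck_max] range tried on each attempt for a given
 * pixel clock, mirroring the loop in sdi_calc_clock_div(). */
static void ex_sdi_windows(unsigned long pclk)
{
	for (int i = 0; i < 10; ++i) {
		unsigned long delta = 1000UL * i * i * i;
		unsigned long min = pclk > delta ? pclk - delta : 0;
		unsigned long max = pclk + delta;

		printf("attempt %d: %lu..%lu Hz\n", i, min, max);
	}
}
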
index 006caf3..74fdb3e 100644 (file)
@@ -519,10 +519,6 @@ int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
                goto err0;
        }
 
-       if (dssdev->platform_enable)
-               dssdev->platform_enable(dssdev);
-
-
        r = venc_power_on(dssdev);
        if (r)
                goto err1;
@@ -533,8 +529,6 @@ int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
 
        return 0;
 err1:
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
        omap_dss_stop_device(dssdev);
 err0:
        mutex_unlock(&venc.venc_lock);
@@ -551,9 +545,6 @@ void omapdss_venc_display_disable(struct omap_dss_device *dssdev)
 
        omap_dss_stop_device(dssdev);
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        mutex_unlock(&venc.venc_lock);
 }
 
@@ -642,7 +633,7 @@ void omapdss_venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
        mutex_unlock(&venc.venc_lock);
 }
 
-static int __init venc_init_display(struct omap_dss_device *dssdev)
+static int venc_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -721,7 +712,7 @@ static int venc_get_clocks(struct platform_device *pdev)
        struct clk *clk;
 
        if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
-               clk = clk_get(&pdev->dev, "tv_dac_clk");
+               clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
                if (IS_ERR(clk)) {
                        DSSERR("can't get tv_dac_clk\n");
                        return PTR_ERR(clk);
@@ -735,13 +726,7 @@ static int venc_get_clocks(struct platform_device *pdev)
        return 0;
 }
 
-static void venc_put_clocks(void)
-{
-       if (venc.tv_dac_clk)
-               clk_put(venc.tv_dac_clk);
-}
-
-static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *pdev)
+static struct omap_dss_device *venc_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        const char *def_disp_name = omapdss_get_default_display_name();
@@ -769,7 +754,7 @@ static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *
        return def_dssdev;
 }
 
-static void __init venc_probe_pdata(struct platform_device *vencdev)
+static int venc_probe_pdata(struct platform_device *vencdev)
 {
        struct omap_dss_device *plat_dssdev;
        struct omap_dss_device *dssdev;
@@ -778,21 +763,19 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
        plat_dssdev = venc_find_dssdev(vencdev);
 
        if (!plat_dssdev)
-               return;
+               return 0;
 
        dssdev = dss_alloc_and_init_device(&vencdev->dev);
        if (!dssdev)
-               return;
+               return -ENOMEM;
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
-       dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
-
        r = venc_init_display(dssdev);
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = omapdss_output_set_device(&venc.output, dssdev);
@@ -800,7 +783,7 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
                DSSERR("failed to connect output to new device: %s\n",
                                dssdev->name);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
 
        r = dss_add_device(dssdev);
@@ -808,17 +791,21 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
                DSSERR("device %s register failed: %d\n", dssdev->name, r);
                omapdss_output_unset_device(&venc.output);
                dss_put_device(dssdev);
-               return;
+               return r;
        }
+
+       return 0;
 }
 
-static void __init venc_init_output(struct platform_device *pdev)
+static void venc_init_output(struct platform_device *pdev)
 {
        struct omap_dss_output *out = &venc.output;
 
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_VENC;
        out->type = OMAP_DISPLAY_TYPE_VENC;
+       out->name = "venc.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
 
        dss_register_output(out);
 }
@@ -831,7 +818,7 @@ static void __exit venc_uninit_output(struct platform_device *pdev)
 }
 
 /* VENC HW IP initialisation */
-static int __init omap_venchw_probe(struct platform_device *pdev)
+static int omap_venchw_probe(struct platform_device *pdev)
 {
        u8 rev_id;
        struct resource *venc_mem;
@@ -879,14 +866,19 @@ static int __init omap_venchw_probe(struct platform_device *pdev)
 
        venc_init_output(pdev);
 
-       venc_probe_pdata(pdev);
+       r = venc_probe_pdata(pdev);
+       if (r) {
+               venc_panel_exit();
+               venc_uninit_output(pdev);
+               pm_runtime_disable(&pdev->dev);
+               return r;
+       }
 
        return 0;
 
 err_panel_init:
 err_runtime_get:
        pm_runtime_disable(&pdev->dev);
-       venc_put_clocks();
        return r;
 }
 
@@ -904,7 +896,6 @@ static int __exit omap_venchw_remove(struct platform_device *pdev)
        venc_uninit_output(pdev);
 
        pm_runtime_disable(&pdev->dev);
-       venc_put_clocks();
 
        return 0;
 }
@@ -939,6 +930,7 @@ static const struct dev_pm_ops venc_pm_ops = {
 };
 
 static struct platform_driver omap_venchw_driver = {
+       .probe          = omap_venchw_probe,
        .remove         = __exit_p(omap_venchw_remove),
        .driver         = {
                .name   = "omapdss_venc",
@@ -949,7 +941,7 @@ static struct platform_driver omap_venchw_driver = {
 
 int __init venc_init_platform_driver(void)
 {
-       return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
+       return platform_driver_register(&omap_venchw_driver);
 }
 
 void __exit venc_uninit_platform_driver(void)
index ca585ef..ff00d1d 100644 (file)
@@ -2388,7 +2388,7 @@ static int omapfb_init_connections(struct omapfb2_device *fbdev,
                struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
                struct omap_dss_output *out = dssdev->output;
 
-               mgr = omap_dss_get_overlay_manager(dssdev->channel);
+               mgr = omap_dss_get_overlay_manager(out->dispc_channel);
 
                if (!mgr || !out)
                        continue;
@@ -2422,7 +2422,7 @@ static int omapfb_init_connections(struct omapfb2_device *fbdev,
        return 0;
 }
 
-static int __init omapfb_probe(struct platform_device *pdev)
+static int omapfb_probe(struct platform_device *pdev)
 {
        struct omapfb2_device *fbdev = NULL;
        int r = 0;
@@ -2484,7 +2484,7 @@ static int __init omapfb_probe(struct platform_device *pdev)
 
        if (fbdev->num_displays == 0) {
                dev_err(&pdev->dev, "no displays\n");
-               r = -EINVAL;
+               r = -EPROBE_DEFER;
                goto cleanup;
        }
 
@@ -2595,6 +2595,7 @@ static int __exit omapfb_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver omapfb_driver = {
+       .probe          = omapfb_probe,
        .remove         = __exit_p(omapfb_remove),
        .driver         = {
                .name   = "omapfb",
@@ -2602,36 +2603,13 @@ static struct platform_driver omapfb_driver = {
        },
 };
 
-static int __init omapfb_init(void)
-{
-       DBG("omapfb_init\n");
-
-       if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
-               printk(KERN_ERR "failed to register omapfb driver\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static void __exit omapfb_exit(void)
-{
-       DBG("omapfb_exit\n");
-       platform_driver_unregister(&omapfb_driver);
-}
-
 module_param_named(mode, def_mode, charp, 0);
 module_param_named(vram, def_vram, charp, 0);
 module_param_named(rotate, def_rotate, int, 0);
 module_param_named(vrfb, def_vrfb, bool, 0);
 module_param_named(mirror, def_mirror, bool, 0);
 
-/* late_initcall to let panel/ctrl drivers loaded first.
- * I guess better option would be a more dynamic approach,
- * so that omapfb reacts to new panels when they are loaded */
-late_initcall(omapfb_init);
-/*module_init(omapfb_init);*/
-module_exit(omapfb_exit);
+module_platform_driver(omapfb_driver);
 
 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
 MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
index 63203ac..0264704 100644 (file)
@@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
        tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16)
            | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7);
        lcdc_write_chan(ch, LDHAJR, tmp);
+       lcdc_write_chan_mirror(ch, LDHAJR, tmp);
 }
 
 static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl)
index aea605c..aae187a 100644 (file)
@@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev)
        ihold(bdev->bd_inode);
        return bdev;
 }
+EXPORT_SYMBOL(bdgrab);
 
 long nr_blockdev_pages(void)
 {
index 56efcaa..9c6d06d 100644 (file)
@@ -2999,20 +2999,23 @@ static int ext4_split_extent_at(handle_t *handle,
                        if (split_flag & EXT4_EXT_DATA_VALID1) {
                                err = ext4_ext_zeroout(inode, ex2);
                                zero_ex.ee_block = ex2->ee_block;
-                               zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
+                               zero_ex.ee_len = cpu_to_le16(
+                                               ext4_ext_get_actual_len(ex2));
                                ext4_ext_store_pblock(&zero_ex,
                                                      ext4_ext_pblock(ex2));
                        } else {
                                err = ext4_ext_zeroout(inode, ex);
                                zero_ex.ee_block = ex->ee_block;
-                               zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+                               zero_ex.ee_len = cpu_to_le16(
+                                               ext4_ext_get_actual_len(ex));
                                ext4_ext_store_pblock(&zero_ex,
                                                      ext4_ext_pblock(ex));
                        }
                } else {
                        err = ext4_ext_zeroout(inode, &orig_ex);
                        zero_ex.ee_block = orig_ex.ee_block;
-                       zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
+                       zero_ex.ee_len = cpu_to_le16(
+                                               ext4_ext_get_actual_len(&orig_ex));
                        ext4_ext_store_pblock(&zero_ex,
                                              ext4_ext_pblock(&orig_ex));
                }
@@ -3272,7 +3275,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                if (err)
                        goto out;
                zero_ex.ee_block = ex->ee_block;
-               zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+               zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
                ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
 
                err = ext4_ext_get_access(handle, inode, path + depth);
index b505a14..a041831 100644 (file)
@@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
                blk = *i_data;
                if (level > 0) {
                        ext4_lblk_t first2;
-                       bh = sb_bread(inode->i_sb, blk);
+                       bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
                        if (!bh) {
-                               EXT4_ERROR_INODE_BLOCK(inode, blk,
+                               EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
                                                       "Read failure");
                                return -EIO;
                        }
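
The ext4 hunks above wrap ext4_ext_get_actual_len() in cpu_to_le16() and read the indirect block number through le32_to_cpu() because those fields live on disk in little-endian order; assigning or dereferencing host-order values silently corrupts them on big-endian machines. A minimal illustration of the convention, using a made-up struct rather than the real ext4 layouts:

#include <linux/types.h>	/* __le16, __le32, sector_t */
#include <asm/byteorder.h>	/* cpu_to_le16(), le32_to_cpu() */

/* Hypothetical on-disk record: fields stay little-endian in memory and
 * are converted only at the boundaries. */
struct ex_disk_rec {
	__le16 len;
	__le32 blk;
};

static void ex_set_len(struct ex_disk_rec *rec, unsigned int host_len)
{
	rec->len = cpu_to_le16(host_len);	/* host -> disk order */
}

static sector_t ex_get_blk(const struct ex_disk_rec *rec)
{
	return le32_to_cpu(rec->blk);		/* disk -> host order */
}
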
index 0116886..a272007 100644 (file)
@@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                iattr->ia_valid |= ATTR_SIZE;
        }
        if (bmval[0] & FATTR4_WORD0_ACL) {
-               int nace;
+               u32 nace;
                struct nfs4_ace *ace;
 
                READ_BUF(4); len += 4;
index c196369..4cce1d9 100644 (file)
@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
        if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
                return -ENOSPC;
 
-       if (name[0] == '.' && (name[1] == '\0' ||
-                              (name[1] == '.' && name[2] == '\0')))
+       if (name[0] == '.' && (namelen < 2 ||
+                              (namelen == 2 && name[1] == '.')))
                return 0;
 
        dentry = lookup_one_len(name, dbuf->xadir, namelen);
index ac838b8..f21acf0 100644 (file)
@@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
        c->remounting_rw = 1;
        c->ro_mount = 0;
 
+       if (c->space_fixup) {
+               err = ubifs_fixup_free_space(c);
+               if (err)
+                       return err;
+       }
+
        err = check_free_space(c);
        if (err)
                goto out;
@@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
                err = dbg_check_space_info(c);
        }
 
-       if (c->space_fixup) {
-               err = ubifs_fixup_free_space(c);
-               if (err)
-                       goto out;
-       }
-
        mutex_unlock(&c->umount_mutex);
        return err;
 
index 2d94d74..f1ce786 100644 (file)
@@ -1593,9 +1593,8 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
 
 int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
 int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
index e3e0d65..adb3f9b 100644 (file)
@@ -120,7 +120,7 @@ enum drm_mode_status {
        .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
        .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
        .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-       .vscan = (vs), .flags = (f), .vrefresh = 0, \
+       .vscan = (vs), .flags = (f), \
        .base.type = DRM_MODE_OBJECT_MODE
 
 #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
@@ -310,7 +310,7 @@ struct drm_plane;
  * drm_crtc_funcs - control CRTCs for a given device
  * @save: save CRTC state
  * @restore: restore CRTC state
- * @reset: reset CRTC after state has been invalidate (e.g. resume)
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
  * @cursor_set: setup the cursor
  * @cursor_move: move the cursor
  * @gamma_set: specify color ramp for CRTC
@@ -554,7 +554,6 @@ enum drm_connector_force {
  * @probed_modes: list of modes derived directly from the display
  * @display_info: information about attached display (e.g. from EDID)
  * @funcs: connector control functions
- * @user_modes: user added mode list
  * @edid_blob_ptr: DRM property containing EDID if present
  * @properties: property tracking for this connector
  * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
@@ -598,7 +597,6 @@ struct drm_connector {
        struct drm_display_info display_info;
        const struct drm_connector_funcs *funcs;
 
-       struct list_head user_modes;
        struct drm_property_blob *edid_blob_ptr;
        struct drm_object_properties properties;
 
@@ -922,15 +920,11 @@ extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
 extern int drm_mode_width(const struct drm_display_mode *mode);
 extern int drm_mode_height(const struct drm_display_mode *mode);
 
 /* for use by fb module */
-extern int drm_mode_attachmode_crtc(struct drm_device *dev,
-                                   struct drm_crtc *crtc,
-                                   const struct drm_display_mode *mode);
-extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
-
 extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
 extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
 extern void drm_mode_list_concat(struct list_head *head,
@@ -938,6 +932,9 @@ extern void drm_mode_list_concat(struct list_head *head,
 extern void drm_mode_validate_size(struct drm_device *dev,
                                   struct list_head *mode_list,
                                   int maxX, int maxY, int maxPitch);
+extern void drm_mode_validate_clocks(struct drm_device *dev,
+                                    struct list_head *mode_list,
+                                    int *min, int *max, int n_ranges);
 extern void drm_mode_prune_invalid(struct drm_device *dev,
                                   struct list_head *mode_list, bool verbose);
 extern void drm_mode_sort(struct list_head *mode_list);
@@ -1036,14 +1033,6 @@ extern int drm_mode_getfb(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
 extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
                                  void *data, struct drm_file *file_priv);
-extern int drm_mode_addmode_ioctl(struct drm_device *dev,
-                                 void *data, struct drm_file *file_priv);
-extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
-                                void *data, struct drm_file *file_priv);
-extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
-                                    void *data, struct drm_file *file_priv);
-extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
-                                    void *data, struct drm_file *file_priv);
 
 extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *file_priv);
index 5da1b4a..fc481fc 100644 (file)
@@ -244,12 +244,21 @@ struct edid {
 
 #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
 
+/* Short Audio Descriptor */
+struct cea_sad {
+       u8 format;
+       u8 channels; /* max number of channels - 1 */
+       u8 freq;
+       u8 byte2; /* meaning depends on format */
+};
+
 struct drm_encoder;
 struct drm_connector;
 struct drm_display_mode;
 struct hdmi_avi_infoframe;
 
 void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
 int drm_av_sync_delay(struct drm_connector *connector,
                      struct drm_display_mode *mode);
 struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
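
drm_edid_to_sad(), declared above, parses the CEA extension block of an EDID into an array of struct cea_sad; it returns the number of descriptors found and hands ownership of the allocated array to the caller. A hedged sketch of a consumer, with the logging and function name assumed for illustration:

#include <linux/printk.h>
#include <linux/slab.h>
#include <drm/drm_edid.h>

/* Illustrative consumer of drm_edid_to_sad(); not taken from this series. */
static void ex_dump_sads(struct edid *edid)
{
	struct cea_sad *sads;
	int i, count;

	count = drm_edid_to_sad(edid, &sads);
	if (count <= 0)
		return;			/* error or no audio descriptors */

	for (i = 0; i < count; i++)
		pr_info("SAD %d: format %u, up to %u channels\n",
			i, sads[i].format, sads[i].channels + 1);

	kfree(sads);			/* caller owns the array */
}
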
index f97a8ef..8230b46 100644 (file)
@@ -106,12 +106,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 int drm_fb_helper_set_par(struct fb_info *info);
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info);
-int drm_fb_helper_setcolreg(unsigned regno,
-                           unsigned red,
-                           unsigned green,
-                           unsigned blue,
-                           unsigned transp,
-                           struct fb_info *info);
 
 bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
index 918e8fe..c2af598 100644 (file)
        {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index 0fbd046..9c8dca7 100644 (file)
@@ -902,6 +902,10 @@ extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
  * ttm_bo_util.c
  */
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                      struct ttm_mem_reg *mem);
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                    struct ttm_mem_reg *mem);
 /**
  * ttm_bo_move_ttm
  *
index 76a87fb..377cd8c 100644 (file)
@@ -141,11 +141,11 @@ typedef struct {
 } compat_sigset_t;
 
 struct compat_sigaction {
-#ifndef __ARCH_HAS_ODD_SIGACTION
+#ifndef __ARCH_HAS_IRIX_SIGACTION
        compat_uptr_t                   sa_handler;
        compat_ulong_t                  sa_flags;
 #else
-       compat_ulong_t                  sa_flags;
+       compat_uint_t                   sa_flags;
        compat_uptr_t                   sa_handler;
 #endif
 #ifdef __ARCH_HAS_SA_RESTORER
index b3d00fa..8bfa956 100644 (file)
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
  *
  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- *                          struct net_device *dev)
+ *                          struct net_device *dev, u32 filter_mask)
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *     Called to change device carrier. Soft-devices (like dummy, team, etc)
index a2dcb94..9475c5c 100644 (file)
@@ -250,11 +250,11 @@ extern int show_unhandled_signals;
 extern int sigsuspend(sigset_t *);
 
 struct sigaction {
-#ifndef __ARCH_HAS_ODD_SIGACTION
+#ifndef __ARCH_HAS_IRIX_SIGACTION
        __sighandler_t  sa_handler;
        unsigned long   sa_flags;
 #else
-       unsigned long   sa_flags;
+       unsigned int    sa_flags;
        __sighandler_t  sa_handler;
 #endif
 #ifdef __ARCH_HAS_SA_RESTORER
index 399162b..e1379b4 100644 (file)
@@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *);
 /*
  * DISCOVERY LAYER
  *****************************/
-int fc_disc_init(struct fc_lport *);
+void fc_disc_init(struct fc_lport *);
+void fc_disc_config(struct fc_lport *, void *);
 
 static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc)
 {
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
new file mode 100644 (file)
index 0000000..94db6a2
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * include/trace/events/host1x.h
+ *
+ * host1x event logging to ftrace.
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM host1x
+
+#if !defined(_TRACE_HOST1X_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOST1X_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(host1x,
+       TP_PROTO(const char *name),
+       TP_ARGS(name),
+       TP_STRUCT__entry(__field(const char *, name)),
+       TP_fast_assign(__entry->name = name;),
+       TP_printk("name=%s", __entry->name)
+);
+
+DEFINE_EVENT(host1x, host1x_channel_open,
+       TP_PROTO(const char *name),
+       TP_ARGS(name)
+);
+
+DEFINE_EVENT(host1x, host1x_channel_release,
+       TP_PROTO(const char *name),
+       TP_ARGS(name)
+);
+
+DEFINE_EVENT(host1x, host1x_cdma_begin,
+       TP_PROTO(const char *name),
+       TP_ARGS(name)
+);
+
+DEFINE_EVENT(host1x, host1x_cdma_end,
+       TP_PROTO(const char *name),
+       TP_ARGS(name)
+);
+
+TRACE_EVENT(host1x_cdma_push,
+       TP_PROTO(const char *name, u32 op1, u32 op2),
+
+       TP_ARGS(name, op1, op2),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(u32, op1)
+               __field(u32, op2)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->op1 = op1;
+               __entry->op2 = op2;
+       ),
+
+       TP_printk("name=%s, op1=%08x, op2=%08x",
+               __entry->name, __entry->op1, __entry->op2)
+);
+
+TRACE_EVENT(host1x_cdma_push_gather,
+       TP_PROTO(const char *name, u32 mem_id,
+                       u32 words, u32 offset, void *cmdbuf),
+
+       TP_ARGS(name, mem_id, words, offset, cmdbuf),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(u32, mem_id)
+               __field(u32, words)
+               __field(u32, offset)
+               __field(bool, cmdbuf)
+               __dynamic_array(u32, cmdbuf, words)
+       ),
+
+       TP_fast_assign(
+               if (cmdbuf) {
+                       memcpy(__get_dynamic_array(cmdbuf), cmdbuf+offset,
+                                       words * sizeof(u32));
+               }
+               __entry->cmdbuf = cmdbuf;
+               __entry->name = name;
+               __entry->mem_id = mem_id;
+               __entry->words = words;
+               __entry->offset = offset;
+       ),
+
+       TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
+         __entry->name, __entry->mem_id,
+         __entry->words, __entry->offset,
+         __print_hex(__get_dynamic_array(cmdbuf),
+                 __entry->cmdbuf ? __entry->words * 4 : 0))
+);
+
+TRACE_EVENT(host1x_channel_submit,
+       TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks,
+                       u32 syncpt_id, u32 syncpt_incrs),
+
+       TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(u32, cmdbufs)
+               __field(u32, relocs)
+               __field(u32, waitchks)
+               __field(u32, syncpt_id)
+               __field(u32, syncpt_incrs)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->cmdbufs = cmdbufs;
+               __entry->relocs = relocs;
+               __entry->waitchks = waitchks;
+               __entry->syncpt_id = syncpt_id;
+               __entry->syncpt_incrs = syncpt_incrs;
+       ),
+
+       TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d,"
+               "syncpt_id=%u, syncpt_incrs=%u",
+         __entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks,
+         __entry->syncpt_id, __entry->syncpt_incrs)
+);
+
+TRACE_EVENT(host1x_channel_submitted,
+       TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max),
+
+       TP_ARGS(name, syncpt_base, syncpt_max),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(u32, syncpt_base)
+               __field(u32, syncpt_max)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->syncpt_base = syncpt_base;
+               __entry->syncpt_max = syncpt_max;
+       ),
+
+       TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d",
+               __entry->name, __entry->syncpt_base, __entry->syncpt_max)
+);
+
+TRACE_EVENT(host1x_channel_submit_complete,
+       TP_PROTO(const char *name, int count, u32 thresh),
+
+       TP_ARGS(name, count, thresh),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(int, count)
+               __field(u32, thresh)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->count = count;
+               __entry->thresh = thresh;
+       ),
+
+       TP_printk("name=%s, count=%d, thresh=%d",
+               __entry->name, __entry->count, __entry->thresh)
+);
+
+TRACE_EVENT(host1x_wait_cdma,
+       TP_PROTO(const char *name, u32 eventid),
+
+       TP_ARGS(name, eventid),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(u32, eventid)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->eventid = eventid;
+       ),
+
+       TP_printk("name=%s, event=%d", __entry->name, __entry->eventid)
+);
+
+TRACE_EVENT(host1x_syncpt_load_min,
+       TP_PROTO(u32 id, u32 val),
+
+       TP_ARGS(id, val),
+
+       TP_STRUCT__entry(
+               __field(u32, id)
+               __field(u32, val)
+       ),
+
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->val = val;
+       ),
+
+       TP_printk("id=%d, val=%d", __entry->id, __entry->val)
+);
+
+TRACE_EVENT(host1x_syncpt_wait_check,
+       TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
+
+       TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
+
+       TP_STRUCT__entry(
+               __field(void *, mem_id)
+               __field(u32, offset)
+               __field(u32, syncpt_id)
+               __field(u32, thresh)
+               __field(u32, min)
+       ),
+
+       TP_fast_assign(
+               __entry->mem_id = mem_id;
+               __entry->offset = offset;
+               __entry->syncpt_id = syncpt_id;
+               __entry->thresh = thresh;
+               __entry->min = min;
+       ),
+
+       TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
+               __entry->mem_id, __entry->offset,
+               __entry->syncpt_id, __entry->thresh,
+               __entry->min)
+);
+
+#endif /*  _TRACE_HOST1X_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
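
The new header defines the host1x events with DECLARE_EVENT_CLASS()/TRACE_EVENT(); exactly one compilation unit in the driver must define CREATE_TRACE_POINTS before including it, and callers then emit events through the generated trace_<name>() helpers. A rough usage sketch (the dev_name() arguments and variable names are placeholders, not lifted from the host1x driver):

/* In exactly one .c file of the driver: instantiate the tracepoints. */
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>

/* Elsewhere, illustrative call sites matching the TP_PROTOs above. */
trace_host1x_channel_open(dev_name(dev));
trace_host1x_cdma_push(dev_name(dev), op1, op2);
trace_host1x_syncpt_load_min(id, val);
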
index ba99ce3..119487e 100644 (file)
@@ -8,9 +8,11 @@ header-y += i810_drm.h
 header-y += i915_drm.h
 header-y += mga_drm.h
 header-y += nouveau_drm.h
+header-y += qxl_drm.h
 header-y += r128_drm.h
 header-y += radeon_drm.h
 header-y += savage_drm.h
 header-y += sis_drm.h
+header-y += tegra_drm.h
 header-y += via_drm.h
 header-y += vmwgfx_drm.h
index 8d1e2bb..5a57be6 100644 (file)
@@ -36,7 +36,7 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#if defined(__linux__)
+#if defined(__KERNEL__) || defined(__linux__)
 
 #include <linux/types.h>
 #include <asm/ioctl.h>
@@ -711,8 +711,8 @@ struct drm_prime_handle {
 #define DRM_IOCTL_MODE_SETGAMMA                DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
 #define DRM_IOCTL_MODE_GETENCODER      DRM_IOWR(0xA6, struct drm_mode_get_encoder)
 #define DRM_IOCTL_MODE_GETCONNECTOR    DRM_IOWR(0xA7, struct drm_mode_get_connector)
-#define DRM_IOCTL_MODE_ATTACHMODE      DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
-#define DRM_IOCTL_MODE_DETACHMODE      DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
+#define DRM_IOCTL_MODE_ATTACHMODE      DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+#define DRM_IOCTL_MODE_DETACHMODE      DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
 
 #define DRM_IOCTL_MODE_GETPROPERTY     DRM_IOWR(0xAA, struct drm_mode_get_property)
 #define DRM_IOCTL_MODE_SETPROPERTY     DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
index 3d6301b..090e533 100644 (file)
@@ -367,13 +367,13 @@ struct drm_mode_mode_cmd {
  * depending on the value in flags different members are used.
  *
  * CURSOR_BO uses
- *    crtc
+ *    crtc_id
  *    width
  *    height
- *    handle - if 0 turns the cursor of
+ *    handle - if 0 turns the cursor off
  *
  * CURSOR_MOVE uses
- *    crtc
+ *    crtc_id
  *    x
  *    y
  */
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
new file mode 100644 (file)
index 0000000..ebebd36
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef QXL_DRM_H
+#define QXL_DRM_H
+
+#include <stddef.h>
+#include "drm/drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define QXL_GEM_DOMAIN_CPU 0
+#define QXL_GEM_DOMAIN_VRAM 1
+#define QXL_GEM_DOMAIN_SURFACE 2
+
+#define DRM_QXL_ALLOC       0x00
+#define DRM_QXL_MAP         0x01
+#define DRM_QXL_EXECBUFFER  0x02
+#define DRM_QXL_UPDATE_AREA 0x03
+#define DRM_QXL_GETPARAM    0x04
+#define DRM_QXL_CLIENTCAP   0x05
+
+#define DRM_QXL_ALLOC_SURF  0x06
+
+struct drm_qxl_alloc {
+       uint32_t size;
+       uint32_t handle; /* 0 is an invalid handle */
+};
+
+struct drm_qxl_map {
+       uint64_t offset; /* use for mmap system call */
+       uint32_t handle;
+       uint32_t pad;
+};
+
+/*
+ * dest is the bo we are writing the relocation into
+ * src is the bo we are relocating.
+ * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
+ * src_offset)
+ */
+#define QXL_RELOC_TYPE_BO 1
+#define QXL_RELOC_TYPE_SURF 2
+
+struct drm_qxl_reloc {
+       uint64_t src_offset; /* offset into src_handle or src buffer */
+       uint64_t dst_offset; /* offset in dest handle */
+       uint32_t src_handle; /* dest handle to compute address from */
+       uint32_t dst_handle; /* 0 if to command buffer */
+       uint32_t reloc_type;
+       uint32_t pad;
+};
+
+struct drm_qxl_command {
+       uint64_t         __user command; /* void* */
+       uint64_t         __user relocs; /* struct drm_qxl_reloc* */
+       uint32_t                type;
+       uint32_t                command_size;
+       uint32_t                relocs_num;
+       uint32_t                pad;
+};
+
+/* XXX: call it drm_qxl_commands? */
+struct drm_qxl_execbuffer {
+       uint32_t                flags;          /* for future use */
+       uint32_t                commands_num;
+       uint64_t         __user commands;       /* struct drm_qxl_command* */
+};
+
+struct drm_qxl_update_area {
+       uint32_t handle;
+       uint32_t top;
+       uint32_t left;
+       uint32_t bottom;
+       uint32_t right;
+       uint32_t pad;
+};
+
+#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
+#define QXL_PARAM_MAX_RELOCS 2
+struct drm_qxl_getparam {
+       uint64_t param;
+       uint64_t value;
+};
+
+/* these are one bit values */
+struct drm_qxl_clientcap {
+       uint32_t index;
+       uint32_t pad;
+};
+
+struct drm_qxl_alloc_surf {
+       uint32_t format;
+       uint32_t width;
+       uint32_t height;
+       int32_t stride;
+       uint32_t handle;
+       uint32_t pad;
+};
+
+#define DRM_IOCTL_QXL_ALLOC \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
+
+#define DRM_IOCTL_QXL_MAP \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
+
+#define DRM_IOCTL_QXL_EXECBUFFER \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
+               struct drm_qxl_execbuffer)
+
+#define DRM_IOCTL_QXL_UPDATE_AREA \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
+               struct drm_qxl_update_area)
+
+#define DRM_IOCTL_QXL_GETPARAM \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
+               struct drm_qxl_getparam)
+
+#define DRM_IOCTL_QXL_CLIENTCAP \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
+               struct drm_qxl_clientcap)
+
+#define DRM_IOCTL_QXL_ALLOC_SURF \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
+               struct drm_qxl_alloc_surf)
+
+#endif
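
For orientation, a hypothetical userspace sketch exercising two of the new QXL ioctls (not part of this patch; the device node path and allocation size are illustrative, and the header is assumed to be reachable as drm/qxl_drm.h on the include path):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm/qxl_drm.h"

int main(void)
{
	struct drm_qxl_getparam gp;
	struct drm_qxl_alloc alloc;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed qxl node */

	if (fd < 0)
		return 1;

	memset(&gp, 0, sizeof(gp));
	gp.param = QXL_PARAM_NUM_SURFACES;
	if (ioctl(fd, DRM_IOCTL_QXL_GETPARAM, &gp) == 0)
		printf("surfaces: %llu\n", (unsigned long long)gp.value);

	memset(&alloc, 0, sizeof(alloc));
	alloc.size = 4096;			/* one page */
	if (ioctl(fd, DRM_IOCTL_QXL_ALLOC, &alloc) == 0)
		printf("bo handle: %u\n", alloc.handle);

	close(fd);
	return 0;
}
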
index eeda917..321d4ac 100644 (file)
@@ -918,6 +918,7 @@ struct drm_radeon_gem_va {
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
 #define RADEON_CS_RING_DMA          2
+#define RADEON_CS_RING_UVD          3
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
 
@@ -972,6 +973,13 @@ struct drm_radeon_cs {
 #define RADEON_INFO_MAX_SE             0x12
 /* max SH per SE */
 #define RADEON_INFO_MAX_SH_PER_SE      0x13
+/* fast fb access is enabled */
+#define RADEON_INFO_FASTFB_WORKING     0x14
+/* query if a RADEON_CS_RING_* submission is supported */
+#define RADEON_INFO_RING_WORKING       0x15
+/* SI tile mode array */
+#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
+
 
 struct drm_radeon_info {
        uint32_t                request;
@@ -979,4 +987,22 @@ struct drm_radeon_info {
        uint64_t                value;
 };
 
+/* These correspond to the tile index to use; they explicitly state the API
+ * that is implicitly defined by the tile mode array.
+ */
+#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED      8
+#define SI_TILE_MODE_COLOR_1D                  13
+#define SI_TILE_MODE_COLOR_1D_SCANOUT          9
+#define SI_TILE_MODE_COLOR_2D_8BPP             14
+#define SI_TILE_MODE_COLOR_2D_16BPP            15
+#define SI_TILE_MODE_COLOR_2D_32BPP            16
+#define SI_TILE_MODE_COLOR_2D_64BPP            17
+#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP    11
+#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP    12
+#define SI_TILE_MODE_DEPTH_STENCIL_1D          4
+#define SI_TILE_MODE_DEPTH_STENCIL_2D          0
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA      3
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA      3
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA      2
+
 #endif
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
new file mode 100644 (file)
index 0000000..6e132a2
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_TEGRA_DRM_H_
+#define _UAPI_TEGRA_DRM_H_
+
+struct drm_tegra_gem_create {
+       __u64 size;
+       __u32 flags;
+       __u32 handle;
+};
+
+struct drm_tegra_gem_mmap {
+       __u32 handle;
+       __u32 offset;
+};
+
+struct drm_tegra_syncpt_read {
+       __u32 id;
+       __u32 value;
+};
+
+struct drm_tegra_syncpt_incr {
+       __u32 id;
+       __u32 pad;
+};
+
+struct drm_tegra_syncpt_wait {
+       __u32 id;
+       __u32 thresh;
+       __u32 timeout;
+       __u32 value;
+};
+
+#define DRM_TEGRA_NO_TIMEOUT   (0xffffffff)
+
+struct drm_tegra_open_channel {
+       __u32 client;
+       __u32 pad;
+       __u64 context;
+};
+
+struct drm_tegra_close_channel {
+       __u64 context;
+};
+
+struct drm_tegra_get_syncpt {
+       __u64 context;
+       __u32 index;
+       __u32 id;
+};
+
+struct drm_tegra_syncpt {
+       __u32 id;
+       __u32 incrs;
+};
+
+struct drm_tegra_cmdbuf {
+       __u32 handle;
+       __u32 offset;
+       __u32 words;
+       __u32 pad;
+};
+
+struct drm_tegra_reloc {
+       struct {
+               __u32 handle;
+               __u32 offset;
+       } cmdbuf;
+       struct {
+               __u32 handle;
+               __u32 offset;
+       } target;
+       __u32 shift;
+       __u32 pad;
+};
+
+struct drm_tegra_waitchk {
+       __u32 handle;
+       __u32 offset;
+       __u32 syncpt;
+       __u32 thresh;
+};
+
+struct drm_tegra_submit {
+       __u64 context;
+       __u32 num_syncpts;
+       __u32 num_cmdbufs;
+       __u32 num_relocs;
+       __u32 num_waitchks;
+       __u32 waitchk_mask;
+       __u32 timeout;
+       __u32 pad;
+       __u64 syncpts;
+       __u64 cmdbufs;
+       __u64 relocs;
+       __u64 waitchks;
+       __u32 fence;            /* Return value */
+
+       __u32 reserved[5];      /* future expansion */
+};
+
+#define DRM_TEGRA_GEM_CREATE   0x00
+#define DRM_TEGRA_GEM_MMAP     0x01
+#define DRM_TEGRA_SYNCPT_READ  0x02
+#define DRM_TEGRA_SYNCPT_INCR  0x03
+#define DRM_TEGRA_SYNCPT_WAIT  0x04
+#define DRM_TEGRA_OPEN_CHANNEL 0x05
+#define DRM_TEGRA_CLOSE_CHANNEL        0x06
+#define DRM_TEGRA_GET_SYNCPT   0x07
+#define DRM_TEGRA_SUBMIT       0x08
+
+#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
+#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
+#define DRM_IOCTL_TEGRA_SYNCPT_READ DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_READ, struct drm_tegra_syncpt_read)
+#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
+#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
+#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
+#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
+
+#endif
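
Similarly, a hypothetical userspace sketch for the new Tegra UAPI (not part of this patch; the device node, buffer size and syncpoint id are illustrative, and tegra_drm.h is assumed to be on the include path next to the DRM headers):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>
#include "tegra_drm.h"

int main(void)
{
	struct drm_tegra_gem_create create;
	struct drm_tegra_syncpt_read sp_read;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed tegra-drm node */

	if (fd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.size = 64 * 1024;
	if (ioctl(fd, DRM_IOCTL_TEGRA_GEM_CREATE, &create) == 0)
		printf("gem handle: %u\n", create.handle);

	memset(&sp_read, 0, sizeof(sp_read));
	sp_read.id = 0;				/* hypothetical syncpoint id */
	if (ioctl(fd, DRM_IOCTL_TEGRA_SYNCPT_READ, &sp_read) == 0)
		printf("syncpt value: %u\n", sp_read.value);

	close(fd);
	return 0;
}
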
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
new file mode 100644 (file)
index 0000000..0c3b46d
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * Header containing platform_data structs for omap panels
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *        Archit Taneja <archit@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Mayuresh Janorkar <mayur@ti.com>
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_PANEL_DATA_H
+#define __OMAP_PANEL_DATA_H
+
+struct omap_dss_device;
+
+/**
+ * struct panel_generic_dpi_data - panel driver configuration data
+ * @name: panel name
+ * @platform_enable: platform specific panel enable function
+ * @platform_disable: platform specific panel disable function
+ * @num_gpios: number of gpios connected to panel
+ * @gpios: gpio numbers on the platform
+ * @gpio_invert: configure gpio as active high or low
+ */
+struct panel_generic_dpi_data {
+       const char *name;
+       int (*platform_enable)(struct omap_dss_device *dssdev);
+       void (*platform_disable)(struct omap_dss_device *dssdev);
+
+       int num_gpios;
+       int gpios[10];
+       bool gpio_invert[10];
+};
+
+/**
+ * struct panel_n8x0_data - N800 panel driver configuration data
+ */
+struct panel_n8x0_data {
+       int (*platform_enable)(struct omap_dss_device *dssdev);
+       void (*platform_disable)(struct omap_dss_device *dssdev);
+       int panel_reset;
+       int ctrl_pwrdown;
+};
+
+/**
+ * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration data
+ * @name: panel name
+ * @use_ext_te: use external TE
+ * @ext_te_gpio: external TE GPIO
+ * @esd_interval: interval of ESD checks, 0 = disabled (ms)
+ * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
+ * @use_dsi_backlight: true if panel uses DSI command to control backlight
+ * @pin_config: DSI pin configuration
+ */
+
+struct nokia_dsi_panel_data {
+       const char *name;
+
+       int reset_gpio;
+
+       bool use_ext_te;
+       int ext_te_gpio;
+
+       unsigned esd_interval;
+       unsigned ulps_timeout;
+
+       bool use_dsi_backlight;
+
+       struct omap_dsi_pin_config pin_config;
+};
+
+/**
+ * struct picodlp_panel_data - picodlp panel driver configuration data
+ * @picodlp_adapter_id:        i2c_adapter number for picodlp
+ */
+struct picodlp_panel_data {
+       int picodlp_adapter_id;
+       int emu_done_gpio;
+       int pwrgood_gpio;
+};
+
+/**
+ * struct tfp410_platform_data - tfp410 panel driver configuration data
+ * @i2c_bus_num: i2c bus id for the panel
+ * @power_down_gpio: gpio number for PD pin (or -1 if not available)
+ */
+struct tfp410_platform_data {
+       int i2c_bus_num;
+       int power_down_gpio;
+};
+
+/**
+ * sharp ls panel driver configuration data
+ * @resb_gpio: reset signal
+ * @ini_gpio: power on control
+ * @mo_gpio: selection for resolution (VGA/QVGA)
+ * @lr_gpio: selection for horizontal scanning direction
+ * @ud_gpio: selection for vertical scanning direction
+ */
+struct panel_sharp_ls037v7dw01_data {
+       int resb_gpio;
+       int ini_gpio;
+       int mo_gpio;
+       int lr_gpio;
+       int ud_gpio;
+};
+
+/**
+ * acx565akm panel driver configuration data
+ * @reset_gpio: reset signal
+ */
+struct panel_acx565akm_data {
+       int reset_gpio;
+};
+
+/**
+ * nec nl8048 panel driver configuration data
+ * @res_gpio: reset signal
+ * @qvga_gpio: selection for resolution (QVGA/WVGA)
+ */
+struct panel_nec_nl8048_data {
+       int res_gpio;
+       int qvga_gpio;
+};
+
+/**
+ * tpo td043 panel driver configuration data
+ * @nreset_gpio: reset signal
+ */
+struct panel_tpo_td043_data {
+       int nreset_gpio;
+};
+
+#endif /* __OMAP_PANEL_DATA_H */
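
A hypothetical board-file usage of the consolidated header (not part of this patch; the GPIO numbers and I2C bus id are made up for illustration):

#include <video/omap-panel-data.h>

static struct panel_generic_dpi_data board_dpi_panel = {
	.name		= "generic",
	.num_gpios	= 1,
	.gpios		= { 171 },		/* panel enable GPIO */
	.gpio_invert	= { false },
};

static struct tfp410_platform_data board_dvi_panel = {
	.i2c_bus_num	= 3,
	.power_down_gpio = 129,
};
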
diff --git a/include/video/omap-panel-generic-dpi.h b/include/video/omap-panel-generic-dpi.h
deleted file mode 100644 (file)
index 127e3f2..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Header for generic DPI panel driver
- *
- * Copyright (C) 2010 Canonical Ltd.
- * Author: Bryan Wu <bryan.wu@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_PANEL_GENERIC_DPI_H
-#define __OMAP_PANEL_GENERIC_DPI_H
-
-struct omap_dss_device;
-
-/**
- * struct panel_generic_dpi_data - panel driver configuration data
- * @name: panel name
- * @platform_enable: platform specific panel enable function
- * @platform_disable: platform specific panel disable function
- */
-struct panel_generic_dpi_data {
-       const char *name;
-       int (*platform_enable)(struct omap_dss_device *dssdev);
-       void (*platform_disable)(struct omap_dss_device *dssdev);
-};
-
-#endif /* __OMAP_PANEL_GENERIC_DPI_H */
diff --git a/include/video/omap-panel-n8x0.h b/include/video/omap-panel-n8x0.h
deleted file mode 100644 (file)
index 50a1302..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __OMAP_PANEL_N8X0_H
-#define __OMAP_PANEL_N8X0_H
-
-struct omap_dss_device;
-
-struct panel_n8x0_data {
-       int (*platform_enable)(struct omap_dss_device *dssdev);
-       void (*platform_disable)(struct omap_dss_device *dssdev);
-       int panel_reset;
-       int ctrl_pwrdown;
-
-       int (*set_backlight)(struct omap_dss_device *dssdev, int level);
-};
-
-#endif
diff --git a/include/video/omap-panel-nokia-dsi.h b/include/video/omap-panel-nokia-dsi.h
deleted file mode 100644 (file)
index 04219a2..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __OMAP_NOKIA_DSI_PANEL_H
-#define __OMAP_NOKIA_DSI_PANEL_H
-
-struct omap_dss_device;
-
-/**
- * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration
- * @name: panel name
- * @use_ext_te: use external TE
- * @ext_te_gpio: external TE GPIO
- * @esd_interval: interval of ESD checks, 0 = disabled (ms)
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-struct nokia_dsi_panel_data {
-       const char *name;
-
-       int reset_gpio;
-
-       bool use_ext_te;
-       int ext_te_gpio;
-
-       unsigned esd_interval;
-       unsigned ulps_timeout;
-
-       bool use_dsi_backlight;
-
-       struct omap_dsi_pin_config pin_config;
-};
-
-#endif /* __OMAP_NOKIA_DSI_PANEL_H */
diff --git a/include/video/omap-panel-picodlp.h b/include/video/omap-panel-picodlp.h
deleted file mode 100644 (file)
index 1c342ef..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * panel data for picodlp panel
- *
- * Copyright (C) 2011 Texas Instruments
- *
- * Author: Mayuresh Janorkar <mayur@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __PANEL_PICODLP_H
-#define __PANEL_PICODLP_H
-/**
- * struct : picodlp panel data
- * picodlp_adapter_id: i2c_adapter number for picodlp
- */
-struct picodlp_panel_data {
-       int picodlp_adapter_id;
-       int emu_done_gpio;
-       int pwrgood_gpio;
-};
-#endif /* __PANEL_PICODLP_H */
diff --git a/include/video/omap-panel-tfp410.h b/include/video/omap-panel-tfp410.h
deleted file mode 100644 (file)
index aef35e4..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Header for TFP410 chip driver
- *
- * Copyright (C) 2011 Texas Instruments Inc
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_PANEL_TFP410_H
-#define __OMAP_PANEL_TFP410_H
-
-struct omap_dss_device;
-
-/**
- * struct tfp410_platform_data - panel driver configuration data
- * @i2c_bus_num: i2c bus id for the panel
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- */
-struct tfp410_platform_data {
-       int i2c_bus_num;
-       int power_down_gpio;
-};
-
-#endif /* __OMAP_PANEL_TFP410_H */
index caefa09..62ca9a7 100644 (file)
@@ -257,10 +257,31 @@ void rfbi_bus_unlock(void);
 
 /* DSI */
 
+enum omap_dss_dsi_trans_mode {
+       /* Sync Pulses: both sync start and end packets sent */
+       OMAP_DSS_DSI_PULSE_MODE,
+       /* Sync Events: only sync start packets sent */
+       OMAP_DSS_DSI_EVENT_MODE,
+       /* Burst: only sync start packets sent, pixels are time compressed */
+       OMAP_DSS_DSI_BURST_MODE,
+};
+
 struct omap_dss_dsi_videomode_timings {
+       unsigned long hsclk;
+
+       unsigned ndl;
+       unsigned bitspp;
+
+       /* pixels */
+       u16 hact;
+       /* lines */
+       u16 vact;
+
        /* DSI video mode blanking data */
        /* Unit: byte clock cycles */
+       u16 hss;
        u16 hsa;
+       u16 hse;
        u16 hfp;
        u16 hbp;
        /* Unit: line clocks */
@@ -274,14 +295,24 @@ struct omap_dss_dsi_videomode_timings {
        int hbp_blanking_mode;
        int hfp_blanking_mode;
 
-       /* Video port sync events */
-       bool vp_vsync_end;
-       bool vp_hsync_end;
+       enum omap_dss_dsi_trans_mode trans_mode;
 
        bool ddr_clk_always_on;
        int window_sync;
 };
 
+struct omap_dss_dsi_config {
+       enum omap_dss_dsi_mode mode;
+       enum omap_dss_dsi_pixel_format pixel_format;
+       const struct omap_video_timings *timings;
+
+       unsigned long hs_clk_min, hs_clk_max;
+       unsigned long lp_clk_min, lp_clk_max;
+
+       bool ddr_clk_always_on;
+       enum omap_dss_dsi_trans_mode trans_mode;
+};
+
 void dsi_bus_lock(struct omap_dss_device *dssdev);
 void dsi_bus_unlock(struct omap_dss_device *dssdev);
 int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
@@ -541,9 +572,14 @@ struct omap_dss_writeback_info {
 struct omap_dss_output {
        struct list_head list;
 
+       const char *name;
+
        /* display type supported by the output */
        enum omap_display_type type;
 
+       /* DISPC channel for this output */
+       enum omap_channel dispc_channel;
+
        /* output instance */
        enum omap_dss_output_id id;
 
@@ -561,6 +597,7 @@ struct omap_dss_device {
 
        enum omap_display_type type;
 
+       /* obsolete, to be removed */
        enum omap_channel channel;
 
        union {
@@ -590,41 +627,11 @@ struct omap_dss_device {
                } venc;
        } phy;
 
-       struct {
-               struct {
-                       struct {
-                               u16 lck_div;
-                               u16 pck_div;
-                               enum omap_dss_clk_source lcd_clk_src;
-                       } channel;
-
-                       enum omap_dss_clk_source dispc_fclk_src;
-               } dispc;
-
-               struct {
-                       /* regn is one greater than TRM's REGN value */
-                       u16 regn;
-                       u16 regm;
-                       u16 regm_dispc;
-                       u16 regm_dsi;
-
-                       u16 lp_clk_div;
-                       enum omap_dss_clk_source dsi_fclk_src;
-               } dsi;
-
-               struct {
-                       /* regn is one greater than TRM's REGN value */
-                       u16 regn;
-                       u16 regm2;
-               } hdmi;
-       } clocks;
-
        struct {
                struct omap_video_timings timings;
 
                enum omap_dss_dsi_pixel_format dsi_pix_fmt;
                enum omap_dss_dsi_mode dsi_mode;
-               struct omap_dss_dsi_videomode_timings dsi_vm_timings;
        } panel;
 
        struct {
@@ -829,15 +836,8 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
 void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
                bool enable);
 int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable);
-void omapdss_dsi_set_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings);
-void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h);
-void omapdss_dsi_set_pixel_format(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_pixel_format fmt);
-void omapdss_dsi_set_operation_mode(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_mode mode);
-void omapdss_dsi_set_videomode_timings(struct omap_dss_device *dssdev,
-               struct omap_dss_dsi_videomode_timings *timings);
+int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
+               const struct omap_dss_dsi_config *config);
 
 int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
                void (*callback)(int, void *), void *data);
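
A hypothetical panel-driver sketch of the new single-call configuration (not part of this patch; the clock limits and the choice of burst mode are illustrative values, not recommendations):

static int panel_configure_dsi(struct omap_dss_device *dssdev,
			       const struct omap_video_timings *timings)
{
	struct omap_dss_dsi_config cfg = {
		.mode			= OMAP_DSS_DSI_VIDEO_MODE,
		.pixel_format		= OMAP_DSS_DSI_FMT_RGB888,
		.timings		= timings,
		.hs_clk_min		= 150000000,	/* 150 MHz */
		.hs_clk_max		= 300000000,	/* 300 MHz */
		.lp_clk_min		= 7000000,
		.lp_clk_max		= 10000000,
		.ddr_clk_always_on	= true,
		.trans_mode		= OMAP_DSS_DSI_BURST_MODE,
	};

	return omapdss_dsi_set_config(dssdev, &cfg);
}

This replaces the sequence of per-field omapdss_dsi_set_* setters removed above with one configuration step.
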
@@ -846,8 +846,6 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id);
 void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel);
 int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
                const struct omap_dsi_pin_config *pin_cfg);
-int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
-               unsigned long ddr_clk, unsigned long lp_clk);
 
 int omapdss_dsi_display_enable(struct omap_dss_device *dssdev);
 void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
index 31cd1bf..fede1d0 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
                                                        goto out_unlock;
                                                break;
                                        }
+                                       msg = ERR_PTR(-EAGAIN);
                                } else
                                        break;
                                msg_counter++;
index 6466699..0db0de1 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1940,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
        /* Check the cache first. */
        /* (Cache hit rate is typically around 35%.) */
-       vma = mm->mmap_cache;
+       vma = ACCESS_ONCE(mm->mmap_cache);
        if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
                struct rb_node *rb_node;
 
index e193280..2f3ea74 100644 (file)
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
        struct vm_area_struct *vma;
 
        /* check the cache first */
-       vma = mm->mmap_cache;
+       vma = ACCESS_ONCE(mm->mmap_cache);
        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
                return vma;
 
index b13e5c7..13e6447 100644 (file)
@@ -1624,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        }
 
        skb_orphan(skb);
-       nf_reset(skb);
 
        if (unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
@@ -3314,6 +3313,7 @@ int netdev_rx_handler_register(struct net_device *dev,
        if (dev->rx_handler)
                return -EBUSY;
 
+       /* Note: rx_handler_data must be set before rx_handler */
        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
        rcu_assign_pointer(dev->rx_handler, rx_handler);
 
@@ -3334,6 +3334,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
        ASSERT_RTNL();
        RCU_INIT_POINTER(dev->rx_handler, NULL);
+       /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
+        * section is guaranteed to see a non-NULL rx_handler_data
+        * as well.
+        */
+       synchronize_net();
        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
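
A standalone C11 analogue of the publication order being documented here (an assumption-laden sketch, not kernel code: release/acquire stands in for rcu_assign_pointer()/rcu_dereference(), and nothing models the synchronize_net() grace period that lets rx_handler_data be cleared afterwards):

#include <stdatomic.h>
#include <stdio.h>

struct fake_dev {
	_Atomic(void *) rx_handler;
	_Atomic(void *) rx_handler_data;
};

static int handler_data = 42;
static int handler_token;

static void fake_register(struct fake_dev *d)
{
	/* data first, then the handler pointer, as in the register path above */
	atomic_store_explicit(&d->rx_handler_data, &handler_data,
			      memory_order_release);
	atomic_store_explicit(&d->rx_handler, &handler_token,
			      memory_order_release);
}

static void fake_reader(struct fake_dev *d)
{
	/* observing a non-NULL handler makes the earlier data store visible */
	if (atomic_load_explicit(&d->rx_handler, memory_order_acquire)) {
		int *data = atomic_load_explicit(&d->rx_handler_data,
						 memory_order_acquire);
		printf("rx_handler_data: %d\n", *data);
	}
}

int main(void)
{
	struct fake_dev d = { NULL, NULL };

	fake_register(&d);
	fake_reader(&d);
	return 0;
}
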
index c56ea6f..2bfd081 100644 (file)
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;
 
-       tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
+       tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
 }
index 5fb8d7e..b65441d 100644 (file)
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
        }
        if (ops->fill_info) {
                data = nla_nest_start(skb, IFLA_INFO_DATA);
-               if (data == NULL)
+               if (data == NULL) {
+                       err = -EMSGSIZE;
                        goto err_cancel_link;
+               }
                err = ops->fill_info(skb, dev);
                if (err < 0)
                        goto err_cancel_data;
index 2651225..a459c4f 100644 (file)
@@ -2529,6 +2529,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 static void init_loopback(struct net_device *dev)
 {
        struct inet6_dev  *idev;
+       struct net_device *sp_dev;
+       struct inet6_ifaddr *sp_ifa;
+       struct rt6_info *sp_rt;
 
        /* ::1 */
 
@@ -2540,6 +2543,30 @@ static void init_loopback(struct net_device *dev)
        }
 
        add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
+
+       /* Add routes to the other interfaces' IPv6 addresses */
+       for_each_netdev(dev_net(dev), sp_dev) {
+               if (!strcmp(sp_dev->name, dev->name))
+                       continue;
+
+               idev = __in6_dev_get(sp_dev);
+               if (!idev)
+                       continue;
+
+               read_lock_bh(&idev->lock);
+               list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
+
+                       if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+                               continue;
+
+                       sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+                       /* Failure cases are ignored */
+                       if (!IS_ERR(sp_rt))
+                               ip6_ins_rt(sp_rt);
+               }
+               read_unlock_bh(&idev->lock);
+       }
 }
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
index e33fe0a..2bab2aa 100644 (file)
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
            ipv6_addr_loopback(&hdr->daddr))
                goto err;
 
+       /* RFC4291 Errata ID: 3480
+        * Interface-Local scope spans only a single interface on a
+        * node and is useful only for loopback transmission of
+        * multicast.  Packets with interface-local scope received
+        * from another node must be discarded.
+        */
+       if (!(skb->pkt_type == PACKET_LOOPBACK ||
+             dev->flags & IFF_LOOPBACK) &&
+           ipv6_addr_is_multicast(&hdr->daddr) &&
+           IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
+               goto err;
+
        /* RFC4291 2.7
         * Nodes must not originate a packet to a multicast address whose scope
         * field contains the reserved value 0; if such a packet is received, it
index 8555f33..5b1e5af 100644 (file)
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
+       hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
        return 0;
index baaa860..3bfe261 100644 (file)
@@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
 static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
-       int ret = 0;
+       int ret;
 
        if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
                return 0;
 
-       mutex_lock(&local->iflist_mtx);
+       ASSERT_RTNL();
 
        if (local->monitor_sdata)
-               goto out_unlock;
+               return 0;
 
        sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
-       if (!sdata) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (!sdata)
+               return -ENOMEM;
 
        /* set up data */
        sdata->local = local;
@@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        if (WARN_ON(ret)) {
                /* ok .. stupid driver, it asked for this! */
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
        ret = ieee80211_check_queues(sdata);
        if (ret) {
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
        ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
@@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        if (ret) {
                drv_remove_interface(local, sdata);
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
+       mutex_lock(&local->iflist_mtx);
        rcu_assign_pointer(local->monitor_sdata, sdata);
- out_unlock:
        mutex_unlock(&local->iflist_mtx);
-       return ret;
+
+       return 0;
 }
 
 static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
        if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
                return;
 
+       ASSERT_RTNL();
+
        mutex_lock(&local->iflist_mtx);
 
        sdata = rcu_dereference_protected(local->monitor_sdata,
                                          lockdep_is_held(&local->iflist_mtx));
-       if (!sdata)
-               goto out_unlock;
+       if (!sdata) {
+               mutex_unlock(&local->iflist_mtx);
+               return;
+       }
 
        rcu_assign_pointer(local->monitor_sdata, NULL);
+       mutex_unlock(&local->iflist_mtx);
+
        synchronize_net();
 
        ieee80211_vif_release_channel(sdata);
@@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
        drv_remove_interface(local, sdata);
 
        kfree(sdata);
- out_unlock:
-       mutex_unlock(&local->iflist_mtx);
 }
 
 /*
index 29ce2aa..4749b38 100644 (file)
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
 
        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               if (ieee80211_vif_is_mesh(&sdata->vif))
+               if (ieee80211_vif_is_mesh(&sdata->vif) &&
+                   ieee80211_sdata_running(sdata))
                        ieee80211_queue_work(&local->hw, &sdata->work);
        rcu_read_unlock();
 }
index 1415774..82cc303 100644 (file)
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
 
        /* Restart STA timers */
        rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               ieee80211_restart_sta_timer(sdata);
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_restart_sta_timer(sdata);
+       }
        rcu_read_unlock();
 }
 
index bb73ed2..c6844ad 100644 (file)
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
 
                memset(nskb->cb, 0, sizeof(nskb->cb));
 
-               ieee80211_tx_skb(rx->sdata, nskb);
+               if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
+
+                       info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
+                                     IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
+                                     IEEE80211_TX_CTL_NO_CCK_RATE;
+                       if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+                               info->hw_queue =
+                                       local->hw.offchannel_tx_hw_queue;
+               }
+
+               __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
+                                           status->band);
        }
        dev_kfree_skb(rx->skb);
        return RX_QUEUED;
index a79ce82..238a0cc 100644 (file)
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
        int ret, i;
+       bool have_key = false;
 
        might_sleep();
 
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        list_del_rcu(&sta->list);
 
        mutex_lock(&local->key_mtx);
-       for (i = 0; i < NUM_DEFAULT_KEYS; i++)
+       for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
                __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
-       if (sta->ptk)
+               have_key = true;
+       }
+       if (sta->ptk) {
                __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
+               have_key = true;
+       }
        mutex_unlock(&local->key_mtx);
 
+       if (!have_key)
+               synchronize_net();
+
        sta->dead = true;
 
        local->num_sta--;
index 13aa47a..1bc210f 100644 (file)
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)
                cbq_update(q);
                if ((incr -= incr2) < 0)
                        incr = 0;
+               q->now += incr;
+       } else {
+               if (now > q->now)
+                       q->now = now;
        }
-       q->now += incr;
        q->now_rt = now;
 
        for (;;) {
index 4e606fc..5578628 100644 (file)
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
-       if (++sch->q.qlen < sch->limit)
+       if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
        q->drop_overlimit++;
index ffad481..eac7e0e 100644 (file)
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
        u64 mult;
        int shift;
 
-       r->rate_bps = rate << 3;
+       r->rate_bps = (u64)rate << 3;
        r->shift = 0;
        r->mult = 1;
        /*
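
The cast above matters because rate is a u32 in bytes per second, so the shift to bits per second would otherwise be evaluated in 32-bit arithmetic and wrap for rates above roughly 512 MiB/s (about 4.3 Gbit/s). A standalone illustration (not kernel code; the 5 Gbit/s figure is just an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rate = 625000000;		/* 5 Gbit/s expressed in bytes/s */
	uint64_t wrong = rate << 3;		/* shift wraps in 32 bits first */
	uint64_t right = (uint64_t)rate << 3;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
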
index ca511c4..d8079da 100644 (file)
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
        struct vsock_sock *vsk;
 
        list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
-               if (vsock_addr_equals_addr_any(addr, &vsk->local_addr))
+               if (addr->svm_port == vsk->local_addr.svm_port)
                        return sk_vsock(vsk);
 
        return NULL;
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
 
        list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
                            connected_table) {
-               if (vsock_addr_equals_addr(src, &vsk->remote_addr)
-                   && vsock_addr_equals_addr(dst, &vsk->local_addr)) {
+               if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
+                   dst->svm_port == vsk->local_addr.svm_port) {
                        return sk_vsock(vsk);
                }
        }
index a70ace8..1f6508e 100644 (file)
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending(
        struct vsock_sock *vlistener;
        struct vsock_sock *vpending;
        struct sock *pending;
+       struct sockaddr_vm src;
+
+       vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
 
        vlistener = vsock_sk(listener);
 
        list_for_each_entry(vpending, &vlistener->pending_links,
                            pending_links) {
-               struct sockaddr_vm src;
-               struct sockaddr_vm dst;
-
-               vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
-               vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
-
                if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
-                   vsock_addr_equals_addr(&dst, &vpending->local_addr)) {
+                   pkt->dst_port == vpending->local_addr.svm_port) {
                        pending = sk_vsock(vpending);
                        sock_hold(pending);
                        goto found;
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
         */
        bh_lock_sock(sk);
 
-       if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED)
-               vmci_trans(vsk)->notify_ops->handle_notify_pkt(
-                               sk, pkt, true, &dst, &src,
-                               &bh_process_pkt);
+       if (!sock_owned_by_user(sk)) {
+               /* The local context ID may be out of date, update it. */
+               vsk->local_addr.svm_cid = dst.svm_cid;
+
+               if (sk->sk_state == SS_CONNECTED)
+                       vmci_trans(vsk)->notify_ops->handle_notify_pkt(
+                                       sk, pkt, true, &dst, &src,
+                                       &bh_process_pkt);
+       }
 
        bh_unlock_sock(sk);
 
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
 
        lock_sock(sk);
 
+       /* The local context ID may be out of date. */
+       vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
+
        switch (sk->sk_state) {
        case SS_LISTEN:
                vmci_transport_recv_listen(sk, pkt);
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk,
        pending = vmci_transport_get_pending(sk, pkt);
        if (pending) {
                lock_sock(pending);
+
+               /* The local context ID may be out of date. */
+               vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
+
                switch (pending->sk_state) {
                case SS_CONNECTING:
                        err = vmci_transport_recv_connecting_server(sk,
index b7df1ae..ec2611b 100644 (file)
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
 }
 EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
 
-bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
-                               const struct sockaddr_vm *other)
-{
-       return (addr->svm_cid == VMADDR_CID_ANY ||
-               other->svm_cid == VMADDR_CID_ANY ||
-               addr->svm_cid == other->svm_cid) &&
-              addr->svm_port == other->svm_port;
-}
-EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
-
 int vsock_addr_cast(const struct sockaddr *addr,
                    size_t len, struct sockaddr_vm **out_addr)
 {
index cdfbcef..9ccd531 100644 (file)
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);
 void vsock_addr_unbind(struct sockaddr_vm *addr);
 bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
                            const struct sockaddr_vm *other);
-bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
-                               const struct sockaddr_vm *other);
 int vsock_addr_cast(const struct sockaddr *addr, size_t len,
                    struct sockaddr_vm **out_addr);
 
index ea4155f..6ddf74f 100644 (file)
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
        rdev_rfkill_poll(rdev);
 }
 
+void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+                             struct wireless_dev *wdev)
+{
+       lockdep_assert_held(&rdev->devlist_mtx);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
+
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE))
+               return;
+
+       if (!wdev->p2p_started)
+               return;
+
+       rdev_stop_p2p_device(rdev, wdev);
+       wdev->p2p_started = false;
+
+       rdev->opencount--;
+
+       if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+               bool busy = work_busy(&rdev->scan_done_wk);
+
+               /*
+                * If the work isn't pending or running (in which case it would
+                * be waiting for the lock we hold), the driver didn't properly
+                * cancel the scan when the interface was removed. In this case,
+                * warn and leak the scan request object so we don't crash later.
+                */
+               WARN_ON(!busy);
+
+               rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, !busy);
+       }
+}
+
 static int cfg80211_rfkill_set_block(void *data, bool blocked)
 {
        struct cfg80211_registered_device *rdev = data;
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                return 0;
 
        rtnl_lock();
-       mutex_lock(&rdev->devlist_mtx);
+
+       /* read-only iteration need not hold the devlist_mtx */
 
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (wdev->netdev) {
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                /* otherwise, check iftype */
                switch (wdev->iftype) {
                case NL80211_IFTYPE_P2P_DEVICE:
-                       if (!wdev->p2p_started)
-                               break;
-                       rdev_stop_p2p_device(rdev, wdev);
-                       wdev->p2p_started = false;
-                       rdev->opencount--;
+                       /* but this requires it */
+                       mutex_lock(&rdev->devlist_mtx);
+                       mutex_lock(&rdev->sched_scan_mtx);
+                       cfg80211_stop_p2p_device(rdev, wdev);
+                       mutex_unlock(&rdev->sched_scan_mtx);
+                       mutex_unlock(&rdev->devlist_mtx);
                        break;
                default:
                        break;
                }
        }
 
-       mutex_unlock(&rdev->devlist_mtx);
        rtnl_unlock();
 
        return 0;
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)
        wdev = container_of(work, struct wireless_dev, cleanup_work);
        rdev = wiphy_to_dev(wdev->wiphy);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
                rdev->scan_req->aborted = true;
                ___cfg80211_scan_done(rdev, true);
        }
 
-       cfg80211_unlock_rdev(rdev);
-
-       mutex_lock(&rdev->sched_scan_mtx);
-
        if (WARN_ON(rdev->sched_scan_req &&
                    rdev->sched_scan_req->dev == wdev->netdev)) {
                __cfg80211_stop_sched_scan(rdev, false);
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
                return;
 
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        list_del_rcu(&wdev->list);
        rdev->devlist_generation++;
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_P2P_DEVICE:
-               if (!wdev->p2p_started)
-                       break;
-               rdev_stop_p2p_device(rdev, wdev);
-               wdev->p2p_started = false;
-               rdev->opencount--;
+               cfg80211_stop_p2p_device(rdev, wdev);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
 }
 EXPORT_SYMBOL(cfg80211_unregister_wdev);
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                cfg80211_update_iface_num(rdev, wdev->iftype, 1);
                cfg80211_lock_rdev(rdev);
                mutex_lock(&rdev->devlist_mtx);
+               mutex_lock(&rdev->sched_scan_mtx);
                wdev_lock(wdev);
                switch (wdev->iftype) {
 #ifdef CONFIG_CFG80211_WEXT
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        break;
                }
                wdev_unlock(wdev);
+               mutex_unlock(&rdev->sched_scan_mtx);
                rdev->opencount++;
                mutex_unlock(&rdev->devlist_mtx);
                cfg80211_unlock_rdev(rdev);
index 3aec0e4..5845c2b 100644 (file)
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
                               enum nl80211_iftype iftype, int num);
 
+void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+                             struct wireless_dev *wdev);
+
 #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
 
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
index d44ab21..58e13a8 100644 (file)
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->scan)
                return -EOPNOTSUPP;
 
-       if (rdev->scan_req)
-               return -EBUSY;
+       mutex_lock(&rdev->sched_scan_mtx);
+       if (rdev->scan_req) {
+               err = -EBUSY;
+               goto unlock;
+       }
 
        if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
                n_channels = validate_scan_freqs(
                                info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
-               if (!n_channels)
-                       return -EINVAL;
+               if (!n_channels) {
+                       err = -EINVAL;
+                       goto unlock;
+               }
        } else {
                enum ieee80211_band band;
                n_channels = 0;
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)
                        n_ssids++;
 
-       if (n_ssids > wiphy->max_scan_ssids)
-               return -EINVAL;
+       if (n_ssids > wiphy->max_scan_ssids) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        if (info->attrs[NL80211_ATTR_IE])
                ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
        else
                ie_len = 0;
 
-       if (ie_len > wiphy->max_scan_ie_len)
-               return -EINVAL;
+       if (ie_len > wiphy->max_scan_ie_len) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       if (!request) {
+               err = -ENOMEM;
+               goto unlock;
+       }
 
        if (n_ssids)
                request->ssids = (void *)&request->channels[n_channels];
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                kfree(request);
        }
 
+ unlock:
+       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->stop_p2p_device)
                return -EOPNOTSUPP;
 
-       if (!wdev->p2p_started)
-               return 0;
-
-       rdev_stop_p2p_device(rdev, wdev);
-       wdev->p2p_started = false;
-
-       mutex_lock(&rdev->devlist_mtx);
-       rdev->opencount--;
-       mutex_unlock(&rdev->devlist_mtx);
-
-       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
-               rdev->scan_req->aborted = true;
-               ___cfg80211_scan_done(rdev, true);
-       }
+       mutex_lock(&rdev->sched_scan_mtx);
+       cfg80211_stop_p2p_device(rdev, wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 
        return 0;
 }
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
        struct nlattr *nest;
        int i;
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (WARN_ON(!req))
                return 0;
index 674aadc..fd99ea4 100644 (file)
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
        union iwreq_data wrqu;
 #endif
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        request = rdev->scan_req;
 
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            scan_done_wk);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
        ___cfg80211_scan_done(rdev, false);
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
        found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
 
        if (found) {
-               found->pub.beacon_interval = tmp->pub.beacon_interval;
-               found->pub.signal = tmp->pub.signal;
-               found->pub.capability = tmp->pub.capability;
-               found->ts = tmp->ts;
-
                /* Update IEs */
                if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
                        const struct cfg80211_bss_ies *old;
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
                        if (found->pub.hidden_beacon_bss &&
                            !list_empty(&found->hidden_list)) {
+                               const struct cfg80211_bss_ies *f;
+
                                /*
                                 * The found BSS struct is one of the probe
                                 * response members of a group, but we're
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                 * SSID to showing it, which is confusing so
                                 * drop this information.
                                 */
+
+                               f = rcu_access_pointer(tmp->pub.beacon_ies);
+                               kfree_rcu((struct cfg80211_bss_ies *)f,
+                                         rcu_head);
                                goto drop;
                        }
 
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                kfree_rcu((struct cfg80211_bss_ies *)old,
                                          rcu_head);
                }
+
+               found->pub.beacon_interval = tmp->pub.beacon_interval;
+               found->pub.signal = tmp->pub.signal;
+               found->pub.capability = tmp->pub.capability;
+               found->ts = tmp->ts;
        } else {
                struct cfg80211_internal_bss *new;
                struct cfg80211_internal_bss *hidden;
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
+       mutex_lock(&rdev->sched_scan_mtx);
        if (rdev->scan_req) {
                err = -EBUSY;
                goto out;
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                dev_hold(dev);
        }
  out:
+       mutex_unlock(&rdev->sched_scan_mtx);
        kfree(creq);
        cfg80211_unlock_rdev(rdev);
        return err;
index f432bd3..09d994d 100644 (file)
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
        ASSERT_RTNL();
        ASSERT_RDEV_LOCK(rdev);
        ASSERT_WDEV_LOCK(wdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (rdev->scan_req)
                return -EBUSY;
@@ -320,11 +321,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-       mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
        wdev_lock(wdev);
        __cfg80211_sme_scan_done(dev);
        wdev_unlock(wdev);
-       mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
 }
 
 void cfg80211_sme_rx_auth(struct net_device *dev,
@@ -924,9 +923,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
        int err;
 
        mutex_lock(&rdev->devlist_mtx);
+       /* might request scan - scan_mtx -> wdev_mtx dependency */
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(dev->ieee80211_ptr);
        err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
 
        return err;
index b7a5313..7586de7 100644 (file)
@@ -27,7 +27,8 @@
 #define WIPHY_PR_ARG   __entry->wiphy_name
 
 #define WDEV_ENTRY     __field(u32, id)
-#define WDEV_ASSIGN    (__entry->id) = (wdev ? wdev->identifier : 0)
+#define WDEV_ASSIGN    (__entry->id) = (!IS_ERR_OR_NULL(wdev)  \
+                                        ? wdev->identifier : 0)
 #define WDEV_PR_FMT    "wdev(%u)"
 #define WDEV_PR_ARG    (__entry->id)
 
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
-               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
                __entry->acl_policy = params->acl_policy;
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
index fb9622f..e79cb5c 100644 (file)
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        err = 0;
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
index 35754cc..8dafe6d 100644 (file)
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
                x->xflags &= ~XFRM_TIME_DEFER;
 }
 
+static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
+{
+       u32 seq_diff, oseq_diff;
+       struct km_event c;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
+
+       /* we send notify messages in case
+        *  1. we updated one of the sequence numbers, and the seqno difference
+        *     is at least x->replay_maxdiff, in this case we also update the
+        *     timeout of our timer function
+        *  2. if x->replay_maxage has elapsed since last update,
+        *     and there were changes
+        *
+        *  The state structure must be locked!
+        */
+
+       switch (event) {
+       case XFRM_REPLAY_UPDATE:
+               if (!x->replay_maxdiff)
+                       break;
+
+               if (replay_esn->seq_hi == preplay_esn->seq_hi)
+                       seq_diff = replay_esn->seq - preplay_esn->seq;
+               else
+                       seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
+
+               if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+                       oseq_diff = replay_esn->oseq - preplay_esn->oseq;
+               else
+                       oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
+
+               if (seq_diff < x->replay_maxdiff &&
+                   oseq_diff < x->replay_maxdiff) {
+
+                       if (x->xflags & XFRM_TIME_DEFER)
+                               event = XFRM_REPLAY_TIMEOUT;
+                       else
+                               return;
+               }
+
+               break;
+
+       case XFRM_REPLAY_TIMEOUT:
+               if (memcmp(x->replay_esn, x->preplay_esn,
+                          xfrm_replay_state_esn_len(replay_esn)) == 0) {
+                       x->xflags |= XFRM_TIME_DEFER;
+                       return;
+               }
+
+               break;
+       }
+
+       memcpy(x->preplay_esn, x->replay_esn,
+              xfrm_replay_state_esn_len(replay_esn));
+       c.event = XFRM_MSG_NEWAE;
+       c.data.aevent = event;
+       km_state_notify(x, &c);
+
+       if (x->replay_maxage &&
+           !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+               x->xflags &= ~XFRM_TIME_DEFER;
+}
+
 static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err = 0;
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {
        .advance        = xfrm_replay_advance_esn,
        .check          = xfrm_replay_check_esn,
        .recheck        = xfrm_replay_recheck_esn,
-       .notify         = xfrm_replay_notify_bmp,
+       .notify         = xfrm_replay_notify_esn,
        .overflow       = xfrm_replay_overflow_esn,
 };