/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
+ * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
* Only newer chipsets need to bother with this, of course.
*/
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif
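A minimal sketch of how a compile-time flag like USE_PCI_DMA_API is
typically consumed when binding pages into the GTT; intel_gtt_map_page()
is a hypothetical helper for illustration, not code from this patch:

static dma_addr_t intel_gtt_map_page(struct pci_dev *pdev, struct page *page)
{
	if (USE_PCI_DMA_API)
		/* an IOMMU may remap: ask the DMA API for a bus address */
		return pci_map_page(pdev, page, 0, PAGE_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	/* no Intel IOMMU built in: bus address equals physical address */
	return page_to_phys(page);
}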
{
int ret = -EINVAL;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries;
if (mem->page_count == 0)
return 0;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
intel_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.base.needs_dmar) {
{
}
+ /* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+ extern int intel_iommu_gfx_mapped;
+ static inline int needs_idle_maps(void)
+ {
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ intel_iommu_gfx_mapped)
+ return 1;
+
+ return 0;
+ }
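Note on the check above: intel_iommu_gfx_mapped is exported by the Intel
IOMMU driver and is non-zero only when VT-d translation is actually active
for the integrated GPU, so combining it with the Ironlake-M device IDs arms
the workaround only where the erratum can actually bite.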
+
static int i9xx_setup(void)
{
u32 reg_addr;
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
+ if (needs_idle_maps())
+         intel_private.base.do_idle_maps = 1;
+
intel_i9xx_setup_flush();
return 0;
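With do_idle_maps set, the insert/remove entry points above now bail out
with -ENODEV, so callers are expected to notice the flag and idle the
hardware before touching the GTT themselves. A hedged sketch of such a
caller; unbind_and_clear() and wait_for_gpu_idle() are hypothetical names,
not part of this patch:

extern void wait_for_gpu_idle(void);	/* hypothetical driver-side wait */

static void unbind_and_clear(unsigned int pg_start, unsigned int num_entries)
{
	/* Ironlake + VT-d: the GPU must be idle before any GTT unmap */
	if (intel_private.base.do_idle_maps)
		wait_for_gpu_idle();

	intel_gtt_clear_range(pg_start, num_entries);
}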
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);;
+ rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
} else
WREG32(rdev->fence_drv.scratch_reg, seq);
}
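For reference, the read side of this writeback scheme is symmetric: the CPU
pulls back the last sequence number the GPU wrote. A hedged sketch, reusing
the scratch_index computed above; radeon_fence_read_seq() is a hypothetical
name:

static u32 radeon_fence_read_seq(struct radeon_device *rdev, u32 scratch_index)
{
	if (rdev->wb.enabled)
		/* the GPU stores the seq little-endian in the writeback page */
		return le32_to_cpu(rdev->wb.wb[scratch_index / 4]);
	/* no writeback: fall back to the MMIO scratch register */
	return RREG32(rdev->fence_drv.scratch_reg);
}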
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
- WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+ printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
if STAGING
+source "drivers/staging/serial/Kconfig"
+
source "drivers/staging/et131x/Kconfig"
source "drivers/staging/slicoss/Kconfig"
source "drivers/staging/echo/Kconfig"
-source "drivers/staging/brcm80211/Kconfig"
-
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
source "drivers/staging/rts_pstor/Kconfig"
+source "drivers/staging/rts5139/Kconfig"
+
source "drivers/staging/frontier/Kconfig"
source "drivers/staging/pohmelfs/Kconfig"
source "drivers/staging/line6/Kconfig"
-source "drivers/gpu/drm/vmwgfx/Kconfig"
-
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/staging/octeon/Kconfig"
source "drivers/staging/sbe-2t3e3/Kconfig"
-source "drivers/staging/ath6kl/Kconfig"
-
source "drivers/staging/keucr/Kconfig"
source "drivers/staging/bcm/Kconfig"
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io_mapping.txt
+ * See Documentation/io-mapping.txt
*/
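A hedged usage sketch of the io_mapping API this header provides; bar_base
and bar_size stand in for a real device BAR, and the written value is
arbitrary:

static void example_poke(resource_size_t bar_base, unsigned long bar_size)
{
	struct io_mapping *map;
	void __iomem *p;

	map = io_mapping_create_wc(bar_base, bar_size);
	if (!map)
		return;

	p = io_mapping_map_atomic_wc(map, 0);
	writel(0xdeadbeef, p);	/* atomic section: no sleeping until unmap */
	io_mapping_unmap_atomic(p);

	io_mapping_free(map);
}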
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
#else
+ #include <linux/uaccess.h>
+
/* this struct isn't actually defined anywhere */
struct io_mapping;
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
+ pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
+ pagefault_enable();
}
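The pagefault_disable()/pagefault_enable() pair added above makes the
generic fallback honor the same contract as the real atomic-iomap
implementation: between map and unmap the caller is in an atomic section
and must not sleep. Without it, code developed against this fallback could
sleep between the two calls and only break later on configurations where
the mapping really is atomic.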
/* Non-atomic map/unmap */