curr->bridge->driver->cache_flush();
curr->is_flushed = true;
}
+
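+ /* Give the bridge driver a chance to DMA-map the pages (e.g. through
+  * the DMA API / an IOMMU) before they are inserted into the GATT. */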
+ if (curr->bridge->driver->agp_map_memory) {
+ ret_val = curr->bridge->driver->agp_map_memory(curr);
+ if (ret_val)
+ return ret_val;
+ }
ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
if (ret_val != 0)
return ret_val;
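+ /* Tear down any DMA mapping the driver established via
+  * agp_map_memory() at bind time. */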
+ if (curr->bridge->driver->agp_unmap_memory)
+ curr->bridge->driver->agp_unmap_memory(curr);
+
curr->is_bound = false;
curr->pg_start = 0;
spin_lock(&curr->bridge->mapped_lock);
set_memory_uc((unsigned long)table, 1 << page_order);
bridge->gatt_table = (void *)table;
#else
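+ /* Fallback when set_memory_uc() is unavailable: remap the table
+  * uncached; virt_to_phys() replaces the old virt_to_gart() wrapper. */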
- bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+ bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
#endif
return -ENOMEM;
}
- bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+ bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
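+ /* The table came from the page allocator, so it is physically
+  * contiguous; assuming no IOMMU sits between the CPU and the chipset,
+  * virt_to_phys() yields the bus address the bridge needs. */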
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) {
}
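+ /* Program one GATT entry per page: mask_memory() folds the
+  * chipset-specific valid bits into the page's bus address. */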
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
+ writel(bridge->driver->mask_memory(bridge,
+ page_to_phys(mem->pages[i]),
+ mask_type),
bridge->gatt_table+j);
}
readl(bridge->gatt_table+j-1); /* PCI Posting. */
EXPORT_SYMBOL(global_cache_flush);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+ dma_addr_t addr, int type)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
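+ /* Callers now pass the bus/DMA address directly (e.g. from
+  * page_to_phys() or the DMA API), so no translation is needed here. */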
/* memory type is ignored in the generic routine */
if (bridge->driver->masks)
return addr | bridge->driver->masks[0].mask;