/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
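
	/*
	 * Worked example (hypothetical numbers, assuming
	 * I915_PPGTT_PT_ENTRIES == 1024): clearing from GTT page 0x12345
	 * gives act_pd = 0x12345 / 1024 = 72 (which page table) and
	 * first_pte = 0x12345 % 1024 = 837 (the slot within it), so the
	 * loop below starts partway into table 72 and then walks whole
	 * tables until num_entries is exhausted.
	 */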
	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	uint32_t pd_entry;
	unsigned first_pd_entry_in_global_pt;
	uint32_t __iomem *pd_addr;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end
	 * for now. */
	first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
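
	/*
	 * Assuming the current definitions of I915_PPGTT_PD_ENTRIES == 512
	 * and I915_PPGTT_PT_ENTRIES == 1024, this yields 512 * 1024 PTEs,
	 * i.e. 2 GiB of PPGTT address space, at the cost of the last 512
	 * global GTT entries (2 MiB of mappable space).
	 */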
	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
					     *ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;
	}
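
	/*
	 * The PDEs themselves live in the global GTT, so they are written
	 * through the same mapping as ordinary GTT entries; each PDE points
	 * at one page worth of PTEs, via its DMA address when an IOMMU is
	 * active (needs_dmar) or its physical address otherwise.
	 */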
	pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;
		if (dev_priv->mm.gtt->needs_dmar) {
			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev,
						  pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		} else
			pt_addr = page_to_phys(ppgtt->pt_pages[i]);

		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);	/* posting read, flush the PDE writes */

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
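
	/*
	 * pd_offset is in bytes from the start of the global GTT (each
	 * entry is sizeof(uint32_t)); the code that actually enables PPGTT
	 * and points the hardware at the page directory lives outside this
	 * file.
	 */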
	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 struct scatterlist *sg_list,
					 unsigned sg_len,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = sg_list;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;
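
	/*
	 * Two walks run in lockstep below: the outer loop advances table by
	 * table (one kmap_atomic window each), while sg/m/segment_len
	 * advance page by page through scatterlist segments that may each
	 * cover several pages.
	 */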
	while (i < sg_len) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			m++;
			if (m == segment_len) {
				sg = sg_next(sg);
				i++;
				if (i == sg_len)
					break;

				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
				    unsigned first_entry, unsigned num_entries,
				    struct page **pages, uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;
	dma_addr_t page_addr;
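
	/*
	 * Same table-by-table walk as i915_ppgtt_clear_range(), but the
	 * PTEs are sourced from the physical addresses of the pages array
	 * instead of the scratch page.
	 */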
	while (num_entries) {
		last_pte = first_pte + num_entries;
		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = page_to_phys(*pages);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[i] = pte | pte_flags;

			pages++;
		}

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}
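
	/*
	 * With an IOMMU active (needs_dmar) the PTEs must carry the bus
	 * addresses from the object's scatterlist; otherwise the pages'
	 * physical addresses can be written directly.
	 */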
	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		i915_ppgtt_insert_sg_entries(ppgtt,
					     obj->sg_list,
					     obj->num_sg,
					     obj->gtt_space->start >> PAGE_SHIFT,
					     pte_flags);
	} else
		i915_ppgtt_insert_pages(ppgtt,
					obj->gtt_space->start >> PAGE_SHIFT,
					obj->base.size >> PAGE_SHIFT,
					obj->pages,
					pte_flags);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}
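
/*
 * do_idle_maps is a workaround flag (set, as far as the flag's users
 * suggest, for Ironlake systems with VT-d enabled, where unmapping GTT
 * entries while the GPU is busy can hang the chip): the two helpers below
 * force the GPU idle around an unmap and then restore the previous
 * mm.interruptible setting.
 */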
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev, false)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
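
	/*
	 * Rebind everything that was bound before suspend. The clflush is
	 * presumably here because CPU caches cannot be assumed coherent
	 * with what the GPU will read through the rewritten mappings.
	 */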
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_rebind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
	int ret;

	if (dev_priv->mm.gtt->needs_dmar) {
		ret = intel_gtt_map_memory(obj->pages,
					   obj->base.size >> PAGE_SHIFT,
					   &obj->sg_list,
					   &obj->num_sg);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);

	return 0;
}

void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
				enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	if (obj->sg_list) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
	}

	undo_idling(dev_priv, interruptible);
}