/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#if WATCH_LISTS
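/*
 * Walk every GEM object list (render active, flushing, gpu_write,
 * inactive, pinned) and verify that each object is still referenced,
 * belongs to this device, and is in a state consistent with the list
 * it sits on.  The result is latched in 'warned' so that a failure is
 * only reported once.
 */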
int
i915_verify_lists(struct drm_device *dev)
{
        static int warned;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int err = 0;

        if (warned)
                return 0;

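        /*
         * Everything on the render active list must still be active
         * and have at least one GPU read domain set.
         */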
        list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed render active %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid render active %p (a %d r %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.read_domains);
                        err++;
                } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
                                  obj,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }

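        /*
         * Objects awaiting a flush must be active and have a pending
         * GPU write tracked on their gpu_write_list.
         */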
        list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed flushing %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
                           list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }

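        /*
         * Objects with a pending GPU write must be active and have a
         * GPU write domain set.
         */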
        list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed gpu write %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

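        /*
         * Inactive objects must be idle: not pinned, not active and
         * with no outstanding GPU writes.
         */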
        list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed inactive %p\n", obj);
                        err++;
                        break;
                } else if (obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

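        /*
         * Pinned objects must hold a pin reference but otherwise be
         * idle, just like inactive ones.
         */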
        list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed pinned %p\n", obj);
                        err++;
                        break;
                } else if (!obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        return warned = err;
}
#endif /* WATCH_LISTS */

#if WATCH_EXEC || WATCH_PWRITE
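/*
 * Dump one page of an object as 32-bit words.  'bias' is the GTT offset
 * of the start of the page, so the printed addresses match the GPU's
 * view; the word whose GTT address equals 'mark' is flagged with
 * asterisks.
 */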
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
                   uint32_t bias, uint32_t mark)
{
        uint32_t *mem = kmap_atomic(page, KM_USER0);
        int i;

        for (i = start; i < end; i += 4)
                DRM_INFO("%08x: %08x%s\n",
                         (int) (bias + i), mem[i / 4],
                         (bias + i == mark) ? " ********" : "");
        kunmap_atomic(mem, KM_USER0);
        /* give syslog time to catch up */
        msleep(1);
}

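/*
 * Dump the first 'len' bytes of an object via its backing pages, in
 * 128-byte chunks, labelled with GTT addresses.
 */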
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
                     const char *where, uint32_t mark)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page;

        DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
        for (page = 0; page < (len + PAGE_SIZE - 1) / PAGE_SIZE; page++) {
                int page_len, chunk, chunk_len;

                page_len = len - page * PAGE_SIZE;
                if (page_len > PAGE_SIZE)
                        page_len = PAGE_SIZE;

                for (chunk = 0; chunk < page_len; chunk += 128) {
                        chunk_len = page_len - chunk;
                        if (chunk_len > 128)
                                chunk_len = 128;
                        i915_gem_dump_page(obj_priv->pages[page],
                                           chunk, chunk + chunk_len,
                                           obj_priv->gtt_offset +
                                           page * PAGE_SIZE,
                                           mark);
                }
        }
}
#endif

#if WATCH_COHERENCY
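/*
 * Compare an object's backing pages against its contents as seen through
 * the GTT aperture and report any words that differ, bailing out after
 * too many mismatches.  Finishes with a clflush so that the check itself
 * does not disturb the cache state being observed.
 */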
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page;
        uint32_t *gtt_mapping;
        uint32_t *backing_map = NULL;
        int bad_count = 0;

        DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zukb):\n",
                 __func__, obj, obj_priv->gtt_offset, handle,
                 obj->size / 1024);

        gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
                              obj->size);
        if (gtt_mapping == NULL) {
                DRM_ERROR("failed to map GTT space\n");
                return;
        }

        for (page = 0; page < obj->size / PAGE_SIZE; page++) {
                int i;

                backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);

                if (backing_map == NULL) {
                        DRM_ERROR("failed to map backing page\n");
                        goto out;
                }

                for (i = 0; i < PAGE_SIZE / 4; i++) {
                        uint32_t cpuval = backing_map[i];
                        uint32_t gttval = readl(gtt_mapping +
                                                page * 1024 + i);

                        if (cpuval != gttval) {
                                DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
                                         "0x%08x vs 0x%08x\n",
                                         (int)(obj_priv->gtt_offset +
                                               page * PAGE_SIZE + i * 4),
                                         cpuval, gttval);
                                if (bad_count++ >= 8) {
                                        DRM_INFO("...\n");
                                        goto out;
                                }
                        }
                }
                kunmap_atomic(backing_map, KM_USER0);
                backing_map = NULL;
        }

 out:
        if (backing_map != NULL)
                kunmap_atomic(backing_map, KM_USER0);
        iounmap(gtt_mapping);

        /* give syslog time to catch up */
        msleep(1);

        /* Directly flush the object, since we just loaded values with the CPU
         * from the backing pages and we don't want to disturb the cache
         * management that we're trying to observe.
         */
        i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */