drm/i915: add accounting for mappable objects in gtt v2
drivers/gpu/drm/i915/i915_debugfs.c (pandora-kernel.git)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/seq_file.h>
30 #include <linux/debugfs.h>
31 #include <linux/slab.h>
32 #include "drmP.h"
33 #include "drm.h"
34 #include "intel_drv.h"
35 #include "i915_drm.h"
36 #include "i915_drv.h"
37
38 #define DRM_I915_RING_DEBUG 1
39
40
41 #if defined(CONFIG_DEBUG_FS)
42
43 enum {
44         ACTIVE_LIST,
45         FLUSHING_LIST,
46         INACTIVE_LIST,
47         PINNED_LIST,
48         DEFERRED_FREE_LIST,
49 };
50
51 enum {
52         RENDER_RING,
53         BSD_RING,
54         BLT_RING,
55 };
56
57 static const char *yesno(int v)
58 {
59         return v ? "yes" : "no";
60 }
61
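/*
 * Print the hardware generation and the INTEL_INFO() feature flags,
 * one "flag: yes/no" line per capability.
 */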
62 static int i915_capabilities(struct seq_file *m, void *data)
63 {
64         struct drm_info_node *node = (struct drm_info_node *) m->private;
65         struct drm_device *dev = node->minor->dev;
66         const struct intel_device_info *info = INTEL_INFO(dev);
67
68         seq_printf(m, "gen: %d\n", info->gen);
69 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
70         B(is_mobile);
71         B(is_i85x);
72         B(is_i915g);
73         B(is_i945gm);
74         B(is_g33);
75         B(need_gfx_hws);
76         B(is_g4x);
77         B(is_pineview);
78         B(is_broadwater);
79         B(is_crestline);
80         B(has_fbc);
81         B(has_rc6);
82         B(has_pipe_cxsr);
83         B(has_hotplug);
84         B(cursor_needs_physical);
85         B(has_overlay);
86         B(overlay_needs_physical);
87         B(supports_tv);
88         B(has_bsd_ring);
89         B(has_blt_ring);
90 #undef B
91
92         return 0;
93 }
94
95 static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
96 {
97         if (obj_priv->user_pin_count > 0)
98                 return "P";
99         else if (obj_priv->pin_count > 0)
100                 return "p";
101         else
102                 return " ";
103 }
104
105 static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
106 {
107         switch (obj_priv->tiling_mode) {
108         default:
109         case I915_TILING_NONE: return " ";
110         case I915_TILING_X: return "X";
111         case I915_TILING_Y: return "Y";
112         }
113 }
114
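/*
 * Emit a one-line summary of a GEM object: pin and tiling flags, size,
 * read/write domains, last rendering seqno, dirty/purgeable state and,
 * when present, the flink name, fence register, GTT offset, mappability
 * and the ring the object is active on.
 */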
115 static void
116 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
117 {
118         seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
119                    &obj->base,
120                    get_pin_flag(obj),
121                    get_tiling_flag(obj),
122                    obj->base.size,
123                    obj->base.read_domains,
124                    obj->base.write_domain,
125                    obj->last_rendering_seqno,
126                    obj->dirty ? " dirty" : "",
127                    obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
128         if (obj->base.name)
129                 seq_printf(m, " (name: %d)", obj->base.name);
130         if (obj->fence_reg != I915_FENCE_REG_NONE)
131                 seq_printf(m, " (fence: %d)", obj->fence_reg);
132         if (obj->gtt_space != NULL)
133                 seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
134         if (obj->pin_mappable || obj->fault_mappable)
135                 seq_printf(m, " (mappable)");
136         if (obj->ring != NULL)
137                 seq_printf(m, " (%s)", obj->ring->name);
138 }
139
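/*
 * Walk one of the GEM memory-management lists (selected via info_ent->data)
 * under struct_mutex, describing each object and summing object and GTT sizes.
 */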
140 static int i915_gem_object_list_info(struct seq_file *m, void *data)
141 {
142         struct drm_info_node *node = (struct drm_info_node *) m->private;
143         uintptr_t list = (uintptr_t) node->info_ent->data;
144         struct list_head *head;
145         struct drm_device *dev = node->minor->dev;
146         drm_i915_private_t *dev_priv = dev->dev_private;
147         struct drm_i915_gem_object *obj_priv;
148         size_t total_obj_size, total_gtt_size;
149         int count, ret;
150
151         ret = mutex_lock_interruptible(&dev->struct_mutex);
152         if (ret)
153                 return ret;
154
155         switch (list) {
156         case ACTIVE_LIST:
157                 seq_printf(m, "Active:\n");
158                 head = &dev_priv->mm.active_list;
159                 break;
160         case INACTIVE_LIST:
161                 seq_printf(m, "Inactive:\n");
162                 head = &dev_priv->mm.inactive_list;
163                 break;
164         case PINNED_LIST:
165                 seq_printf(m, "Pinned:\n");
166                 head = &dev_priv->mm.pinned_list;
167                 break;
168         case FLUSHING_LIST:
169                 seq_printf(m, "Flushing:\n");
170                 head = &dev_priv->mm.flushing_list;
171                 break;
172         case DEFERRED_FREE_LIST:
173                 seq_printf(m, "Deferred free:\n");
174                 head = &dev_priv->mm.deferred_free_list;
175                 break;
176         default:
177                 mutex_unlock(&dev->struct_mutex);
178                 return -EINVAL;
179         }
180
181         total_obj_size = total_gtt_size = count = 0;
182         list_for_each_entry(obj_priv, head, mm_list) {
183                 seq_printf(m, "   ");
184                 describe_obj(m, obj_priv);
185                 seq_printf(m, "\n");
186                 total_obj_size += obj_priv->base.size;
187                 total_gtt_size += obj_priv->gtt_space->size;
188                 count++;
189         }
190         mutex_unlock(&dev->struct_mutex);
191
192         seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
193                    count, total_obj_size, total_gtt_size);
194         return 0;
195 }
196
197 static int i915_gem_object_info(struct seq_file *m, void* data)
198 {
199         struct drm_info_node *node = (struct drm_info_node *) m->private;
200         struct drm_device *dev = node->minor->dev;
201         struct drm_i915_private *dev_priv = dev->dev_private;
202         int ret;
203
204         ret = mutex_lock_interruptible(&dev->struct_mutex);
205         if (ret)
206                 return ret;
207
208         seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
209         seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
210         seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
211         seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
212         seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
213         seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
214         seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
215         seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
216         seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
217         seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
218         seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
219
220         mutex_unlock(&dev->struct_mutex);
221
222         return 0;
223 }
224
225
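/*
 * For each CRTC, report whether a page flip is queued or pending, taking
 * event_lock around the inspection of crtc->unpin_work.
 */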
226 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
227 {
228         struct drm_info_node *node = (struct drm_info_node *) m->private;
229         struct drm_device *dev = node->minor->dev;
230         unsigned long flags;
231         struct intel_crtc *crtc;
232
233         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
234                 const char *pipe = crtc->pipe ? "B" : "A";
235                 const char *plane = crtc->plane ? "B" : "A";
236                 struct intel_unpin_work *work;
237
238                 spin_lock_irqsave(&dev->event_lock, flags);
239                 work = crtc->unpin_work;
240                 if (work == NULL) {
241                         seq_printf(m, "No flip due on pipe %s (plane %s)\n",
242                                    pipe, plane);
243                 } else {
244                         if (!work->pending) {
245                                 seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
246                                            pipe, plane);
247                         } else {
248                                 seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
249                                            pipe, plane);
250                         }
251                         if (work->enable_stall_check)
252                                 seq_printf(m, "Stall check enabled, ");
253                         else
254                                 seq_printf(m, "Stall check waiting for page flip ioctl, ");
255                         seq_printf(m, "%d prepares\n", work->pending);
256
257                         if (work->old_fb_obj) {
258                                 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
259                                 if (obj_priv)
260                                         seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
261                         }
262                         if (work->pending_flip_obj) {
263                                 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
264                                 if(obj_priv)
265                                         seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
266                         }
267                 }
268                 spin_unlock_irqrestore(&dev->event_lock, flags);
269         }
270
271         return 0;
272 }
273
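/*
 * List the outstanding requests on the render, BSD and BLT rings with
 * their seqnos and age in jiffies.
 */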
274 static int i915_gem_request_info(struct seq_file *m, void *data)
275 {
276         struct drm_info_node *node = (struct drm_info_node *) m->private;
277         struct drm_device *dev = node->minor->dev;
278         drm_i915_private_t *dev_priv = dev->dev_private;
279         struct drm_i915_gem_request *gem_request;
280         int ret, count;
281
282         ret = mutex_lock_interruptible(&dev->struct_mutex);
283         if (ret)
284                 return ret;
285
286         count = 0;
287         if (!list_empty(&dev_priv->render_ring.request_list)) {
288                 seq_printf(m, "Render requests:\n");
289                 list_for_each_entry(gem_request,
290                                     &dev_priv->render_ring.request_list,
291                                     list) {
292                         seq_printf(m, "    %d @ %d\n",
293                                    gem_request->seqno,
294                                    (int) (jiffies - gem_request->emitted_jiffies));
295                 }
296                 count++;
297         }
298         if (!list_empty(&dev_priv->bsd_ring.request_list)) {
299                 seq_printf(m, "BSD requests:\n");
300                 list_for_each_entry(gem_request,
301                                     &dev_priv->bsd_ring.request_list,
302                                     list) {
303                         seq_printf(m, "    %d @ %d\n",
304                                    gem_request->seqno,
305                                    (int) (jiffies - gem_request->emitted_jiffies));
306                 }
307                 count++;
308         }
309         if (!list_empty(&dev_priv->blt_ring.request_list)) {
310                 seq_printf(m, "BLT requests:\n");
311                 list_for_each_entry(gem_request,
312                                     &dev_priv->blt_ring.request_list,
313                                     list) {
314                         seq_printf(m, "    %d @ %d\n",
315                                    gem_request->seqno,
316                                    (int) (jiffies - gem_request->emitted_jiffies));
317                 }
318                 count++;
319         }
320         mutex_unlock(&dev->struct_mutex);
321
322         if (count == 0)
323                 seq_printf(m, "No requests\n");
324
325         return 0;
326 }
327
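/* Print the current, waiter and IRQ seqnos for one ring. */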
328 static void i915_ring_seqno_info(struct seq_file *m,
329                                  struct intel_ring_buffer *ring)
330 {
331         if (ring->get_seqno) {
332                 seq_printf(m, "Current sequence (%s): %d\n",
333                            ring->name, ring->get_seqno(ring));
334                 seq_printf(m, "Waiter sequence (%s):  %d\n",
335                            ring->name, ring->waiting_seqno);
336                 seq_printf(m, "IRQ sequence (%s):     %d\n",
337                            ring->name, ring->irq_seqno);
338         }
339 }
340
341 static int i915_gem_seqno_info(struct seq_file *m, void *data)
342 {
343         struct drm_info_node *node = (struct drm_info_node *) m->private;
344         struct drm_device *dev = node->minor->dev;
345         drm_i915_private_t *dev_priv = dev->dev_private;
346         int ret;
347
348         ret = mutex_lock_interruptible(&dev->struct_mutex);
349         if (ret)
350                 return ret;
351
352         i915_ring_seqno_info(m, &dev_priv->render_ring);
353         i915_ring_seqno_info(m, &dev_priv->bsd_ring);
354         i915_ring_seqno_info(m, &dev_priv->blt_ring);
355
356         mutex_unlock(&dev->struct_mutex);
357
358         return 0;
359 }
360
361
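/*
 * Dump the interrupt enable/identity/mask registers, using the split
 * north/south display and GT register sets on PCH platforms.
 */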
362 static int i915_interrupt_info(struct seq_file *m, void *data)
363 {
364         struct drm_info_node *node = (struct drm_info_node *) m->private;
365         struct drm_device *dev = node->minor->dev;
366         drm_i915_private_t *dev_priv = dev->dev_private;
367         int ret;
368
369         ret = mutex_lock_interruptible(&dev->struct_mutex);
370         if (ret)
371                 return ret;
372
373         if (!HAS_PCH_SPLIT(dev)) {
374                 seq_printf(m, "Interrupt enable:    %08x\n",
375                            I915_READ(IER));
376                 seq_printf(m, "Interrupt identity:  %08x\n",
377                            I915_READ(IIR));
378                 seq_printf(m, "Interrupt mask:      %08x\n",
379                            I915_READ(IMR));
380                 seq_printf(m, "Pipe A stat:         %08x\n",
381                            I915_READ(PIPEASTAT));
382                 seq_printf(m, "Pipe B stat:         %08x\n",
383                            I915_READ(PIPEBSTAT));
384         } else {
385                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
386                            I915_READ(DEIER));
387                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
388                            I915_READ(DEIIR));
389                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
390                            I915_READ(DEIMR));
391                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
392                            I915_READ(SDEIER));
393                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
394                            I915_READ(SDEIIR));
395                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
396                            I915_READ(SDEIMR));
397                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
398                            I915_READ(GTIER));
399                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
400                            I915_READ(GTIIR));
401                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
402                            I915_READ(GTIMR));
403         }
404         seq_printf(m, "Interrupts received: %d\n",
405                    atomic_read(&dev_priv->irq_received));
406         i915_ring_seqno_info(m, &dev_priv->render_ring);
407         i915_ring_seqno_info(m, &dev_priv->bsd_ring);
408         i915_ring_seqno_info(m, &dev_priv->blt_ring);
409         mutex_unlock(&dev->struct_mutex);
410
411         return 0;
412 }
413
414 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
415 {
416         struct drm_info_node *node = (struct drm_info_node *) m->private;
417         struct drm_device *dev = node->minor->dev;
418         drm_i915_private_t *dev_priv = dev->dev_private;
419         int i, ret;
420
421         ret = mutex_lock_interruptible(&dev->struct_mutex);
422         if (ret)
423                 return ret;
424
425         seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
426         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
427         for (i = 0; i < dev_priv->num_fence_regs; i++) {
428                 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
429
430                 seq_printf(m, "Fenced object[%2d] = ", i);
431                 if (obj == NULL)
432                         seq_printf(m, "unused");
433                 else
434                         describe_obj(m, to_intel_bo(obj));
435                 seq_printf(m, "\n");
436         }
437         mutex_unlock(&dev->struct_mutex);
438
439         return 0;
440 }
441
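/* Hex-dump the beginning of the render ring's hardware status page. */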
442 static int i915_hws_info(struct seq_file *m, void *data)
443 {
444         struct drm_info_node *node = (struct drm_info_node *) m->private;
445         struct drm_device *dev = node->minor->dev;
446         drm_i915_private_t *dev_priv = dev->dev_private;
447         int i;
448         volatile u32 *hws;
449
450         hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
451         if (hws == NULL)
452                 return 0;
453
454         for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
455                 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
456                            i * 4,
457                            hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
458         }
459         return 0;
460 }
461
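/*
 * Hex-dump an object's contents by mapping each page of its GTT range
 * write-combined through the provided io_mapping.
 */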
462 static void i915_dump_object(struct seq_file *m,
463                              struct io_mapping *mapping,
464                              struct drm_i915_gem_object *obj_priv)
465 {
466         int page, page_count, i;
467
468         page_count = obj_priv->base.size / PAGE_SIZE;
469         for (page = 0; page < page_count; page++) {
470                 u32 *mem = io_mapping_map_wc(mapping,
471                                              obj_priv->gtt_offset + page * PAGE_SIZE);
472                 for (i = 0; i < PAGE_SIZE; i += 4)
473                         seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
474                 io_mapping_unmap(mem);
475         }
476 }
477
478 static int i915_batchbuffer_info(struct seq_file *m, void *data)
479 {
480         struct drm_info_node *node = (struct drm_info_node *) m->private;
481         struct drm_device *dev = node->minor->dev;
482         drm_i915_private_t *dev_priv = dev->dev_private;
483         struct drm_gem_object *obj;
484         struct drm_i915_gem_object *obj_priv;
485         int ret;
486
487         ret = mutex_lock_interruptible(&dev->struct_mutex);
488         if (ret)
489                 return ret;
490
491         list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
492                 obj = &obj_priv->base;
493                 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
494                         seq_printf(m, "--- gtt_offset = 0x%08x\n",
495                                    obj_priv->gtt_offset);
496                         i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
497                 }
498         }
499
500         mutex_unlock(&dev->struct_mutex);
501
502         return 0;
503 }
504
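/* Dump the raw contents of the selected ring buffer, one dword per line. */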
505 static int i915_ringbuffer_data(struct seq_file *m, void *data)
506 {
507         struct drm_info_node *node = (struct drm_info_node *) m->private;
508         struct drm_device *dev = node->minor->dev;
509         drm_i915_private_t *dev_priv = dev->dev_private;
510         struct intel_ring_buffer *ring;
511         int ret;
512
513         switch ((uintptr_t)node->info_ent->data) {
514         case RENDER_RING: ring = &dev_priv->render_ring; break;
515         case BSD_RING: ring = &dev_priv->bsd_ring; break;
516         case BLT_RING: ring = &dev_priv->blt_ring; break;
517         default: return -EINVAL;
518         }
519
520         ret = mutex_lock_interruptible(&dev->struct_mutex);
521         if (ret)
522                 return ret;
523
524         if (!ring->gem_object) {
525                 seq_printf(m, "No ringbuffer setup\n");
526         } else {
527                 u8 *virt = ring->virtual_start;
528                 uint32_t off;
529
530                 for (off = 0; off < ring->size; off += 4) {
531                         uint32_t *ptr = (uint32_t *)(virt + off);
532                         seq_printf(m, "%08x :  %08x\n", off, *ptr);
533                 }
534         }
535         mutex_unlock(&dev->struct_mutex);
536
537         return 0;
538 }
539
540 static int i915_ringbuffer_info(struct seq_file *m, void *data)
541 {
542         struct drm_info_node *node = (struct drm_info_node *) m->private;
543         struct drm_device *dev = node->minor->dev;
544         drm_i915_private_t *dev_priv = dev->dev_private;
545         struct intel_ring_buffer *ring;
546
547         switch ((uintptr_t)node->info_ent->data) {
548         case RENDER_RING: ring = &dev_priv->render_ring; break;
549         case BSD_RING: ring = &dev_priv->bsd_ring; break;
550         case BLT_RING: ring = &dev_priv->blt_ring; break;
551         default: return -EINVAL;
552         }
553
554         if (ring->size == 0)
555                 return 0;
556
557         seq_printf(m, "Ring %s:\n", ring->name);
558         seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
559         seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
560         seq_printf(m, "  Size :    %08x\n", ring->size);
561         seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
562         seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
563         seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
564
565         return 0;
566 }
567
568 static const char *pin_flag(int pinned)
569 {
570         if (pinned > 0)
571                 return " P";
572         else if (pinned < 0)
573                 return " p";
574         else
575                 return "";
576 }
577
578 static const char *tiling_flag(int tiling)
579 {
580         switch (tiling) {
581         default:
582         case I915_TILING_NONE: return "";
583         case I915_TILING_X: return " X";
584         case I915_TILING_Y: return " Y";
585         }
586 }
587
588 static const char *dirty_flag(int dirty)
589 {
590         return dirty ? " dirty" : "";
591 }
592
593 static const char *purgeable_flag(int purgeable)
594 {
595         return purgeable ? " purgeable" : "";
596 }
597
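/*
 * Print the most recently captured GPU error state (registers, active
 * buffers, batchbuffer and ringbuffer contents) under error_lock.
 */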
598 static int i915_error_state(struct seq_file *m, void *unused)
599 {
600         struct drm_info_node *node = (struct drm_info_node *) m->private;
601         struct drm_device *dev = node->minor->dev;
602         drm_i915_private_t *dev_priv = dev->dev_private;
603         struct drm_i915_error_state *error;
604         unsigned long flags;
605         int i, page, offset, elt;
606
607         spin_lock_irqsave(&dev_priv->error_lock, flags);
608         if (!dev_priv->first_error) {
609                 seq_printf(m, "no error state collected\n");
610                 goto out;
611         }
612
613         error = dev_priv->first_error;
614
615         seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
616                    error->time.tv_usec);
617         seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
618         seq_printf(m, "EIR: 0x%08x\n", error->eir);
619         seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
620         seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
621         seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
622         seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
623         seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
624         seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
625         if (INTEL_INFO(dev)->gen >= 4) {
626                 seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
627                 seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
628         }
629         seq_printf(m, "seqno: 0x%08x\n", error->seqno);
630
631         if (error->active_bo_count) {
632                 seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
633
634                 for (i = 0; i < error->active_bo_count; i++) {
635                         seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
636                                    error->active_bo[i].gtt_offset,
637                                    error->active_bo[i].size,
638                                    error->active_bo[i].read_domains,
639                                    error->active_bo[i].write_domain,
640                                    error->active_bo[i].seqno,
641                                    pin_flag(error->active_bo[i].pinned),
642                                    tiling_flag(error->active_bo[i].tiling),
643                                    dirty_flag(error->active_bo[i].dirty),
644                                    purgeable_flag(error->active_bo[i].purgeable));
645
646                         if (error->active_bo[i].name)
647                                 seq_printf(m, " (name: %d)", error->active_bo[i].name);
648                         if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
649                                 seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
650
651                         seq_printf(m, "\n");
652                 }
653         }
654
655         for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
656                 if (error->batchbuffer[i]) {
657                         struct drm_i915_error_object *obj = error->batchbuffer[i];
658
659                         seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
660                         offset = 0;
661                         for (page = 0; page < obj->page_count; page++) {
662                                 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
663                                         seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
664                                         offset += 4;
665                                 }
666                         }
667                 }
668         }
669
670         if (error->ringbuffer) {
671                 struct drm_i915_error_object *obj = error->ringbuffer;
672
673                 seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
674                 offset = 0;
675                 for (page = 0; page < obj->page_count; page++) {
676                         for (elt = 0; elt < PAGE_SIZE/4; elt++) {
677                                 seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
678                                 offset += 4;
679                         }
680                 }
681         }
682
683         if (error->overlay)
684                 intel_overlay_print_error_state(m, error->overlay);
685
686 out:
687         spin_unlock_irqrestore(&dev_priv->error_lock, flags);
688
689         return 0;
690 }
691
692 static int i915_rstdby_delays(struct seq_file *m, void *unused)
693 {
694         struct drm_info_node *node = (struct drm_info_node *) m->private;
695         struct drm_device *dev = node->minor->dev;
696         drm_i915_private_t *dev_priv = dev->dev_private;
697         u16 crstanddelay = I915_READ16(CRSTANDVID);
698
699         seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
700
701         return 0;
702 }
703
704 static int i915_cur_delayinfo(struct seq_file *m, void *unused)
705 {
706         struct drm_info_node *node = (struct drm_info_node *) m->private;
707         struct drm_device *dev = node->minor->dev;
708         drm_i915_private_t *dev_priv = dev->dev_private;
709         u16 rgvswctl = I915_READ16(MEMSWCTL);
710         u16 rgvstat = I915_READ16(MEMSTAT_ILK);
711
712         seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
713         seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
714         seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
715                    MEMSTAT_VID_SHIFT);
716         seq_printf(m, "Current P-state: %d\n",
717                    (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
718
719         return 0;
720 }
721
722 static int i915_delayfreq_table(struct seq_file *m, void *unused)
723 {
724         struct drm_info_node *node = (struct drm_info_node *) m->private;
725         struct drm_device *dev = node->minor->dev;
726         drm_i915_private_t *dev_priv = dev->dev_private;
727         u32 delayfreq;
728         int i;
729
730         for (i = 0; i < 16; i++) {
731                 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
732                 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
733                            (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
734         }
735
736         return 0;
737 }
738
739 static inline int MAP_TO_MV(int map)
740 {
741         return 1250 - (map * 25);
742 }
743
744 static int i915_inttoext_table(struct seq_file *m, void *unused)
745 {
746         struct drm_info_node *node = (struct drm_info_node *) m->private;
747         struct drm_device *dev = node->minor->dev;
748         drm_i915_private_t *dev_priv = dev->dev_private;
749         u32 inttoext;
750         int i;
751
752         for (i = 1; i <= 32; i++) {
753                 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
754                 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
755         }
756
757         return 0;
758 }
759
760 static int i915_drpc_info(struct seq_file *m, void *unused)
761 {
762         struct drm_info_node *node = (struct drm_info_node *) m->private;
763         struct drm_device *dev = node->minor->dev;
764         drm_i915_private_t *dev_priv = dev->dev_private;
765         u32 rgvmodectl = I915_READ(MEMMODECTL);
766         u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
767         u16 crstandvid = I915_READ16(CRSTANDVID);
768
769         seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
770                    "yes" : "no");
771         seq_printf(m, "Boost freq: %d\n",
772                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
773                    MEMMODE_BOOST_FREQ_SHIFT);
774         seq_printf(m, "HW control enabled: %s\n",
775                    rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
776         seq_printf(m, "SW control enabled: %s\n",
777                    rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
778         seq_printf(m, "Gated voltage change: %s\n",
779                    rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
780         seq_printf(m, "Starting frequency: P%d\n",
781                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
782         seq_printf(m, "Max P-state: P%d\n",
783                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
784         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
785         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
786         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
787         seq_printf(m, "Render standby enabled: %s\n",
788                    (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
789
790         return 0;
791 }
792
793 static int i915_fbc_status(struct seq_file *m, void *unused)
794 {
795         struct drm_info_node *node = (struct drm_info_node *) m->private;
796         struct drm_device *dev = node->minor->dev;
797         drm_i915_private_t *dev_priv = dev->dev_private;
798
799         if (!I915_HAS_FBC(dev)) {
800                 seq_printf(m, "FBC unsupported on this chipset\n");
801                 return 0;
802         }
803
804         if (intel_fbc_enabled(dev)) {
805                 seq_printf(m, "FBC enabled\n");
806         } else {
807                 seq_printf(m, "FBC disabled: ");
808                 switch (dev_priv->no_fbc_reason) {
809                 case FBC_NO_OUTPUT:
810                         seq_printf(m, "no outputs");
811                         break;
812                 case FBC_STOLEN_TOO_SMALL:
813                         seq_printf(m, "not enough stolen memory");
814                         break;
815                 case FBC_UNSUPPORTED_MODE:
816                         seq_printf(m, "mode not supported");
817                         break;
818                 case FBC_MODE_TOO_LARGE:
819                         seq_printf(m, "mode too large");
820                         break;
821                 case FBC_BAD_PLANE:
822                         seq_printf(m, "FBC unsupported on plane");
823                         break;
824                 case FBC_NOT_TILED:
825                         seq_printf(m, "scanout buffer not tiled");
826                         break;
827                 case FBC_MULTIPLE_PIPES:
828                         seq_printf(m, "multiple pipes are enabled");
829                         break;
830                 default:
831                         seq_printf(m, "unknown reason");
832                 }
833                 seq_printf(m, "\n");
834         }
835         return 0;
836 }
837
838 static int i915_sr_status(struct seq_file *m, void *unused)
839 {
840         struct drm_info_node *node = (struct drm_info_node *) m->private;
841         struct drm_device *dev = node->minor->dev;
842         drm_i915_private_t *dev_priv = dev->dev_private;
843         bool sr_enabled = false;
844
845         if (IS_GEN5(dev))
846                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
847         else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
848                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
849         else if (IS_I915GM(dev))
850                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
851         else if (IS_PINEVIEW(dev))
852                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
853
854         seq_printf(m, "self-refresh: %s\n",
855                    sr_enabled ? "enabled" : "disabled");
856
857         return 0;
858 }
859
860 static int i915_emon_status(struct seq_file *m, void *unused)
861 {
862         struct drm_info_node *node = (struct drm_info_node *) m->private;
863         struct drm_device *dev = node->minor->dev;
864         drm_i915_private_t *dev_priv = dev->dev_private;
865         unsigned long temp, chipset, gfx;
866         int ret;
867
868         ret = mutex_lock_interruptible(&dev->struct_mutex);
869         if (ret)
870                 return ret;
871
872         temp = i915_mch_val(dev_priv);
873         chipset = i915_chipset_val(dev_priv);
874         gfx = i915_gfx_val(dev_priv);
875         mutex_unlock(&dev->struct_mutex);
876
877         seq_printf(m, "GMCH temp: %ld\n", temp);
878         seq_printf(m, "Chipset power: %ld\n", chipset);
879         seq_printf(m, "GFX power: %ld\n", gfx);
880         seq_printf(m, "Total power: %ld\n", chipset + gfx);
881
882         return 0;
883 }
884
885 static int i915_gfxec(struct seq_file *m, void *unused)
886 {
887         struct drm_info_node *node = (struct drm_info_node *) m->private;
888         struct drm_device *dev = node->minor->dev;
889         drm_i915_private_t *dev_priv = dev->dev_private;
890
891         seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
892
893         return 0;
894 }
895
896 static int i915_opregion(struct seq_file *m, void *unused)
897 {
898         struct drm_info_node *node = (struct drm_info_node *) m->private;
899         struct drm_device *dev = node->minor->dev;
900         drm_i915_private_t *dev_priv = dev->dev_private;
901         struct intel_opregion *opregion = &dev_priv->opregion;
902         int ret;
903
904         ret = mutex_lock_interruptible(&dev->struct_mutex);
905         if (ret)
906                 return ret;
907
908         if (opregion->header)
909                 seq_write(m, opregion->header, OPREGION_SIZE);
910
911         mutex_unlock(&dev->struct_mutex);
912
913         return 0;
914 }
915
916 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
917 {
918         struct drm_info_node *node = (struct drm_info_node *) m->private;
919         struct drm_device *dev = node->minor->dev;
920         drm_i915_private_t *dev_priv = dev->dev_private;
921         struct intel_fbdev *ifbdev;
922         struct intel_framebuffer *fb;
923         int ret;
924
925         ret = mutex_lock_interruptible(&dev->mode_config.mutex);
926         if (ret)
927                 return ret;
928
929         ifbdev = dev_priv->fbdev;
930         fb = to_intel_framebuffer(ifbdev->helper.fb);
931
932         seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
933                    fb->base.width,
934                    fb->base.height,
935                    fb->base.depth,
936                    fb->base.bits_per_pixel);
937         describe_obj(m, to_intel_bo(fb->obj));
938         seq_printf(m, "\n");
939
940         list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
941                 if (&fb->base == ifbdev->helper.fb)
942                         continue;
943
944                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
945                            fb->base.width,
946                            fb->base.height,
947                            fb->base.depth,
948                            fb->base.bits_per_pixel);
949                 describe_obj(m, to_intel_bo(fb->obj));
950                 seq_printf(m, "\n");
951         }
952
953         mutex_unlock(&dev->mode_config.mutex);
954
955         return 0;
956 }
957
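/*
 * The i915_wedged debugfs file: reads report the current wedged state,
 * writes set it and, when wedging, kick the error handling work.
 */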
958 static int
959 i915_wedged_open(struct inode *inode,
960                  struct file *filp)
961 {
962         filp->private_data = inode->i_private;
963         return 0;
964 }
965
966 static ssize_t
967 i915_wedged_read(struct file *filp,
968                  char __user *ubuf,
969                  size_t max,
970                  loff_t *ppos)
971 {
972         struct drm_device *dev = filp->private_data;
973         drm_i915_private_t *dev_priv = dev->dev_private;
974         char buf[80];
975         int len;
976
977         len = snprintf(buf, sizeof (buf),
978                        "wedged :  %d\n",
979                        atomic_read(&dev_priv->mm.wedged));
980
981         if (len > sizeof (buf))
982                 len = sizeof (buf);
983
984         return simple_read_from_buffer(ubuf, max, ppos, buf, len);
985 }
986
987 static ssize_t
988 i915_wedged_write(struct file *filp,
989                   const char __user *ubuf,
990                   size_t cnt,
991                   loff_t *ppos)
992 {
993         struct drm_device *dev = filp->private_data;
994         drm_i915_private_t *dev_priv = dev->dev_private;
995         char buf[20];
996         int val = 1;
997
998         if (cnt > 0) {
999                 if (cnt > sizeof (buf) - 1)
1000                         return -EINVAL;
1001
1002                 if (copy_from_user(buf, ubuf, cnt))
1003                         return -EFAULT;
1004                 buf[cnt] = 0;
1005
1006                 val = simple_strtoul(buf, NULL, 0);
1007         }
1008
1009         DRM_INFO("Manually setting wedged to %d\n", val);
1010
1011         atomic_set(&dev_priv->mm.wedged, val);
1012         if (val) {
1013                 wake_up_all(&dev_priv->irq_queue);
1014                 queue_work(dev_priv->wq, &dev_priv->error_work);
1015         }
1016
1017         return cnt;
1018 }
1019
1020 static const struct file_operations i915_wedged_fops = {
1021         .owner = THIS_MODULE,
1022         .open = i915_wedged_open,
1023         .read = i915_wedged_read,
1024         .write = i915_wedged_write,
1025         .llseek = default_llseek,
1026 };
1027
1028 /* As the drm_debugfs_init() routines are called before dev->dev_private is
1029  * allocated, we need to hook into the minor for release. */
1030 static int
1031 drm_add_fake_info_node(struct drm_minor *minor,
1032                        struct dentry *ent,
1033                        const void *key)
1034 {
1035         struct drm_info_node *node;
1036
1037         node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
1038         if (node == NULL) {
1039                 debugfs_remove(ent);
1040                 return -ENOMEM;
1041         }
1042
1043         node->minor = minor;
1044         node->dent = ent;
1045         node->info_ent = (void *) key;
1046         list_add(&node->list, &minor->debugfs_nodes.list);
1047
1048         return 0;
1049 }
1050
1051 static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
1052 {
1053         struct drm_device *dev = minor->dev;
1054         struct dentry *ent;
1055
1056         ent = debugfs_create_file("i915_wedged",
1057                                   S_IRUGO | S_IWUSR,
1058                                   root, dev,
1059                                   &i915_wedged_fops);
1060         if (IS_ERR(ent))
1061                 return PTR_ERR(ent);
1062
1063         return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
1064 }
1065
1066 static struct drm_info_list i915_debugfs_list[] = {
1067         {"i915_capabilities", i915_capabilities, 0, 0},
1068         {"i915_gem_objects", i915_gem_object_info, 0},
1069         {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
1070         {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
1071         {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
1072         {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
1073         {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
1074         {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
1075         {"i915_gem_request", i915_gem_request_info, 0},
1076         {"i915_gem_seqno", i915_gem_seqno_info, 0},
1077         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
1078         {"i915_gem_interrupt", i915_interrupt_info, 0},
1079         {"i915_gem_hws", i915_hws_info, 0},
1080         {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING},
1081         {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING},
1082         {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING},
1083         {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BSD_RING},
1084         {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BLT_RING},
1085         {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BLT_RING},
1086         {"i915_batchbuffers", i915_batchbuffer_info, 0},
1087         {"i915_error_state", i915_error_state, 0},
1088         {"i915_rstdby_delays", i915_rstdby_delays, 0},
1089         {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
1090         {"i915_delayfreq_table", i915_delayfreq_table, 0},
1091         {"i915_inttoext_table", i915_inttoext_table, 0},
1092         {"i915_drpc_info", i915_drpc_info, 0},
1093         {"i915_emon_status", i915_emon_status, 0},
1094         {"i915_gfxec", i915_gfxec, 0},
1095         {"i915_fbc_status", i915_fbc_status, 0},
1096         {"i915_sr_status", i915_sr_status, 0},
1097         {"i915_opregion", i915_opregion, 0},
1098         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1099 };
1100 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1101
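/* Register the i915_wedged file and the read-only debugfs entries above. */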
1102 int i915_debugfs_init(struct drm_minor *minor)
1103 {
1104         int ret;
1105
1106         ret = i915_wedged_create(minor->debugfs_root, minor);
1107         if (ret)
1108                 return ret;
1109
1110         return drm_debugfs_create_files(i915_debugfs_list,
1111                                         I915_DEBUGFS_ENTRIES,
1112                                         minor->debugfs_root, minor);
1113 }
1114
1115 void i915_debugfs_cleanup(struct drm_minor *minor)
1116 {
1117         drm_debugfs_remove_files(i915_debugfs_list,
1118                                  I915_DEBUGFS_ENTRIES, minor);
1119         drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1120                                  1, minor);
1121 }
1122
1123 #endif /* CONFIG_DEBUG_FS */