pandora-kernel.git: drivers/gpu/drm/i915/i915_debugfs.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

#define ACTIVE_LIST     1
#define FLUSHING_LIST   2
#define INACTIVE_LIST   3

static const char *yesno(int v)
{
        return v ? "yes" : "no";
}

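/*
 * Dump the intel_device_info feature flags for this device; the B() macro
 * prints one "flag: yes/no" line per capability bit.
 */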
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i8xx);
        B(is_i85x);
        B(is_i915g);
        B(is_i9xx);
        B(is_i945gm);
        B(is_i965g);
        B(is_i965gm);
        B(is_g33);
        B(need_gfx_hws);
        B(is_g4x);
        B(is_pineview);
        B(is_broadwater);
        B(is_crestline);
        B(is_ironlake);
        B(has_fbc);
        B(has_rc6);
        B(has_pipe_cxsr);
        B(has_hotplug);
        B(cursor_needs_physical);
        B(has_overlay);
        B(overlay_needs_physical);
#undef B

        return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
        if (obj_priv->user_pin_count > 0)
                return "P";
        else if (obj_priv->pin_count > 0)
                return "p";
        else
                return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
        switch (obj_priv->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}

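/*
 * Print a one-line summary of a GEM object: pin/tiling flags, size, read and
 * write domains, last rendering seqno, plus name, fence register and GTT
 * offset when they are set.
 */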
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
}

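/*
 * Walk one of the GEM object lists (active, flushing or inactive, selected
 * via the debugfs entry's data pointer) and describe each object on it.
 */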
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                head = &dev_priv->render_ring.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        list_for_each_entry(obj_priv, head, list) {
                seq_printf(m, "   ");
                describe_obj(m, obj_priv);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char *pipe = crtc->pipe ? "B" : "A";
                const char *plane = crtc->plane ? "B" : "A";
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %s (plane %s)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
                                if (obj_priv)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
                                if (obj_priv)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
                        list) {
                seq_printf(m, "    %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:  %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat:         %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat:         %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
                           i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:     %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                if (obj == NULL) {
                        seq_printf(m, "Fenced object[%2d] = unused\n", i);
                } else {
                        struct drm_i915_gem_object *obj_priv;

                        obj_priv = to_intel_bo(obj);
                        seq_printf(m, "Fenced object[%2d] = %p: %s "
                                   "%08x %08zx %08x %s %08x %08x %d",
                                   i, obj, get_pin_flag(obj_priv),
                                   obj_priv->gtt_offset,
                                   obj->size, obj_priv->stride,
                                   get_tiling_flag(obj_priv),
                                   obj->read_domains, obj->write_domain,
                                   obj_priv->last_rendering_seqno);
                        if (obj->name)
                                seq_printf(m, " (name: %d)", obj->name);
                        seq_printf(m, "\n");
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        volatile u32 *hws;

        hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}

static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
{
        int page, i;
        uint32_t *mem;

        for (page = 0; page < page_count; page++) {
                mem = kmap(pages[page]);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
                kunmap(pages[page]);
        }
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
                        list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                        ret = i915_gem_object_get_pages(obj, 0);
                        if (ret) {
                                mutex_unlock(&dev->struct_mutex);
                                return ret;
                        }

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
                        i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);

                        i915_gem_object_put_pages(obj);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                u8 *virt = dev_priv->render_ring.virtual_start;
                uint32_t off;

                for (off = 0; off < dev_priv->render_ring.size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x :  %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int head, tail;

        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;

        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
        seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
        seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));

        return 0;
}

static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
        if (IS_I965G(dev)) {
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
        }
        seq_printf(m, "seqno: 0x%08x\n", error->seqno);

        if (error->active_bo_count) {
                seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

                for (i = 0; i < error->active_bo_count; i++) {
                        seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
                                   error->active_bo[i].gtt_offset,
                                   error->active_bo[i].size,
                                   error->active_bo[i].read_domains,
                                   error->active_bo[i].write_domain,
                                   error->active_bo[i].seqno,
                                   pin_flag(error->active_bo[i].pinned),
                                   tiling_flag(error->active_bo[i].tiling),
                                   dirty_flag(error->active_bo[i].dirty),
                                   purgeable_flag(error->active_bo[i].purgeable));

                        if (error->active_bo[i].name)
                                seq_printf(m, " (name: %d)", error->active_bo[i].name);
                        if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
                                seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

                        seq_printf(m, "\n");
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->ringbuffer) {
                struct drm_i915_error_object *obj = error->ringbuffer;

                seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
                offset = 0;
                for (page = 0; page < obj->page_count; page++) {
                        for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                offset += 4;
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay = I915_READ16(CRSTANDVID);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);
        u16 rgvstat = I915_READ16(MEMSTAT_ILK);

        seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
        seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
        seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                   MEMSTAT_VID_SHIFT);
        seq_printf(m, "Current P-state: %d\n",
                   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

        return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int i;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        return 0;
}

static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int i;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
        u16 crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

        return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
                   "disabled");

        return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, to_intel_bo(fb->obj));
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, to_intel_bo(fb->obj));
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}

static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof (buf),
                       "wedged :  %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

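/*
 * Writing a non-zero value marks the GPU as wedged: waiters on the IRQ queue
 * are woken and the error work handler is queued to attempt recovery.
 */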
static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof (buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                DRM_WAKEUP(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

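/*
 * Table of read-only debugfs entries; each one is registered with
 * drm_debugfs_create_files() from i915_debugfs_init() below.
 */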
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */