[pandora-kernel.git] drivers/gpu/drm/i915/i915_debugfs.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

#define ACTIVE_LIST     1
#define FLUSHING_LIST   2
#define INACTIVE_LIST   3

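/* Single-character status flags used when dumping GEM objects below. */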
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
        if (obj_priv->user_pin_count > 0)
                return "P";
        else if (obj_priv->pin_count > 0)
                return "p";
        else
                return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
        switch (obj_priv->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}

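/*
 * i915_gem_active / i915_gem_flushing / i915_gem_inactive: walk the GEM
 * object list selected by the entry's data field and print one line of
 * state (flags, size, domains, seqno, name, fence, GTT offset) per object.
 */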
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        spinlock_t *lock = NULL;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                lock = &dev_priv->mm.active_list_lock;
                head = &dev_priv->render_ring.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        default:
                DRM_INFO("Ooops, unexpected list\n");
                return 0;
        }

        if (lock)
                spin_lock(lock);
        list_for_each_entry(obj_priv, head, list) {
                seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
                           &obj_priv->base,
                           get_pin_flag(obj_priv),
                           obj_priv->base.size,
                           obj_priv->base.read_domains,
                           obj_priv->base.write_domain,
                           obj_priv->last_rendering_seqno,
                           obj_priv->dirty ? " dirty" : "",
                           obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");

                if (obj_priv->base.name)
                        seq_printf(m, " (name: %d)", obj_priv->base.name);
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                        seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
                if (obj_priv->gtt_space != NULL)
                        seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);

                seq_printf(m, "\n");
        }

        if (lock)
                spin_unlock(lock);
        return 0;
}

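/*
 * i915_gem_pageflip: for each CRTC, report whether a page flip is queued
 * or pending and the GTT offsets of the old and new framebuffer objects.
 */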
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char *pipe = crtc->pipe ? "B" : "A";
                const char *plane = crtc->plane ? "B" : "A";
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %s (plane %s)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
                                if (obj_priv)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
                                if (obj_priv)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}

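/*
 * i915_gem_request: list the outstanding requests on the render ring as
 * "<seqno> @ <jiffies since emission>".
 */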
static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;

        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
                        list) {
                seq_printf(m, "    %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
        return 0;
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:  %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
        return 0;
}

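/*
 * i915_gem_interrupt: dump the interrupt enable/identity/mask registers
 * (the PCH-split variants on Ironlake and later) together with the
 * current, waited-for, and IRQ-reported sequence numbers.
 */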
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat:         %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat:         %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
                           i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:     %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
        return 0;
}

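/*
 * i915_gem_fence_regs: show, for each fence register, the object that
 * occupies it (if any) with its offset, size, stride, and tiling mode.
 */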
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                if (obj == NULL) {
                        seq_printf(m, "Fenced object[%2d] = unused\n", i);
                } else {
                        struct drm_i915_gem_object *obj_priv;

                        obj_priv = to_intel_bo(obj);
                        seq_printf(m, "Fenced object[%2d] = %p: %s "
                                   "%08x %08zx %08x %s %08x %08x %d",
                                   i, obj, get_pin_flag(obj_priv),
                                   obj_priv->gtt_offset,
                                   obj->size, obj_priv->stride,
                                   get_tiling_flag(obj_priv),
                                   obj->read_domains, obj->write_domain,
                                   obj_priv->last_rendering_seqno);
                        if (obj->name)
                                seq_printf(m, " (name: %d)", obj->name);
                        seq_printf(m, "\n");
                }
        }

        return 0;
}

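/*
 * i915_gem_hws: hex dump of the first 1 KiB of the render ring's hardware
 * status page, four dwords per line.
 */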
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        volatile u32 *hws;

        hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}

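/* Dump an object's backing pages as "<byte offset> :  <dword>" lines. */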
static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
{
        int page, i;
        uint32_t *mem;

        for (page = 0; page < page_count; page++) {
                mem = kmap_atomic(pages[page], KM_USER0);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
                kunmap_atomic(mem, KM_USER0);
        }
}

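/*
 * i915_batchbuffers: dump the contents of every active object that is
 * still in the command (batch buffer) read domain.
 */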
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        spin_lock(&dev_priv->mm.active_list_lock);

        list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
                        list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                        ret = i915_gem_object_get_pages(obj, 0);
                        if (ret) {
                                DRM_ERROR("Failed to get pages: %d\n", ret);
                                spin_unlock(&dev_priv->mm.active_list_lock);
                                return ret;
                        }

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
                        i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);

                        i915_gem_object_put_pages(obj);
                }
        }

        spin_unlock(&dev_priv->mm.active_list_lock);

        return 0;
}

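/*
 * i915_ringbuffer_data / i915_ringbuffer_info: raw dword dump of the
 * render ring and a summary of its head, tail, size, and ACTHD.
 */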
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u8 *virt;
        uint32_t *ptr, off;

        if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
                return 0;
        }

        virt = dev_priv->render_ring.virtual_start;

        for (off = 0; off < dev_priv->render_ring.size; off += 4) {
                ptr = (uint32_t *)(virt + off);
                seq_printf(m, "%08x :  %08x\n", off, *ptr);
        }

        return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int head, tail;

        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;

        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
        seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
        seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));

        return 0;
}

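/* Flag helpers for formatting the captured error-state buffers below. */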
static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

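/*
 * i915_error_state: print the most recently captured GPU error state:
 * the fault registers, the list of active buffers, hex dumps of the
 * captured batch buffers and ring buffer, and any overlay error state.
 */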
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
        if (IS_I965G(dev)) {
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
        }
        seq_printf(m, "seqno: 0x%08x\n", error->seqno);

        if (error->active_bo_count) {
                seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

                for (i = 0; i < error->active_bo_count; i++) {
                        seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
                                   error->active_bo[i].gtt_offset,
                                   error->active_bo[i].size,
                                   error->active_bo[i].read_domains,
                                   error->active_bo[i].write_domain,
                                   error->active_bo[i].seqno,
                                   pin_flag(error->active_bo[i].pinned),
                                   tiling_flag(error->active_bo[i].tiling),
                                   dirty_flag(error->active_bo[i].dirty),
                                   purgeable_flag(error->active_bo[i].purgeable));

                        if (error->active_bo[i].name)
                                seq_printf(m, " (name: %d)", error->active_bo[i].name);
                        if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
                                seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

                        seq_printf(m, "\n");
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->ringbuffer) {
                struct drm_i915_error_object *obj = error->ringbuffer;

                seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
                offset = 0;
                for (page = 0; page < obj->page_count; page++) {
                        for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                offset += 4;
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}

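/*
 * i915_rstdby_delays: render standby entry delays (with and without an
 * active context) decoded from CRSTANDVID.
 */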
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay = I915_READ16(CRSTANDVID);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);
        u16 rgvstat = I915_READ16(MEMSTAT_ILK);

        seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
        seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
        seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                   MEMSTAT_VID_SHIFT);
        seq_printf(m, "Current P-state: %d\n",
                   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

        return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int i;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        return 0;
}

static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int i;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
        u16 crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

        return 0;
}

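/*
 * i915_fbc_status: report whether framebuffer compression is currently
 * enabled and, if not, the reason recorded in dev_priv->no_fbc_reason.
 */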
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
                   "disabled");

        return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        return 0;
}

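/*
 * i915_wedged: read/write debugfs file.  Reading reports the current
 * wedged state; writing a non-zero value marks the GPU as wedged and
 * queues the error handling work, which can be used to exercise the
 * hang handling path.
 */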
static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                       "wedged :  %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                DRM_WAKEUP(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

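/* Table of seq_file-backed debugfs entries registered by i915_debugfs_init(). */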
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */