1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
37 #include "i915_trace.h"
38 #include "intel_drv.h"
40 /* For display hotplug interrupt */
42 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
44 if ((dev_priv->irq_mask & mask) != 0) {
45 dev_priv->irq_mask &= ~mask;
46 I915_WRITE(DEIMR, dev_priv->irq_mask);
52 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
54 if ((dev_priv->irq_mask & mask) != mask) {
55 dev_priv->irq_mask |= mask;
56 I915_WRITE(DEIMR, dev_priv->irq_mask);
62 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
64 if ((dev_priv->pipestat[pipe] & mask) != mask) {
65 u32 reg = PIPESTAT(pipe);
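/* PIPESTAT packs the enable bits in the high 16 bits and the latched
 * status bits in the low 16 bits; writing a status bit back clears it,
 * which is why the write below ORs in (mask >> 16).
 */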
67 dev_priv->pipestat[pipe] |= mask;
68 /* Enable the interrupt, clear any pending status */
69 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
75 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
77 if ((dev_priv->pipestat[pipe] & mask) != 0) {
78 u32 reg = PIPESTAT(pipe);
80 dev_priv->pipestat[pipe] &= ~mask;
81 I915_WRITE(reg, dev_priv->pipestat[pipe]);
87 * intel_enable_asle - enable ASLE interrupt for OpRegion
89 void intel_enable_asle(struct drm_device *dev)
91 drm_i915_private_t *dev_priv = dev->dev_private;
92 unsigned long irqflags;
94 /* FIXME: opregion/asle for VLV */
95 if (IS_VALLEYVIEW(dev))
98 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
100 if (HAS_PCH_SPLIT(dev))
101 ironlake_enable_display_irq(dev_priv, DE_GSE);
103 i915_enable_pipestat(dev_priv, 1,
104 PIPE_LEGACY_BLC_EVENT_ENABLE);
105 if (INTEL_INFO(dev)->gen >= 4)
106 i915_enable_pipestat(dev_priv, 0,
107 PIPE_LEGACY_BLC_EVENT_ENABLE);
110 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
114 * i915_pipe_enabled - check if a pipe is enabled
116 * @pipe: pipe to check
118 * Reading certain registers when the pipe is disabled can hang the chip.
119 * Use this routine to make sure the PLL is running and the pipe is active
120 * before reading such registers if unsure.
123 i915_pipe_enabled(struct drm_device *dev, int pipe)
125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
126 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
129 /* Called from drm generic code, passed a 'crtc', which
130 * we use as a pipe index
132 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
134 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
135 unsigned long high_frame;
136 unsigned long low_frame;
137 u32 high1, high2, low;
139 if (!i915_pipe_enabled(dev, pipe)) {
140 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
141 "pipe %c\n", pipe_name(pipe));
145 high_frame = PIPEFRAME(pipe);
146 low_frame = PIPEFRAMEPIXEL(pipe);
149 * High & low register fields aren't synchronized, so make sure
150 * we get a low value that's stable across two reads of the high register.
154 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
155 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
156 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
157 } while (high1 != high2);
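/* The pipe frame counter is split across two registers: PIPEFRAME holds
 * the high bits and PIPEFRAMEPIXEL holds the low 8 bits, so stitch them
 * back together below.
 */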
159 high1 >>= PIPE_FRAME_HIGH_SHIFT;
160 low >>= PIPE_FRAME_LOW_SHIFT;
161 return (high1 << 8) | low;
164 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
166 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
167 int reg = PIPE_FRMCOUNT_GM45(pipe);
169 if (!i915_pipe_enabled(dev, pipe)) {
170 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
171 "pipe %c\n", pipe_name(pipe));
175 return I915_READ(reg);
178 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
179 int *vpos, int *hpos)
181 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
182 u32 vbl = 0, position = 0;
183 int vbl_start, vbl_end, htotal, vtotal;
187 if (!i915_pipe_enabled(dev, pipe)) {
188 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
189 "pipe %c\n", pipe_name(pipe));
194 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
196 if (INTEL_INFO(dev)->gen >= 4) {
197 /* No obvious pixelcount register. Only query vertical
198 * scanout position from Display scan line register.
200 position = I915_READ(PIPEDSL(pipe));
202 /* Decode into vertical scanout position. Don't have
203 * horizontal scanout position.
205 *vpos = position & 0x1fff;
208 /* Have access to pixelcount since start of frame.
209 * We can split this into vertical and horizontal scanout position.
212 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
214 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
215 *vpos = position / htotal;
216 *hpos = position - (*vpos * htotal);
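/* Illustrative example (values hypothetical): with htotal == 2200 and a
 * raw pixelcount of 4567, *vpos = 4567 / 2200 = 2 and
 * *hpos = 4567 - 2 * 2200 = 167.
 */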
219 /* Query vblank area. */
220 vbl = I915_READ(VBLANK(pipe));
222 /* Test position against vblank region. */
223 vbl_start = vbl & 0x1fff;
224 vbl_end = (vbl >> 16) & 0x1fff;
226 if ((*vpos < vbl_start) || (*vpos > vbl_end))
229 /* Inside "upper part" of vblank area? Apply corrective offset: */
230 if (in_vbl && (*vpos >= vbl_start))
231 *vpos = *vpos - vtotal;
233 /* Readouts valid? */
235 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
239 ret |= DRM_SCANOUTPOS_INVBL;
244 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
246 struct timeval *vblank_time,
249 struct drm_i915_private *dev_priv = dev->dev_private;
250 struct drm_crtc *crtc;
252 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
253 DRM_ERROR("Invalid crtc %d\n", pipe);
257 /* Get drm_crtc to timestamp: */
258 crtc = intel_get_crtc_for_pipe(dev, pipe);
260 DRM_ERROR("Invalid crtc %d\n", pipe);
264 if (!crtc->enabled) {
265 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
269 /* Helper routine in DRM core does all the work: */
270 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
276 * Handle hotplug events outside the interrupt handler proper.
278 static void i915_hotplug_work_func(struct work_struct *work)
280 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
282 struct drm_device *dev = dev_priv->dev;
283 struct drm_mode_config *mode_config = &dev->mode_config;
284 struct intel_encoder *encoder;
286 mutex_lock(&mode_config->mutex);
287 DRM_DEBUG_KMS("running encoder hotplug functions\n");
289 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
290 if (encoder->hot_plug)
291 encoder->hot_plug(encoder);
293 mutex_unlock(&mode_config->mutex);
295 /* Just fire off a uevent and let userspace tell us what to do */
296 drm_helper_hpd_irq_event(dev);
299 static void i915_handle_rps_change(struct drm_device *dev)
301 drm_i915_private_t *dev_priv = dev->dev_private;
302 u32 busy_up, busy_down, max_avg, min_avg;
303 u8 new_delay = dev_priv->cur_delay;
305 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
306 busy_up = I915_READ(RCPREVBSYTUPAVG);
307 busy_down = I915_READ(RCPREVBSYTDNAVG);
308 max_avg = I915_READ(RCBMAXAVG);
309 min_avg = I915_READ(RCBMINAVG);
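/* Note: Ironlake delay values run opposite to frequency - a smaller
 * delay means a higher render P-state - so max_delay is numerically the
 * smallest allowed value and the clamps below compare accordingly.
 */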
311 /* Handle RPS change request from hw */
312 if (busy_up > max_avg) {
313 if (dev_priv->cur_delay != dev_priv->max_delay)
314 new_delay = dev_priv->cur_delay - 1;
315 if (new_delay < dev_priv->max_delay)
316 new_delay = dev_priv->max_delay;
317 } else if (busy_down < min_avg) {
318 if (dev_priv->cur_delay != dev_priv->min_delay)
319 new_delay = dev_priv->cur_delay + 1;
320 if (new_delay > dev_priv->min_delay)
321 new_delay = dev_priv->min_delay;
324 if (ironlake_set_drps(dev, new_delay))
325 dev_priv->cur_delay = new_delay;
330 static void notify_ring(struct drm_device *dev,
331 struct intel_ring_buffer *ring)
333 struct drm_i915_private *dev_priv = dev->dev_private;
335 if (ring->obj == NULL)
338 trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
340 wake_up_all(&ring->irq_queue);
341 if (i915_enable_hangcheck) {
342 dev_priv->hangcheck_count = 0;
343 mod_timer(&dev_priv->hangcheck_timer,
345 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
349 static void gen6_pm_rps_work(struct work_struct *work)
351 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
356 spin_lock_irq(&dev_priv->rps_lock);
357 pm_iir = dev_priv->pm_iir;
358 dev_priv->pm_iir = 0;
359 pm_imr = I915_READ(GEN6_PMIMR);
360 I915_WRITE(GEN6_PMIMR, 0);
361 spin_unlock_irq(&dev_priv->rps_lock);
363 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
366 mutex_lock(&dev_priv->dev->struct_mutex);
368 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
369 new_delay = dev_priv->cur_delay + 1;
371 new_delay = dev_priv->cur_delay - 1;
373 gen6_set_rps(dev_priv->dev, new_delay);
375 mutex_unlock(&dev_priv->dev->struct_mutex);
380 * ivybridge_parity_work - Workqueue called when a parity error interrupt occurs
382 * @work: workqueue struct
384 * Doesn't actually do anything except notify userspace. As a consequence of
385 * this event, userspace should try to remap the bad rows since statistically
386 * the same row is likely to go bad again.
388 static void ivybridge_parity_work(struct work_struct *work)
390 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
392 u32 error_status, row, bank, subbank;
393 char *parity_event[5];
397 /* We must turn off DOP level clock gating to access the L3 registers.
398 * In order to prevent a get/put style interface, acquire struct mutex
399 * any time we access those registers.
401 mutex_lock(&dev_priv->dev->struct_mutex);
403 misccpctl = I915_READ(GEN7_MISCCPCTL);
404 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
405 POSTING_READ(GEN7_MISCCPCTL);
407 error_status = I915_READ(GEN7_L3CDERRST1);
408 row = GEN7_PARITY_ERROR_ROW(error_status);
409 bank = GEN7_PARITY_ERROR_BANK(error_status);
410 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
412 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
413 GEN7_L3CDERRST1_ENABLE);
414 POSTING_READ(GEN7_L3CDERRST1);
416 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
418 spin_lock_irqsave(&dev_priv->irq_lock, flags);
419 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
420 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
421 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
423 mutex_unlock(&dev_priv->dev->struct_mutex);
425 parity_event[0] = "L3_PARITY_ERROR=1";
426 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
427 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
428 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
429 parity_event[4] = NULL;
431 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
432 KOBJ_CHANGE, parity_event);
434 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
437 kfree(parity_event[3]);
438 kfree(parity_event[2]);
439 kfree(parity_event[1]);
442 static void ivybridge_handle_parity_error(struct drm_device *dev)
444 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
447 if (!IS_IVYBRIDGE(dev))
450 spin_lock_irqsave(&dev_priv->irq_lock, flags);
451 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
452 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
453 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
455 queue_work(dev_priv->wq, &dev_priv->parity_error_work);
458 static void snb_gt_irq_handler(struct drm_device *dev,
459 struct drm_i915_private *dev_priv,
463 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
464 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
465 notify_ring(dev, &dev_priv->ring[RCS]);
466 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
467 notify_ring(dev, &dev_priv->ring[VCS]);
468 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
469 notify_ring(dev, &dev_priv->ring[BCS]);
471 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
472 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
473 GT_RENDER_CS_ERROR_INTERRUPT)) {
474 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
475 i915_handle_error(dev, false);
478 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
479 ivybridge_handle_parity_error(dev);
482 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
488 * IIR bits should never already be set because IMR should
489 * prevent an interrupt from being shown in IIR. The warning
490 * catches a case where we've unsafely cleared
491 * dev_priv->pm_iir. Although missing an interrupt of the same
492 * type is not a problem, it indicates a problem in the logic.
494 * The mask bit in IMR is cleared by rps_work.
497 spin_lock_irqsave(&dev_priv->rps_lock, flags);
498 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
499 dev_priv->pm_iir |= pm_iir;
500 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
501 POSTING_READ(GEN6_PMIMR);
502 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
504 queue_work(dev_priv->wq, &dev_priv->rps_work);
507 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
509 struct drm_device *dev = (struct drm_device *) arg;
510 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
511 u32 iir, gt_iir, pm_iir;
512 irqreturn_t ret = IRQ_NONE;
513 unsigned long irqflags;
515 u32 pipe_stats[I915_MAX_PIPES];
518 atomic_inc(&dev_priv->irq_received);
521 iir = I915_READ(VLV_IIR);
522 gt_iir = I915_READ(GTIIR);
523 pm_iir = I915_READ(GEN6_PMIIR);
525 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
530 snb_gt_irq_handler(dev, dev_priv, gt_iir);
532 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
533 for_each_pipe(pipe) {
534 int reg = PIPESTAT(pipe);
535 pipe_stats[pipe] = I915_READ(reg);
538 * Clear the PIPE*STAT regs before the IIR
540 if (pipe_stats[pipe] & 0x8000ffff) {
541 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
542 DRM_DEBUG_DRIVER("pipe %c underrun\n",
544 I915_WRITE(reg, pipe_stats[pipe]);
547 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
549 for_each_pipe(pipe) {
550 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
551 drm_handle_vblank(dev, pipe);
553 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
554 intel_prepare_page_flip(dev, pipe);
555 intel_finish_page_flip(dev, pipe);
559 /* Consume port. Then clear IIR or we'll miss events */
560 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
561 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
563 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
565 if (hotplug_status & dev_priv->hotplug_supported_mask)
566 queue_work(dev_priv->wq,
567 &dev_priv->hotplug_work);
569 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
570 I915_READ(PORT_HOTPLUG_STAT);
573 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
576 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
577 gen6_queue_rps_work(dev_priv, pm_iir);
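/* Ack everything serviced on this pass; the enclosing loop re-reads the
 * IIR registers and only exits once they all read back as zero.
 */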
579 I915_WRITE(GTIIR, gt_iir);
580 I915_WRITE(GEN6_PMIIR, pm_iir);
581 I915_WRITE(VLV_IIR, iir);
588 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
593 if (pch_iir & SDE_AUDIO_POWER_MASK)
594 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
595 (pch_iir & SDE_AUDIO_POWER_MASK) >>
596 SDE_AUDIO_POWER_SHIFT);
598 if (pch_iir & SDE_GMBUS)
599 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
601 if (pch_iir & SDE_AUDIO_HDCP_MASK)
602 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
604 if (pch_iir & SDE_AUDIO_TRANS_MASK)
605 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
607 if (pch_iir & SDE_POISON)
608 DRM_ERROR("PCH poison interrupt\n");
610 if (pch_iir & SDE_FDI_MASK)
612 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
614 I915_READ(FDI_RX_IIR(pipe)));
616 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
617 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
619 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
620 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
622 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
623 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
624 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
625 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
628 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
633 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
634 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
635 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
636 SDE_AUDIO_POWER_SHIFT_CPT);
638 if (pch_iir & SDE_AUX_MASK_CPT)
639 DRM_DEBUG_DRIVER("AUX channel interrupt\n");
641 if (pch_iir & SDE_GMBUS_CPT)
642 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
644 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
645 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
647 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
648 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
650 if (pch_iir & SDE_FDI_MASK_CPT)
652 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
654 I915_READ(FDI_RX_IIR(pipe)));
657 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
659 struct drm_device *dev = (struct drm_device *) arg;
660 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
661 u32 de_iir, gt_iir, de_ier, pm_iir;
662 irqreturn_t ret = IRQ_NONE;
665 atomic_inc(&dev_priv->irq_received);
667 /* disable master interrupt before clearing iir */
668 de_ier = I915_READ(DEIER);
669 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
671 gt_iir = I915_READ(GTIIR);
673 snb_gt_irq_handler(dev, dev_priv, gt_iir);
674 I915_WRITE(GTIIR, gt_iir);
678 de_iir = I915_READ(DEIIR);
680 if (de_iir & DE_GSE_IVB)
681 intel_opregion_gse_intr(dev);
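/* The IVB display engine interrupt bits for pipes A/B/C are spaced five
 * bits apart, hence the << (5 * i) when walking the pipes below.
 */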
683 for (i = 0; i < 3; i++) {
684 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
685 intel_prepare_page_flip(dev, i);
686 intel_finish_page_flip_plane(dev, i);
688 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
689 drm_handle_vblank(dev, i);
692 /* check event from PCH */
693 if (de_iir & DE_PCH_EVENT_IVB) {
694 u32 pch_iir = I915_READ(SDEIIR);
696 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
697 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
698 cpt_irq_handler(dev, pch_iir);
700 /* clear the PCH hotplug event before clearing the CPU irq */
701 I915_WRITE(SDEIIR, pch_iir);
704 I915_WRITE(DEIIR, de_iir);
708 pm_iir = I915_READ(GEN6_PMIIR);
710 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
711 gen6_queue_rps_work(dev_priv, pm_iir);
712 I915_WRITE(GEN6_PMIIR, pm_iir);
716 I915_WRITE(DEIER, de_ier);
722 static void ilk_gt_irq_handler(struct drm_device *dev,
723 struct drm_i915_private *dev_priv,
726 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
727 notify_ring(dev, &dev_priv->ring[RCS]);
728 if (gt_iir & GT_BSD_USER_INTERRUPT)
729 notify_ring(dev, &dev_priv->ring[VCS]);
732 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
734 struct drm_device *dev = (struct drm_device *) arg;
735 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
737 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
740 atomic_inc(&dev_priv->irq_received);
742 /* disable master interrupt before clearing iir */
743 de_ier = I915_READ(DEIER);
744 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
747 de_iir = I915_READ(DEIIR);
748 gt_iir = I915_READ(GTIIR);
749 pch_iir = I915_READ(SDEIIR);
750 pm_iir = I915_READ(GEN6_PMIIR);
752 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
753 (!IS_GEN6(dev) || pm_iir == 0))
756 if (HAS_PCH_CPT(dev))
757 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
759 hotplug_mask = SDE_HOTPLUG_MASK;
764 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
766 snb_gt_irq_handler(dev, dev_priv, gt_iir);
769 intel_opregion_gse_intr(dev);
771 if (de_iir & DE_PLANEA_FLIP_DONE) {
772 intel_prepare_page_flip(dev, 0);
773 intel_finish_page_flip_plane(dev, 0);
776 if (de_iir & DE_PLANEB_FLIP_DONE) {
777 intel_prepare_page_flip(dev, 1);
778 intel_finish_page_flip_plane(dev, 1);
781 if (de_iir & DE_PIPEA_VBLANK)
782 drm_handle_vblank(dev, 0);
784 if (de_iir & DE_PIPEB_VBLANK)
785 drm_handle_vblank(dev, 1);
787 /* check event from PCH */
788 if (de_iir & DE_PCH_EVENT) {
789 if (pch_iir & hotplug_mask)
790 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
791 if (HAS_PCH_CPT(dev))
792 cpt_irq_handler(dev, pch_iir);
794 ibx_irq_handler(dev, pch_iir);
797 if (de_iir & DE_PCU_EVENT) {
798 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
799 i915_handle_rps_change(dev);
802 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
803 gen6_queue_rps_work(dev_priv, pm_iir);
805 /* clear the PCH hotplug event before clearing the CPU irq */
806 I915_WRITE(SDEIIR, pch_iir);
807 I915_WRITE(GTIIR, gt_iir);
808 I915_WRITE(DEIIR, de_iir);
809 I915_WRITE(GEN6_PMIIR, pm_iir);
812 I915_WRITE(DEIER, de_ier);
819 * i915_error_work_func - do process context error handling work
822 * Fire an error uevent so userspace can see that a hang or error occurred.
825 static void i915_error_work_func(struct work_struct *work)
827 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
829 struct drm_device *dev = dev_priv->dev;
830 char *error_event[] = { "ERROR=1", NULL };
831 char *reset_event[] = { "RESET=1", NULL };
832 char *reset_done_event[] = { "ERROR=0", NULL };
834 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
836 if (atomic_read(&dev_priv->mm.wedged)) {
837 DRM_DEBUG_DRIVER("resetting chip\n");
838 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
839 if (!i915_reset(dev)) {
840 atomic_set(&dev_priv->mm.wedged, 0);
841 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
843 complete_all(&dev_priv->error_completion);
847 #ifdef CONFIG_DEBUG_FS
848 static struct drm_i915_error_object *
849 i915_error_object_create(struct drm_i915_private *dev_priv,
850 struct drm_i915_gem_object *src)
852 struct drm_i915_error_object *dst;
853 int page, page_count;
856 if (src == NULL || src->pages == NULL)
859 page_count = src->base.size / PAGE_SIZE;
861 dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
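/* GFP_ATOMIC throughout: error capture may be invoked from interrupt
 * context, so we must not sleep while allocating.
 */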
865 reloc_offset = src->gtt_offset;
866 for (page = 0; page < page_count; page++) {
870 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
874 local_irq_save(flags);
875 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
876 src->has_global_gtt_mapping) {
879 /* Simply ignore tiling or any overlapping fence.
880 * It's part of the error state, and this hopefully
881 * captures what the GPU read.
884 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
886 memcpy_fromio(d, s, PAGE_SIZE);
887 io_mapping_unmap_atomic(s);
891 drm_clflush_pages(&src->pages[page], 1);
893 s = kmap_atomic(src->pages[page]);
894 memcpy(d, s, PAGE_SIZE);
897 drm_clflush_pages(&src->pages[page], 1);
899 local_irq_restore(flags);
901 dst->pages[page] = d;
903 reloc_offset += PAGE_SIZE;
905 dst->page_count = page_count;
906 dst->gtt_offset = src->gtt_offset;
912 kfree(dst->pages[page]);
918 i915_error_object_free(struct drm_i915_error_object *obj)
925 for (page = 0; page < obj->page_count; page++)
926 kfree(obj->pages[page]);
932 i915_error_state_free(struct kref *error_ref)
934 struct drm_i915_error_state *error = container_of(error_ref,
935 typeof(*error), ref);
938 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
939 i915_error_object_free(error->ring[i].batchbuffer);
940 i915_error_object_free(error->ring[i].ringbuffer);
941 kfree(error->ring[i].requests);
944 kfree(error->active_bo);
945 kfree(error->overlay);
948 static void capture_bo(struct drm_i915_error_buffer *err,
949 struct drm_i915_gem_object *obj)
951 err->size = obj->base.size;
952 err->name = obj->base.name;
953 err->seqno = obj->last_rendering_seqno;
954 err->gtt_offset = obj->gtt_offset;
955 err->read_domains = obj->base.read_domains;
956 err->write_domain = obj->base.write_domain;
957 err->fence_reg = obj->fence_reg;
959 if (obj->pin_count > 0)
961 if (obj->user_pin_count > 0)
963 err->tiling = obj->tiling_mode;
964 err->dirty = obj->dirty;
965 err->purgeable = obj->madv != I915_MADV_WILLNEED;
966 err->ring = obj->ring ? obj->ring->id : -1;
967 err->cache_level = obj->cache_level;
970 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
971 int count, struct list_head *head)
973 struct drm_i915_gem_object *obj;
976 list_for_each_entry(obj, head, mm_list) {
977 capture_bo(err++, obj);
985 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
986 int count, struct list_head *head)
988 struct drm_i915_gem_object *obj;
991 list_for_each_entry(obj, head, gtt_list) {
992 if (obj->pin_count == 0)
995 capture_bo(err++, obj);
1003 static void i915_gem_record_fences(struct drm_device *dev,
1004 struct drm_i915_error_state *error)
1006 struct drm_i915_private *dev_priv = dev->dev_private;
1010 switch (INTEL_INFO(dev)->gen) {
1013 for (i = 0; i < 16; i++)
1014 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1018 for (i = 0; i < 16; i++)
1019 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1022 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1023 for (i = 0; i < 8; i++)
1024 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1026 for (i = 0; i < 8; i++)
1027 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1033 static struct drm_i915_error_object *
1034 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1035 struct intel_ring_buffer *ring)
1037 struct drm_i915_gem_object *obj;
1040 if (!ring->get_seqno)
1043 seqno = ring->get_seqno(ring);
1044 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1045 if (obj->ring != ring)
1048 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
1051 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1054 /* We need to copy these to an anonymous buffer as the simplest
1055 * method to avoid being overwritten by userspace.
1057 return i915_error_object_create(dev_priv, obj);
1063 static void i915_record_ring_state(struct drm_device *dev,
1064 struct drm_i915_error_state *error,
1065 struct intel_ring_buffer *ring)
1067 struct drm_i915_private *dev_priv = dev->dev_private;
1069 if (INTEL_INFO(dev)->gen >= 6) {
1070 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1071 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1072 error->semaphore_mboxes[ring->id][0]
1073 = I915_READ(RING_SYNC_0(ring->mmio_base));
1074 error->semaphore_mboxes[ring->id][1]
1075 = I915_READ(RING_SYNC_1(ring->mmio_base));
1078 if (INTEL_INFO(dev)->gen >= 4) {
1079 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1080 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1081 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1082 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1083 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1084 if (ring->id == RCS) {
1085 error->instdone1 = I915_READ(INSTDONE1);
1086 error->bbaddr = I915_READ64(BB_ADDR);
1089 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1090 error->ipeir[ring->id] = I915_READ(IPEIR);
1091 error->ipehr[ring->id] = I915_READ(IPEHR);
1092 error->instdone[ring->id] = I915_READ(INSTDONE);
1095 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1096 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1097 error->seqno[ring->id] = ring->get_seqno(ring);
1098 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1099 error->head[ring->id] = I915_READ_HEAD(ring);
1100 error->tail[ring->id] = I915_READ_TAIL(ring);
1102 error->cpu_ring_head[ring->id] = ring->head;
1103 error->cpu_ring_tail[ring->id] = ring->tail;
1106 static void i915_gem_record_rings(struct drm_device *dev,
1107 struct drm_i915_error_state *error)
1109 struct drm_i915_private *dev_priv = dev->dev_private;
1110 struct intel_ring_buffer *ring;
1111 struct drm_i915_gem_request *request;
1114 for_each_ring(ring, dev_priv, i) {
1115 i915_record_ring_state(dev, error, ring);
1117 error->ring[i].batchbuffer =
1118 i915_error_first_batchbuffer(dev_priv, ring);
1120 error->ring[i].ringbuffer =
1121 i915_error_object_create(dev_priv, ring->obj);
1124 list_for_each_entry(request, &ring->request_list, list)
1127 error->ring[i].num_requests = count;
1128 error->ring[i].requests =
1129 kmalloc(count*sizeof(struct drm_i915_error_request),
1131 if (error->ring[i].requests == NULL) {
1132 error->ring[i].num_requests = 0;
1137 list_for_each_entry(request, &ring->request_list, list) {
1138 struct drm_i915_error_request *erq;
1140 erq = &error->ring[i].requests[count++];
1141 erq->seqno = request->seqno;
1142 erq->jiffies = request->emitted_jiffies;
1143 erq->tail = request->tail;
1149 * i915_capture_error_state - capture an error record for later analysis
1152 * Should be called when an error is detected (either a hang or an error
1153 * interrupt) to capture error state from the time of the error. Fills
1154 * out a structure which becomes available in debugfs for user level tools to leverage.
1157 static void i915_capture_error_state(struct drm_device *dev)
1159 struct drm_i915_private *dev_priv = dev->dev_private;
1160 struct drm_i915_gem_object *obj;
1161 struct drm_i915_error_state *error;
1162 unsigned long flags;
1165 spin_lock_irqsave(&dev_priv->error_lock, flags);
1166 error = dev_priv->first_error;
1167 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1171 /* Account for pipe specific data like PIPE*STAT */
1172 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1174 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1178 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1179 dev->primary->index);
1181 kref_init(&error->ref);
1182 error->eir = I915_READ(EIR);
1183 error->pgtbl_er = I915_READ(PGTBL_ER);
1184 error->ccid = I915_READ(CCID);
1186 if (HAS_PCH_SPLIT(dev))
1187 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1188 else if (IS_VALLEYVIEW(dev))
1189 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1190 else if (IS_GEN2(dev))
1191 error->ier = I915_READ16(IER);
1193 error->ier = I915_READ(IER);
1196 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1198 if (INTEL_INFO(dev)->gen >= 6) {
1199 error->error = I915_READ(ERROR_GEN6);
1200 error->done_reg = I915_READ(DONE_REG);
1203 i915_gem_record_fences(dev, error);
1204 i915_gem_record_rings(dev, error);
1206 /* Record buffers on the active and pinned lists. */
1207 error->active_bo = NULL;
1208 error->pinned_bo = NULL;
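/* First pass: count the active and pinned objects so we know how much to
 * allocate; the actual capture into the arrays happens below.
 */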
1211 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1213 error->active_bo_count = i;
1214 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
1217 error->pinned_bo_count = i - error->active_bo_count;
1219 error->active_bo = NULL;
1220 error->pinned_bo = NULL;
1222 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1224 if (error->active_bo)
1226 error->active_bo + error->active_bo_count;
1229 if (error->active_bo)
1230 error->active_bo_count =
1231 capture_active_bo(error->active_bo,
1232 error->active_bo_count,
1233 &dev_priv->mm.active_list);
1235 if (error->pinned_bo)
1236 error->pinned_bo_count =
1237 capture_pinned_bo(error->pinned_bo,
1238 error->pinned_bo_count,
1239 &dev_priv->mm.gtt_list);
1241 do_gettimeofday(&error->time);
1243 error->overlay = intel_overlay_capture_error_state(dev);
1244 error->display = intel_display_capture_error_state(dev);
1246 spin_lock_irqsave(&dev_priv->error_lock, flags);
1247 if (dev_priv->first_error == NULL) {
1248 dev_priv->first_error = error;
1251 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1254 i915_error_state_free(&error->ref);
1257 void i915_destroy_error_state(struct drm_device *dev)
1259 struct drm_i915_private *dev_priv = dev->dev_private;
1260 struct drm_i915_error_state *error;
1261 unsigned long flags;
1263 spin_lock_irqsave(&dev_priv->error_lock, flags);
1264 error = dev_priv->first_error;
1265 dev_priv->first_error = NULL;
1266 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1269 kref_put(&error->ref, i915_error_state_free);
1272 #define i915_capture_error_state(x)
1275 static void i915_report_and_clear_eir(struct drm_device *dev)
1277 struct drm_i915_private *dev_priv = dev->dev_private;
1278 u32 eir = I915_READ(EIR);
1284 pr_err("render error detected, EIR: 0x%08x\n", eir);
1287 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1288 u32 ipeir = I915_READ(IPEIR_I965);
1290 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1291 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1292 pr_err(" INSTDONE: 0x%08x\n",
1293 I915_READ(INSTDONE_I965));
1294 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1295 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1296 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1297 I915_WRITE(IPEIR_I965, ipeir);
1298 POSTING_READ(IPEIR_I965);
1300 if (eir & GM45_ERROR_PAGE_TABLE) {
1301 u32 pgtbl_err = I915_READ(PGTBL_ER);
1302 pr_err("page table error\n");
1303 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1304 I915_WRITE(PGTBL_ER, pgtbl_err);
1305 POSTING_READ(PGTBL_ER);
1309 if (!IS_GEN2(dev)) {
1310 if (eir & I915_ERROR_PAGE_TABLE) {
1311 u32 pgtbl_err = I915_READ(PGTBL_ER);
1312 pr_err("page table error\n");
1313 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1314 I915_WRITE(PGTBL_ER, pgtbl_err);
1315 POSTING_READ(PGTBL_ER);
1319 if (eir & I915_ERROR_MEMORY_REFRESH) {
1320 pr_err("memory refresh error:\n");
1322 pr_err("pipe %c stat: 0x%08x\n",
1323 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1324 /* pipestat has already been acked */
1326 if (eir & I915_ERROR_INSTRUCTION) {
1327 pr_err("instruction error\n");
1328 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1329 if (INTEL_INFO(dev)->gen < 4) {
1330 u32 ipeir = I915_READ(IPEIR);
1332 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1333 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1334 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
1335 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1336 I915_WRITE(IPEIR, ipeir);
1337 POSTING_READ(IPEIR);
1339 u32 ipeir = I915_READ(IPEIR_I965);
1341 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1342 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1343 pr_err(" INSTDONE: 0x%08x\n",
1344 I915_READ(INSTDONE_I965));
1345 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1346 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1347 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1348 I915_WRITE(IPEIR_I965, ipeir);
1349 POSTING_READ(IPEIR_I965);
1353 I915_WRITE(EIR, eir);
1355 eir = I915_READ(EIR);
1358 * some errors might have become stuck, so mask them.
1361 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1362 I915_WRITE(EMR, I915_READ(EMR) | eir);
1363 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1368 * i915_handle_error - handle an error interrupt
1371 * Do some basic checking of register state at error interrupt time and
1372 * dump it to the syslog. Also call i915_capture_error_state() to make
1373 * sure we get a record and make it available in debugfs. Fire a uevent
1374 * so userspace knows something bad happened (should trigger collection
1375 * of a ring dump etc.).
1377 void i915_handle_error(struct drm_device *dev, bool wedged)
1379 struct drm_i915_private *dev_priv = dev->dev_private;
1380 struct intel_ring_buffer *ring;
1383 i915_capture_error_state(dev);
1384 i915_report_and_clear_eir(dev);
1387 INIT_COMPLETION(dev_priv->error_completion);
1388 atomic_set(&dev_priv->mm.wedged, 1);
1391 * Wakeup waiting processes so they don't hang
1393 for_each_ring(ring, dev_priv, i)
1394 wake_up_all(&ring->irq_queue);
1397 queue_work(dev_priv->wq, &dev_priv->error_work);
1400 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1402 drm_i915_private_t *dev_priv = dev->dev_private;
1403 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1404 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1405 struct drm_i915_gem_object *obj;
1406 struct intel_unpin_work *work;
1407 unsigned long flags;
1408 bool stall_detected;
1410 /* Ignore early vblank irqs */
1411 if (intel_crtc == NULL)
1414 spin_lock_irqsave(&dev->event_lock, flags);
1415 work = intel_crtc->unpin_work;
1417 if (work == NULL || work->pending || !work->enable_stall_check) {
1418 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1419 spin_unlock_irqrestore(&dev->event_lock, flags);
1423 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1424 obj = work->pending_flip_obj;
1425 if (INTEL_INFO(dev)->gen >= 4) {
1426 int dspsurf = DSPSURF(intel_crtc->plane);
1427 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1430 int dspaddr = DSPADDR(intel_crtc->plane);
1431 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1432 crtc->y * crtc->fb->pitches[0] +
1433 crtc->x * crtc->fb->bits_per_pixel/8);
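/* If the display base already points at the new framebuffer, the flip
 * completed in hardware but we never saw the flip-done interrupt.
 */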
1436 spin_unlock_irqrestore(&dev->event_lock, flags);
1438 if (stall_detected) {
1439 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1440 intel_prepare_page_flip(dev, intel_crtc->plane);
1444 /* Called from drm generic code, passed 'crtc' which
1445 * we use as a pipe index
1447 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1449 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1450 unsigned long irqflags;
1452 if (!i915_pipe_enabled(dev, pipe))
1455 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1456 if (INTEL_INFO(dev)->gen >= 4)
1457 i915_enable_pipestat(dev_priv, pipe,
1458 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1460 i915_enable_pipestat(dev_priv, pipe,
1461 PIPE_VBLANK_INTERRUPT_ENABLE);
1463 /* maintain vblank delivery even in deep C-states */
1464 if (dev_priv->info->gen == 3)
1465 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1466 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1471 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1473 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1474 unsigned long irqflags;
1476 if (!i915_pipe_enabled(dev, pipe))
1479 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1480 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1481 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1482 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1487 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1489 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1490 unsigned long irqflags;
1492 if (!i915_pipe_enabled(dev, pipe))
1495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1496 ironlake_enable_display_irq(dev_priv,
1497 DE_PIPEA_VBLANK_IVB << (5 * pipe));
1498 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1503 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1505 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1506 unsigned long irqflags;
1509 if (!i915_pipe_enabled(dev, pipe))
1512 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1513 imr = I915_READ(VLV_IMR);
1515 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1517 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1518 I915_WRITE(VLV_IMR, imr);
1519 i915_enable_pipestat(dev_priv, pipe,
1520 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1521 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1526 /* Called from drm generic code, passed 'crtc' which
1527 * we use as a pipe index
1529 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1531 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1532 unsigned long irqflags;
1534 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1535 if (dev_priv->info->gen == 3)
1536 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1538 i915_disable_pipestat(dev_priv, pipe,
1539 PIPE_VBLANK_INTERRUPT_ENABLE |
1540 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1541 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1544 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1546 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1547 unsigned long irqflags;
1549 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1550 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1551 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1552 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1555 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1557 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1558 unsigned long irqflags;
1560 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1561 ironlake_disable_display_irq(dev_priv,
1562 DE_PIPEA_VBLANK_IVB << (pipe * 5));
1563 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1566 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1568 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1569 unsigned long irqflags;
1572 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1573 i915_disable_pipestat(dev_priv, pipe,
1574 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1575 imr = I915_READ(VLV_IMR);
1577 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1579 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1580 I915_WRITE(VLV_IMR, imr);
1581 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1585 ring_last_seqno(struct intel_ring_buffer *ring)
1587 return list_entry(ring->request_list.prev,
1588 struct drm_i915_gem_request, list)->seqno;
1591 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1593 if (list_empty(&ring->request_list) ||
1594 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1595 /* Issue a wake-up to catch stuck h/w. */
1596 if (waitqueue_active(&ring->irq_queue)) {
1597 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1599 wake_up_all(&ring->irq_queue);
1607 static bool kick_ring(struct intel_ring_buffer *ring)
1609 struct drm_device *dev = ring->dev;
1610 struct drm_i915_private *dev_priv = dev->dev_private;
1611 u32 tmp = I915_READ_CTL(ring);
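/* If the ring is stuck on an MI_WAIT_FOR_EVENT, writing the control
 * register back with RING_WAIT still set pokes it out of the wait and
 * should break the hang.
 */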
1612 if (tmp & RING_WAIT) {
1613 DRM_ERROR("Kicking stuck wait on %s\n",
1615 I915_WRITE_CTL(ring, tmp);
1621 static bool i915_hangcheck_hung(struct drm_device *dev)
1623 drm_i915_private_t *dev_priv = dev->dev_private;
1625 if (dev_priv->hangcheck_count++ > 1) {
1628 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1629 i915_handle_error(dev, true);
1631 if (!IS_GEN2(dev)) {
1632 struct intel_ring_buffer *ring;
1635 /* Is the chip hanging on a WAIT_FOR_EVENT?
1636 * If so we can simply poke the RB_WAIT bit
1637 * and break the hang. This should work on
1638 * all but the second generation chipsets.
1640 for_each_ring(ring, dev_priv, i)
1641 hung &= !kick_ring(ring);
1651 * This is called when the chip hasn't reported back with completed
1652 * batchbuffers in a long time. The first time this is called we simply record
1653 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1654 * again, we assume the chip is wedged and try to fix it.
1656 void i915_hangcheck_elapsed(unsigned long data)
1658 struct drm_device *dev = (struct drm_device *)data;
1659 drm_i915_private_t *dev_priv = dev->dev_private;
1660 uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
1661 struct intel_ring_buffer *ring;
1662 bool err = false, idle;
1665 if (!i915_enable_hangcheck)
1668 memset(acthd, 0, sizeof(acthd));
1670 for_each_ring(ring, dev_priv, i) {
1671 idle &= i915_hangcheck_ring_idle(ring, &err);
1672 acthd[i] = intel_ring_get_active_head(ring);
1675 /* If all work is done then ACTHD clearly hasn't advanced. */
1678 if (i915_hangcheck_hung(dev))
1684 dev_priv->hangcheck_count = 0;
1688 if (INTEL_INFO(dev)->gen < 4) {
1689 instdone = I915_READ(INSTDONE);
1692 instdone = I915_READ(INSTDONE_I965);
1693 instdone1 = I915_READ(INSTDONE1);
1696 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1697 dev_priv->last_instdone == instdone &&
1698 dev_priv->last_instdone1 == instdone1) {
1699 if (i915_hangcheck_hung(dev))
1702 dev_priv->hangcheck_count = 0;
1704 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1705 dev_priv->last_instdone = instdone;
1706 dev_priv->last_instdone1 = instdone1;
1710 /* Reset the timer in case the chip hangs without another request being added */
1711 mod_timer(&dev_priv->hangcheck_timer,
1712 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1717 static void ironlake_irq_preinstall(struct drm_device *dev)
1719 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1721 atomic_set(&dev_priv->irq_received, 0);
1723 I915_WRITE(HWSTAM, 0xeffe);
1725 /* XXX hotplug from PCH */
1727 I915_WRITE(DEIMR, 0xffffffff);
1728 I915_WRITE(DEIER, 0x0);
1729 POSTING_READ(DEIER);
1732 I915_WRITE(GTIMR, 0xffffffff);
1733 I915_WRITE(GTIER, 0x0);
1734 POSTING_READ(GTIER);
1736 /* south display irq */
1737 I915_WRITE(SDEIMR, 0xffffffff);
1738 I915_WRITE(SDEIER, 0x0);
1739 POSTING_READ(SDEIER);
1742 static void valleyview_irq_preinstall(struct drm_device *dev)
1744 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1747 atomic_set(&dev_priv->irq_received, 0);
1750 I915_WRITE(VLV_IMR, 0);
1751 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1752 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1753 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1756 I915_WRITE(GTIIR, I915_READ(GTIIR));
1757 I915_WRITE(GTIIR, I915_READ(GTIIR));
1758 I915_WRITE(GTIMR, 0xffffffff);
1759 I915_WRITE(GTIER, 0x0);
1760 POSTING_READ(GTIER);
1762 I915_WRITE(DPINVGTT, 0xff);
1764 I915_WRITE(PORT_HOTPLUG_EN, 0);
1765 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1767 I915_WRITE(PIPESTAT(pipe), 0xffff);
1768 I915_WRITE(VLV_IIR, 0xffffffff);
1769 I915_WRITE(VLV_IMR, 0xffffffff);
1770 I915_WRITE(VLV_IER, 0x0);
1771 POSTING_READ(VLV_IER);
1775 * Enable digital hotplug on the PCH, and configure the DP short pulse
1776 * duration to 2ms (which is the minimum in the Display Port spec)
1778 * This register is the same on all known PCH chips.
1781 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1783 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1786 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1787 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1788 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1789 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1790 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1791 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1794 static int ironlake_irq_postinstall(struct drm_device *dev)
1796 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1797 /* enable the kinds of interrupts that are always enabled */
1798 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1799 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1803 dev_priv->irq_mask = ~display_mask;
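/* Vblank bits are enabled in DEIER below but stay masked in DEIMR, so
 * the drm enable/disable_vblank hooks only need to toggle DEIMR.
 */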
1805 /* these should always be able to generate an irq */
1806 I915_WRITE(DEIIR, I915_READ(DEIIR));
1807 I915_WRITE(DEIMR, dev_priv->irq_mask);
1808 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1809 POSTING_READ(DEIER);
1811 dev_priv->gt_irq_mask = ~0;
1813 I915_WRITE(GTIIR, I915_READ(GTIIR));
1814 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1819 GEN6_BSD_USER_INTERRUPT |
1820 GEN6_BLITTER_USER_INTERRUPT;
1825 GT_BSD_USER_INTERRUPT;
1826 I915_WRITE(GTIER, render_irqs);
1827 POSTING_READ(GTIER);
1829 if (HAS_PCH_CPT(dev)) {
1830 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1831 SDE_PORTB_HOTPLUG_CPT |
1832 SDE_PORTC_HOTPLUG_CPT |
1833 SDE_PORTD_HOTPLUG_CPT);
1835 hotplug_mask = (SDE_CRT_HOTPLUG |
1842 dev_priv->pch_irq_mask = ~hotplug_mask;
1844 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1845 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1846 I915_WRITE(SDEIER, hotplug_mask);
1847 POSTING_READ(SDEIER);
1849 ironlake_enable_pch_hotplug(dev);
1851 if (IS_IRONLAKE_M(dev)) {
1852 /* Clear & enable PCU event interrupts */
1853 I915_WRITE(DEIIR, DE_PCU_EVENT);
1854 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1855 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1861 static int ivybridge_irq_postinstall(struct drm_device *dev)
1863 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1864 /* enable the kinds of interrupts that are always enabled */
1866 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1867 DE_PLANEC_FLIP_DONE_IVB |
1868 DE_PLANEB_FLIP_DONE_IVB |
1869 DE_PLANEA_FLIP_DONE_IVB;
1873 dev_priv->irq_mask = ~display_mask;
1875 /* these should always be able to generate an irq */
1876 I915_WRITE(DEIIR, I915_READ(DEIIR));
1877 I915_WRITE(DEIMR, dev_priv->irq_mask);
1880 DE_PIPEC_VBLANK_IVB |
1881 DE_PIPEB_VBLANK_IVB |
1882 DE_PIPEA_VBLANK_IVB);
1883 POSTING_READ(DEIER);
1885 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1887 I915_WRITE(GTIIR, I915_READ(GTIIR));
1888 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1890 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1891 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1892 I915_WRITE(GTIER, render_irqs);
1893 POSTING_READ(GTIER);
1895 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1896 SDE_PORTB_HOTPLUG_CPT |
1897 SDE_PORTC_HOTPLUG_CPT |
1898 SDE_PORTD_HOTPLUG_CPT);
1899 dev_priv->pch_irq_mask = ~hotplug_mask;
1901 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1902 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1903 I915_WRITE(SDEIER, hotplug_mask);
1904 POSTING_READ(SDEIER);
1906 ironlake_enable_pch_hotplug(dev);
1911 static int valleyview_irq_postinstall(struct drm_device *dev)
1913 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1915 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1916 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1919 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1920 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1921 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1922 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1923 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1926 * Leave vblank interrupts masked initially; enable/disable will
1927 * toggle them based on usage.
1929 dev_priv->irq_mask = (~enable_mask) |
1930 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1931 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1933 dev_priv->pipestat[0] = 0;
1934 dev_priv->pipestat[1] = 0;
1936 /* Hack for broken MSIs on VLV */
1937 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
1938 pci_read_config_word(dev->pdev, 0x98, &msid);
1939 msid &= 0xff; /* mask out delivery bits */
1941 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
1943 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1944 I915_WRITE(VLV_IER, enable_mask);
1945 I915_WRITE(VLV_IIR, 0xffffffff);
1946 I915_WRITE(PIPESTAT(0), 0xffff);
1947 I915_WRITE(PIPESTAT(1), 0xffff);
1948 POSTING_READ(VLV_IER);
1950 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
1951 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
1953 I915_WRITE(VLV_IIR, 0xffffffff);
1954 I915_WRITE(VLV_IIR, 0xffffffff);
1956 dev_priv->gt_irq_mask = ~0;
1958 I915_WRITE(GTIIR, I915_READ(GTIIR));
1959 I915_WRITE(GTIIR, I915_READ(GTIIR));
1960 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1961 I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1962 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1963 GT_GEN6_BLT_USER_INTERRUPT |
1964 GT_GEN6_BSD_USER_INTERRUPT |
1965 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1966 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1968 GT_RENDER_CS_ERROR_INTERRUPT |
1971 POSTING_READ(GTIER);
1973 /* ack & enable invalid PTE error interrupts */
1974 #if 0 /* FIXME: add support to irq handler for checking these bits */
1975 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1976 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1979 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1980 #if 0 /* FIXME: check register definitions; some have moved */
1981 /* Note HDMI and DP share bits */
1982 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1983 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1984 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1985 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1986 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1987 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1988 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1989 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1990 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1991 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1992 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1993 hotplug_en |= CRT_HOTPLUG_INT_EN;
1994 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1998 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2003 static void valleyview_irq_uninstall(struct drm_device *dev)
2005 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2012 I915_WRITE(PIPESTAT(pipe), 0xffff);
2014 I915_WRITE(HWSTAM, 0xffffffff);
2015 I915_WRITE(PORT_HOTPLUG_EN, 0);
2016 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2018 I915_WRITE(PIPESTAT(pipe), 0xffff);
2019 I915_WRITE(VLV_IIR, 0xffffffff);
2020 I915_WRITE(VLV_IMR, 0xffffffff);
2021 I915_WRITE(VLV_IER, 0x0);
2022 POSTING_READ(VLV_IER);
2025 static void ironlake_irq_uninstall(struct drm_device *dev)
2027 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2032 I915_WRITE(HWSTAM, 0xffffffff);
2034 I915_WRITE(DEIMR, 0xffffffff);
2035 I915_WRITE(DEIER, 0x0);
2036 I915_WRITE(DEIIR, I915_READ(DEIIR));
2038 I915_WRITE(GTIMR, 0xffffffff);
2039 I915_WRITE(GTIER, 0x0);
2040 I915_WRITE(GTIIR, I915_READ(GTIIR));
2042 I915_WRITE(SDEIMR, 0xffffffff);
2043 I915_WRITE(SDEIER, 0x0);
2044 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2047 static void i8xx_irq_preinstall(struct drm_device * dev)
2049 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2052 atomic_set(&dev_priv->irq_received, 0);
2055 I915_WRITE(PIPESTAT(pipe), 0);
2056 I915_WRITE16(IMR, 0xffff);
2057 I915_WRITE16(IER, 0x0);
2058 POSTING_READ16(IER);
2061 static int i8xx_irq_postinstall(struct drm_device *dev)
2063 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2065 dev_priv->pipestat[0] = 0;
2066 dev_priv->pipestat[1] = 0;
2069 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2071 /* Unmask the interrupts that we always want on. */
2072 dev_priv->irq_mask =
2073 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2074 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2075 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2076 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2077 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2078 I915_WRITE16(IMR, dev_priv->irq_mask);
2081 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2082 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2083 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2084 I915_USER_INTERRUPT);
2085 POSTING_READ16(IER);
2090 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
2092 struct drm_device *dev = (struct drm_device *) arg;
2093 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2096 unsigned long irqflags;
2100 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2101 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
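/* flip_mask keeps the plane flip-pending bits out of the IIR ack below;
 * they are only dropped from the mask (and thus acked) once the vblank
 * that completes the corresponding flip has been handled.
 */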
2103 atomic_inc(&dev_priv->irq_received);
2105 iir = I915_READ16(IIR);
2109 while (iir & ~flip_mask) {
2110 /* Can't rely on pipestat interrupt bit in iir as it might
2111 * have been cleared after the pipestat interrupt was received.
2112 * It doesn't set the bit in iir again, but it still produces
2113 * interrupts (for non-MSI).
2115 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2116 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2117 i915_handle_error(dev, false);
2119 for_each_pipe(pipe) {
2120 int reg = PIPESTAT(pipe);
2121 pipe_stats[pipe] = I915_READ(reg);
2124 * Clear the PIPE*STAT regs before the IIR
2126 if (pipe_stats[pipe] & 0x8000ffff) {
2127 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2128 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2130 I915_WRITE(reg, pipe_stats[pipe]);
2134 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2136 I915_WRITE16(IIR, iir & ~flip_mask);
2137 new_iir = I915_READ16(IIR); /* Flush posted writes */
2139 i915_update_dri1_breadcrumb(dev);
2141 if (iir & I915_USER_INTERRUPT)
2142 notify_ring(dev, &dev_priv->ring[RCS]);
2144 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2145 drm_handle_vblank(dev, 0)) {
2146 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2147 intel_prepare_page_flip(dev, 0);
2148 intel_finish_page_flip(dev, 0);
2149 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2153 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2154 drm_handle_vblank(dev, 1)) {
2155 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2156 intel_prepare_page_flip(dev, 1);
2157 intel_finish_page_flip(dev, 1);
2158 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR (see the mask sketch after this function) */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

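/*
 * Illustrative sketch (not driver code): i915_irq_postinstall() above keeps
 * two complementary masks -- enable_mask goes into IER to turn sources on,
 * while dev_priv->irq_mask is the inverted "wanted" set written to IMR, so
 * the hotplug source must be OR'd into one and cleared from the other.  The
 * fragment below exercises only that bit bookkeeping; the EVT_* names are
 * invented for the example.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EVT_PIPE_A	(1u << 0)
#define EVT_PIPE_B	(1u << 1)
#define EVT_USER	(1u << 2)
#define EVT_HOTPLUG	(1u << 3)

int main(void)
{
	int has_hotplug = 1;

	/* "Unmask the interrupts that we always want on": irq_mask keeps
	 * the bits that stay masked, i.e. the complement of the wanted set. */
	uint32_t irq_mask = ~(EVT_PIPE_A | EVT_PIPE_B | EVT_USER);
	uint32_t enable_mask = EVT_PIPE_A | EVT_PIPE_B | EVT_USER;

	if (has_hotplug) {
		enable_mask |= EVT_HOTPLUG;	/* enable in "IER"... */
		irq_mask &= ~EVT_HOTPLUG;	/* ...and unmask in "IMR" */
	}

	/* A source is live only if it is enabled and not masked; the two
	 * masks never overlap once they are kept in sync this way. */
	assert(enable_mask & EVT_HOTPLUG);
	assert(!(irq_mask & EVT_HOTPLUG));
	assert((irq_mask & enable_mask) == 0);
	return 0;
}
#endif
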
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 *
		 * (A standalone sketch of this IIR re-read loop follows
		 * this function.)
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

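/*
 * Illustrative sketch (not driver code): the comment at the end of the loop
 * in i915_irq_handler() explains why IIR is written and then immediately
 * read back -- with MSI an interrupt only fires on IIR's 0->1 transition, so
 * bits that appear while the current batch is being handled must be picked
 * up in the same pass.  The fragment below simulates that re-read loop with
 * an ordinary variable (fake_iir) standing in for the register and a hook
 * that injects one late event; all names are invented for the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iir;		/* stand-in for the IIR register */
static int late_events_left = 1;	/* one event arrives mid-handling */

static uint32_t iir_read(void)
{
	return fake_iir;
}

/* Writing 1s acks those bits; a new event may land right afterwards. */
static void iir_write(uint32_t ack)
{
	fake_iir &= ~ack;
	if (late_events_left > 0) {
		late_events_left--;
		fake_iir |= 1u << 3;
	}
}

int main(void)
{
	uint32_t iir, new_iir;
	int passes = 0;

	fake_iir = (1u << 0) | (1u << 1);

	iir = iir_read();
	while (iir) {
		iir_write(iir);		/* ack what is being handled */
		new_iir = iir_read();	/* catch anything that just landed */

		printf("pass %d: handled 0x%x, still pending 0x%x\n",
		       ++passes, iir, new_iir);

		iir = new_iir;		/* keep going until IIR stays 0 */
	}
	return 0;
}
#endif
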
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits (sketch follows this function) */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	intel_opregion_enable_asle(dev);

	return 0;
}

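/*
 * Illustrative sketch (not driver code, not a proposed change): the if-chain
 * in i965_irq_postinstall() above maps each *_HOTPLUG_INT_STATUS bit present
 * in hotplug_supported_mask to its *_HOTPLUG_INT_EN counterpart.  The same
 * status-to-enable translation can be written table-driven, as below; the
 * bit values and names are invented purely for the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct hotplug_map {
	uint32_t status;	/* detect-status bit */
	uint32_t enable;	/* matching interrupt-enable bit */
};

static const struct hotplug_map map[] = {
	{ 1u << 0, 1u << 16 },	/* "HDMIB" */
	{ 1u << 1, 1u << 17 },	/* "HDMIC" */
	{ 1u << 2, 1u << 18 },	/* "HDMID" */
	{ 1u << 3, 1u << 19 },	/* "SDVOB" */
	{ 1u << 4, 1u << 20 },	/* "SDVOC" */
	{ 1u << 5, 1u << 21 },	/* "CRT"   */
};

static uint32_t build_hotplug_en(uint32_t supported)
{
	uint32_t en = 0;
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (supported & map[i].status)
			en |= map[i].enable;
	return en;
}

int main(void)
{
	uint32_t supported = (1u << 0) | (1u << 5);	/* "HDMIB" + "CRT" */

	printf("hotplug_en = 0x%08x\n", build_hotplug_en(supported));
	return 0;
}
#endif
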
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupts handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			/* IIR "flip pending" means done if this bit is set */
			I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

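/*
 * Illustrative sketch (not driver code): intel_irq_init() above selects a
 * complete set of IRQ entry points once, based on the platform, and stores
 * them as function pointers that the DRM core calls later.  The fragment
 * below shows that select-once, call-through-pointer shape with dummy
 * handlers and a made-up "gen" field; none of these names exist in the
 * driver.
 */
#if 0
#include <stdio.h>

static void gen2_handler(void) { puts("gen2 irq handler"); }
static void gen3_handler(void) { puts("gen3 irq handler"); }
static void gen4_handler(void) { puts("gen4+ irq handler"); }

struct fake_driver {
	void (*irq_handler)(void);
};

/* Pick the entry point once at init time, by generation. */
static void fake_irq_init(struct fake_driver *drv, int gen)
{
	if (gen == 2)
		drv->irq_handler = gen2_handler;
	else if (gen == 3)
		drv->irq_handler = gen3_handler;
	else
		drv->irq_handler = gen4_handler;
}

int main(void)
{
	struct fake_driver drv;

	fake_irq_init(&drv, 3);
	drv.irq_handler();	/* later callers only see the pointer */
	return 0;
}
#endif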