/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

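/*
 * Per-platform capability tables. Each intel_device_info below records the
 * GPU generation and feature flags for one product family; the PCI ID table
 * further down maps device IDs onto these structs. The macros that follow
 * fill in the per-pipe register block offsets shared by most platforms.
 */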
#define GEN_DEFAULT_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          CHV_PIPE_C_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           CHV_TRANSCODER_C_OFFSET, }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
                             CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
        GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
        .is_skylake = 1,
        .gen = 9, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_gt3_info = {
        .is_skylake = 1,
        .gen = 9, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broxton_info = {
        .is_preliminary = 1,
        .gen = 9,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .num_pipes = 3,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

/*
 * Make sure the device matches here are ordered from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
        INTEL_I830_IDS(&intel_i830_info),       \
        INTEL_I845G_IDS(&intel_845g_info),      \
        INTEL_I85X_IDS(&intel_i85x_info),       \
        INTEL_I865G_IDS(&intel_i865g_info),     \
        INTEL_I915G_IDS(&intel_i915g_info),     \
        INTEL_I915GM_IDS(&intel_i915gm_info),   \
        INTEL_I945G_IDS(&intel_i945g_info),     \
        INTEL_I945GM_IDS(&intel_i945gm_info),   \
        INTEL_I965G_IDS(&intel_i965g_info),     \
        INTEL_G33_IDS(&intel_g33_info),         \
        INTEL_I965GM_IDS(&intel_i965gm_info),   \
        INTEL_GM45_IDS(&intel_gm45_info),       \
        INTEL_G45_IDS(&intel_g45_info),         \
        INTEL_PINEVIEW_IDS(&intel_pineview_info),       \
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),   \
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),   \
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),     \
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),     \
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),       \
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),       \
        INTEL_HSW_D_IDS(&intel_haswell_d_info), \
        INTEL_HSW_M_IDS(&intel_haswell_m_info), \
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),      \
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),      \
        INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),   \
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),   \
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
        INTEL_CHV_IDS(&intel_cherryview_info),  \
        INTEL_SKL_GT1_IDS(&intel_skylake_info), \
        INTEL_SKL_GT2_IDS(&intel_skylake_info), \
        INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),     \
        INTEL_BXT_IDS(&intel_broxton_info)

static const struct pci_device_id pciidlist[] = {               /* aka */
        INTEL_PCI_IDS,
        {0, 0, 0}
};

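/*
 * Export the ID table so that userspace (via modalias) can autoload this
 * module when a matching PCI device is found.
 */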
MODULE_DEVICE_TABLE(pci, pciidlist);

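/*
 * Identify the PCH (Platform Controller Hub) by probing the ISA bridge and
 * cache its type in dev_priv->pch_type for the rest of the driver.
 */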
void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch = NULL;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough easy for the VMM, which then only
         * needs to expose an ISA bridge to let the driver know the real
         * hardware underneath. This is a requirement from the virtualization
         * team.
         *
         * In some virtualized environments (e.g. XEN) there may be an
         * irrelevant ISA bridge in the system. To work reliably, we should
         * scan through all the ISA bridge devices and check for the first
         * match, instead of only checking the first one.
         */
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
                                WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
                                WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev));
                        } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev));
                        } else
                                continue;

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

        pci_dev_put(pch);
}

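/*
 * Decide whether inter-ring hardware semaphores may be used: they need gen6+,
 * can be forced either way with the i915.semaphores modparam, and are
 * currently kept off with execlists, on gen8, and on SNB when the IOMMU is
 * remapping graphics memory.
 */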
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                return false;

        if (i915.semaphores >= 0)
                return i915.semaphores;

        /* TODO: make semaphores and Execlists play nicely together */
        if (i915.enable_execlists)
                return false;

        /* Until we get further testing... */
        if (IS_GEN8(dev))
                return false;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

void i915_firmware_load_error_print(const char *fw_path, int err)
{
        DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);

        /*
         * If the reason is not known assume -ENOENT since that's the most
         * usual failure mode.
         */
        if (!err)
                err = -ENOENT;

        if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
                return;

        DRM_ERROR(
          "The driver is built-in, so to load the firmware you need to\n"
          "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
          "in your initrd/initramfs image.\n");
}

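/* Suspend every encoder that provides a ->suspend hook, under the modeset locks. */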
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct drm_encoder *encoder;

        drm_modeset_lock_all(dev);
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                if (intel_encoder->suspend)
                        intel_encoder->suspend(intel_encoder);
        }
        drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                              bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);


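/*
 * First phase of suspend: idle GEM, shut down the display pipes and
 * encoders, save register state and notify the opregion. The device is
 * left powered; i915_drm_suspend_late() does the final D3 transition.
 */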
static int i915_drm_suspend(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        pci_power_t opregion_target_state;
        int error;

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        /* We do a lot of poking in a lot of registers, so make sure they
         * work properly. */
        intel_display_set_init_power(dev_priv, true);

        drm_kms_helper_poll_disable(dev);

        pci_save_state(dev->pdev);

        error = i915_gem_suspend(dev);
        if (error) {
                dev_err(&dev->pdev->dev,
                        "GEM idle failed, resume might fail\n");
                return error;
        }

        intel_suspend_gt_powersave(dev);

        /*
         * Disable CRTCs directly since we want to preserve sw state
         * for _thaw. Also, power gate the CRTC power wells.
         */
        drm_modeset_lock_all(dev);
        intel_display_suspend(dev);
        drm_modeset_unlock_all(dev);

        intel_dp_mst_suspend(dev);

        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_hpd_cancel_work(dev_priv);

        intel_suspend_encoders(dev_priv);

        intel_suspend_hw(dev);

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
        if (acpi_target_system_state() < ACPI_STATE_S3)
                opregion_target_state = PCI_D1;
#endif
        intel_opregion_notify_adapter(dev, opregion_target_state);

        intel_uncore_forcewake_reset(dev, false);
        intel_opregion_fini(dev);

        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

        dev_priv->suspend_count++;

        intel_display_set_init_power(dev_priv, false);

        return 0;
}

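/*
 * Final phase of suspend: run the platform-specific power-off steps and put
 * the PCI device into D3hot (unless we're hibernating on a GEN4 Lenovo
 * machine, see below).
 */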
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        int ret;

        ret = intel_suspend_complete(dev_priv);

        if (ret) {
                DRM_ERROR("Suspend complete failed: %d\n", ret);

                return ret;
        }

        pci_disable_device(drm_dev->pdev);
        /*
         * During hibernation on some GEN4 platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
         * leave the device in D0 on those platforms and hope the BIOS will
         * power down the device properly. Platforms where this was seen:
         * Lenovo Thinkpad X301, X61s
         */
        if (!(hibernation &&
              drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
              INTEL_INFO(dev_priv)->gen == 4))
                pci_set_power_state(drm_dev->pdev, PCI_D3hot);

        return 0;
}

int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
                         state.event != PM_EVENT_FREEZE))
                return -EINVAL;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_suspend(dev);
        if (error)
                return error;

        return i915_drm_suspend_late(dev, false);
}

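/*
 * Second phase of resume, counterpart to i915_drm_suspend(): restore GTT
 * mappings and register state, re-enable interrupts, re-init the GPU and
 * modeset state, and bring hotplug and fbdev back up.
 */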
static int i915_drm_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        i915_gem_restore_gtt_mappings(dev);
        mutex_unlock(&dev->struct_mutex);

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        intel_init_pch_refclk(dev);
        drm_mode_config_reset(dev);

        /*
         * Interrupts have to be enabled before any batches are run. If not the
         * GPU will hang. i915_gem_init_hw() will initiate batches to
         * update/restore the context.
         *
         * Modeset enabling in intel_modeset_init_hw() also needs working
         * interrupts.
         */
        intel_runtime_pm_enable_interrupts(dev_priv);

        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
                atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
        }
        mutex_unlock(&dev->struct_mutex);

        intel_modeset_init_hw(dev);

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);

        drm_modeset_lock_all(dev);
        intel_display_resume(dev);
        drm_modeset_unlock_all(dev);

        intel_dp_mst_resume(dev);

        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
         * bother with the tiny race here where we might lose hotplug
         * notifications.
         */
        intel_hpd_init(dev_priv);
        /* Config may have changed between suspend and resume */
        drm_helper_hpd_irq_event(dev);

        intel_opregion_init(dev);

        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_opregion_notify_adapter(dev, PCI_D0);

        drm_kms_helper_poll_enable(dev);

        return 0;
}

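/*
 * First phase of resume: re-enable the PCI device and bring up the
 * platform-specific power state before any register access happens.
 */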
static int i915_drm_resume_early(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;

        /*
         * We have a resume ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with an early
         * resume hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (pci_enable_device(dev->pdev))
                return -EIO;

        pci_set_master(dev->pdev);

        if (IS_VALLEYVIEW(dev_priv))
                ret = vlv_resume_prepare(dev_priv, false);
        if (ret)
                DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
                          ret);

        intel_uncore_early_sanitize(dev, true);

        if (IS_BROXTON(dev))
                ret = bxt_resume_prepare(dev_priv);
        else if (IS_SKYLAKE(dev_priv))
                ret = skl_resume_prepare(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_disable_pc8(dev_priv);

        intel_uncore_sanitize(dev);
        intel_power_domains_init_hw(dev_priv);

        return ret;
}

int i915_resume_legacy(struct drm_device *dev)
{
        int ret;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        ret = i915_drm_resume_early(dev);
        if (ret)
                return ret;

        return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool simulated;
        int ret;

        intel_reset_gt_powersave(dev);

        mutex_lock(&dev->struct_mutex);

        i915_gem_reset(dev);

        simulated = dev_priv->gpu_error.stop_rings != 0;

        ret = intel_gpu_reset(dev);

        /* Also reset the gpu hangman. */
        if (simulated) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        if (i915_stop_ring_allow_warn(dev_priv))
                pr_notice("drm/i915: Resetting chip after gpu hang\n");

        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        intel_overlay_reset(dev_priv);

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there.  Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */

        /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
        dev_priv->gpu_error.reload_in_reset = true;

        ret = i915_gem_init_hw(dev);

        dev_priv->gpu_error.reload_in_reset = false;

        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("Failed hw init on reset %d\n", ret);
                return ret;
        }

        /*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
         * previous concerns that it doesn't respond well to some forms
         * of re-init after reset.
         */
        if (INTEL_INFO(dev)->gen > 5)
                intel_enable_gt_powersave(dev);

        return 0;
}

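/*
 * PCI probe callback: reject preliminary hardware unless the user opted in,
 * bind only to function 0, then hand the device off to the DRM core.
 */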
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;

        if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
        }

        /* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head, which only
         * confuses us now, especially on systems where both functions
         * have the same PCI ID!
         */
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;

        return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        /*
         * We have a suspend ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with a late
         * suspend hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_resume(drm_dev);
}

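/*
 * Per-platform low-level power helpers: each *_suspend_complete() performs
 * the final power-down steps for its platform, and the matching
 * *_resume_prepare() undoes them on the way back up.
 */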
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
        /* Enabling DC6 is not a hard requirement to enter runtime D3 */

        /*
         * This is to ensure that CSR isn't identified as loaded before
         * the CSR-loading program is called during runtime-resume.
         */
        intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);

        skl_uninit_cdclk(dev_priv);

        return 0;
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
        hsw_enable_pc8(dev_priv);

        return 0;
}

static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        /* TODO: when DC5 support is added disable DC5 here. */

        broxton_ddi_phy_uninit(dev);
        broxton_uninit_cdclk(dev);
        bxt_enable_dc9(dev_priv);

        return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        /* TODO: when CSR FW support is added make sure the FW is loaded */

        bxt_disable_dc9(dev_priv);

        /*
         * TODO: when DC5 support is added enable DC5 here if the CSR FW
         * is available.
         */
        broxton_init_cdclk(dev);
        broxton_ddi_phy_init(dev);
        intel_prepare_ddi(dev);

        return 0;
}

static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        skl_init_cdclk(dev_priv);
        intel_csr_load_program(dev);

        return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same 3
 * criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        int i;

        /* GAM 0x4000-0x4770 */
        s->wr_watermark         = I915_READ(GEN7_WR_WATERMARK);
        s->gfx_prio_ctrl        = I915_READ(GEN7_GFX_PRIO_CTRL);
        s->arb_mode             = I915_READ(ARB_MODE);
        s->gfx_pend_tlb0        = I915_READ(GEN7_GFX_PEND_TLB0);
        s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
        s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

        s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
        s->ecochk               = I915_READ(GAM_ECOCHK);
        s->bsd_hwsp             = I915_READ(BSD_HWS_PGA_GEN7);
        s->blt_hwsp             = I915_READ(BLT_HWS_PGA_GEN7);

        s->tlb_rd_addr          = I915_READ(GEN7_TLB_RD_ADDR);

        /* MBC 0x9024-0x91D0, 0x8500 */
        s->g3dctl               = I915_READ(VLV_G3DCTL);
        s->gsckgctl             = I915_READ(VLV_GSCKGCTL);
        s->mbctl                = I915_READ(GEN6_MBCTL);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        s->ucgctl1              = I915_READ(GEN6_UCGCTL1);
        s->ucgctl3              = I915_READ(GEN6_UCGCTL3);
        s->rcgctl1              = I915_READ(GEN6_RCGCTL1);
        s->rcgctl2              = I915_READ(GEN6_RCGCTL2);
        s->rstctl               = I915_READ(GEN6_RSTCTL);
        s->misccpctl            = I915_READ(GEN7_MISCCPCTL);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        s->gfxpause             = I915_READ(GEN6_GFXPAUSE);
        s->rpdeuhwtc            = I915_READ(GEN6_RPDEUHWTC);
        s->rpdeuc               = I915_READ(GEN6_RPDEUC);
        s->ecobus               = I915_READ(ECOBUS);
        s->pwrdwnupctl          = I915_READ(VLV_PWRDWNUPCTL);
        s->rp_down_timeout      = I915_READ(GEN6_RP_DOWN_TIMEOUT);
        s->rp_deucsw            = I915_READ(GEN6_RPDEUCSW);
        s->rcubmabdtmr          = I915_READ(GEN6_RCUBMABDTMR);
        s->rcedata              = I915_READ(VLV_RCEDATA);
        s->spare2gh             = I915_READ(VLV_SPAREG2H);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        s->gt_imr               = I915_READ(GTIMR);
        s->gt_ier               = I915_READ(GTIER);
        s->pm_imr               = I915_READ(GEN6_PMIMR);
        s->pm_ier               = I915_READ(GEN6_PMIER);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl              = I915_READ(TILECTL);
        s->gt_fifoctl           = I915_READ(GTFIFOCTL);
        s->gtlc_wake_ctrl       = I915_READ(VLV_GTLC_WAKE_CTRL);
        s->gtlc_survive         = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        s->pmwgicz              = I915_READ(VLV_PMWGICZ);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        s->gu_ctl0              = I915_READ(VLV_GU_CTL0);
        s->gu_ctl1              = I915_READ(VLV_GU_CTL1);
        s->pcbr                 = I915_READ(VLV_PCBR);
        s->clock_gate_dis2      = I915_READ(VLV_GUNIT_CLOCK_GATE2);

        /*
         * Not saving any of:
         * DFT,         0x9800-0x9EC0
         * SARB,        0xB000-0xB1FC
         * GAC,         0x5208-0x524C, 0x14000-0x14C000
         * PCI CFG
         */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        u32 val;
        int i;

        /* GAM 0x4000-0x4770 */
        I915_WRITE(GEN7_WR_WATERMARK,   s->wr_watermark);
        I915_WRITE(GEN7_GFX_PRIO_CTRL,  s->gfx_prio_ctrl);
        I915_WRITE(ARB_MODE,            s->arb_mode | (0xffff << 16));
        I915_WRITE(GEN7_GFX_PEND_TLB0,  s->gfx_pend_tlb0);
        I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

        I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
        I915_WRITE(GAM_ECOCHK,          s->ecochk);
        I915_WRITE(BSD_HWS_PGA_GEN7,    s->bsd_hwsp);
        I915_WRITE(BLT_HWS_PGA_GEN7,    s->blt_hwsp);

        I915_WRITE(GEN7_TLB_RD_ADDR,    s->tlb_rd_addr);

        /* MBC 0x9024-0x91D0, 0x8500 */
        I915_WRITE(VLV_G3DCTL,          s->g3dctl);
        I915_WRITE(VLV_GSCKGCTL,        s->gsckgctl);
        I915_WRITE(GEN6_MBCTL,          s->mbctl);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        I915_WRITE(GEN6_UCGCTL1,        s->ucgctl1);
        I915_WRITE(GEN6_UCGCTL3,        s->ucgctl3);
        I915_WRITE(GEN6_RCGCTL1,        s->rcgctl1);
        I915_WRITE(GEN6_RCGCTL2,        s->rcgctl2);
        I915_WRITE(GEN6_RSTCTL,         s->rstctl);
        I915_WRITE(GEN7_MISCCPCTL,      s->misccpctl);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        I915_WRITE(GEN6_GFXPAUSE,       s->gfxpause);
        I915_WRITE(GEN6_RPDEUHWTC,      s->rpdeuhwtc);
        I915_WRITE(GEN6_RPDEUC,         s->rpdeuc);
        I915_WRITE(ECOBUS,              s->ecobus);
        I915_WRITE(VLV_PWRDWNUPCTL,     s->pwrdwnupctl);
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
        I915_WRITE(GEN6_RPDEUCSW,       s->rp_deucsw);
        I915_WRITE(GEN6_RCUBMABDTMR,    s->rcubmabdtmr);
        I915_WRITE(VLV_RCEDATA,         s->rcedata);
        I915_WRITE(VLV_SPAREG2H,        s->spare2gh);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        I915_WRITE(GTIMR,               s->gt_imr);
        I915_WRITE(GTIER,               s->gt_ier);
        I915_WRITE(GEN6_PMIMR,          s->pm_imr);
        I915_WRITE(GEN6_PMIER,          s->pm_ier);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

        /* GT SA CZ domain, 0x100000-0x138124 */
        I915_WRITE(TILECTL,                     s->tilectl);
        I915_WRITE(GTFIFOCTL,                   s->gt_fifoctl);
        /*
         * Preserve the GT allow-wake and GFX force-clock bits; they must not
         * be restored here, as the caller uses them to control the s0ix
         * suspend/resume sequence.
         */
        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= VLV_GTLC_ALLOWWAKEREQ;
        val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= VLV_GFX_CLK_FORCE_ON_BIT;
        val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        I915_WRITE(VLV_PMWGICZ,                 s->pmwgicz);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        I915_WRITE(VLV_GU_CTL0,                 s->gu_ctl0);
        I915_WRITE(VLV_GU_CTL1,                 s->gu_ctl1);
        I915_WRITE(VLV_PCBR,                    s->pcbr);
        I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
}

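/*
 * Force the Gunit graphics clock on or off. When forcing on, poll (via the
 * wait_for() macro, timeout in msec) until the hardware reports the clock
 * as running.
 */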
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
        u32 val;
        int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
                val |= VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        if (!force_on)
                return 0;

        err = wait_for(COND, 20);
        if (err)
                DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
                          I915_READ(VLV_GTLC_SURVIVABILITY_REG));

        return err;
#undef COND
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
        u32 val;
        int err = 0;

        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= ~VLV_GTLC_ALLOWWAKEREQ;
        if (allow)
                val |= VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
              allow)
        err = wait_for(COND, 1);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");
        return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                                 bool wait_for_on)
{
        u32 mask;
        u32 val;
        int err;

        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
        if (COND)
                return 0;

        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
                        wait_for_on ? "on" : "off",
                        I915_READ(VLV_GTLC_PW_STATUS));

        /*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
        err = wait_for(COND, 3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          wait_for_on ? "on" : "off");

        return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
        if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;

        DRM_ERROR("GT register access while GT waking disabled\n");
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

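/*
 * vlv/chv s0ix entry: wait for the GT power wells to turn off, force the
 * graphics clock on, disallow GT wakeups, save the Gunit state (VLV only),
 * then release the forced clock again.
 */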
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
        u32 mask;
        int err;

        /*
         * Bspec defines the GT well-on flags checked below as debug only,
         * so don't treat them as hard failures.
         */
        (void)vlv_wait_for_gt_wells(dev_priv, false);

        mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
        WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

        vlv_check_no_gt_access(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, true);
        if (err)
                goto err1;

        err = vlv_allow_gt_wake(dev_priv, false);
        if (err)
                goto err2;

        if (!IS_CHERRYVIEW(dev_priv->dev))
                vlv_save_gunit_s0ix_state(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, false);
        if (err)
                goto err2;

        return 0;

err2:
        /* For safety always re-enable waking and disable gfx clock forcing */
        vlv_allow_gt_wake(dev_priv, true);
err1:
        vlv_force_gfx_clock(dev_priv, false);

        return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                                bool rpm_resume)
{
        struct drm_device *dev = dev_priv->dev;
        int err;
        int ret;

        /*
         * If any of the steps fail just try to continue, that's the best we
         * can do at this point. Return the first error code (which will also
         * leave RPM permanently disabled).
         */
        ret = vlv_force_gfx_clock(dev_priv, true);

        if (!IS_CHERRYVIEW(dev_priv->dev))
                vlv_restore_gunit_s0ix_state(dev_priv);

        err = vlv_allow_gt_wake(dev_priv, true);
        if (!ret)
                ret = err;

        err = vlv_force_gfx_clock(dev_priv, false);
        if (!ret)
                ret = err;

        vlv_check_no_gt_access(dev_priv);

        if (rpm_resume) {
                intel_init_clock_gating(dev);
                i915_gem_restore_fences(dev);
        }

        return ret;
}

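/*
 * Runtime PM suspend callback: release mmaps and GT power state, run the
 * platform suspend steps, then tell the opregion firmware which D-state we
 * are entering (see the comments below for why this differs per platform).
 */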
1434 static int intel_runtime_suspend(struct device *device)
1435 {
1436         struct pci_dev *pdev = to_pci_dev(device);
1437         struct drm_device *dev = pci_get_drvdata(pdev);
1438         struct drm_i915_private *dev_priv = dev->dev_private;
1439         int ret;
1440
1441         if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1442                 return -ENODEV;
1443
1444         if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1445                 return -ENODEV;
1446
1447         DRM_DEBUG_KMS("Suspending device\n");
1448
1449         /*
1450          * We could deadlock here in case another thread holding struct_mutex
1451          * calls RPM suspend concurrently, since the RPM suspend will wait
1452          * first for this RPM suspend to finish. In this case the concurrent
1453          * RPM resume will be followed by its RPM suspend counterpart. Still
1454          * for consistency return -EAGAIN, which will reschedule this suspend.
1455          */
1456         if (!mutex_trylock(&dev->struct_mutex)) {
1457                 DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1458                 /*
1459                  * Bump the expiration timestamp, otherwise the suspend won't
1460                  * be rescheduled.
1461                  */
1462                 pm_runtime_mark_last_busy(device);
1463
1464                 return -EAGAIN;
1465         }
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		return ret;
	}

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	intel_uncore_forcewake_reset(dev, false);
	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3),
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

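/*
 * Runtime (D3 -> D0) resume: the mirror image of intel_runtime_suspend().
 * The platform hooks are called first to bring the hardware out of its
 * low-power state; swizzling, ring frequencies, interrupts and GT powersave
 * are then re-initialised unconditionally, since rolling back is pointless
 * by that stage (see the comment in the function body).
 */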
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_SKYLAKE(dev))
		ret = skl_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);
	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements the functionality common to the runtime and
 * system suspend sequences.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		ret = skl_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

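/*
 * File operations on the /dev/dri device nodes. Everything here is the
 * stock DRM implementation except for the i915-specific 32-bit compat
 * ioctl translation.
 */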
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend_legacy,
	.resume = i915_resume_legacy,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden either by
	 * the i915.modeset parameter or by the nomodeset boot option
	 * (checked via vgacon_text_force() below).
	 */
	driver.driver_features |= DRIVER_MODESET;

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif
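
	/*
	 * The net effect (illustrative): booting with "nomodeset" or with
	 * "i915.modeset=0" disables KMS, while an explicit "i915.modeset=1"
	 * keeps KMS enabled even if "nomodeset" was given, since the check
	 * above only honours vgacon_text_force() when i915.modeset was left
	 * at its default of -1.
	 */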

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	/*
	 * FIXME: Note that we're lying to the DRM core here so that we can
	 * get access to the atomic ioctl and the atomic properties.  Only
	 * plane operations on a single CRTC will actually work.
	 */
	if (driver.driver_features & DRIVER_MODESET)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
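
/*
 * Usage note (illustrative): built as a module the driver is loaded with,
 * e.g., "modprobe i915 modeset=1"; built in, the same parameter goes on the
 * kernel command line as "i915.modeset=1". On hardware where
 * HAS_RUNTIME_PM() holds, runtime PM can then be enabled from userspace
 * via the standard sysfs knob, e.g.
 * "echo auto > /sys/bus/pci/devices/0000:00:02.0/power/control".
 */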