drm/i915/skl: Add support for DP voltage swings and pre-emphasis
[pandora-kernel.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40
41 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
42
/* Maps a DP link bandwidth code to the DPLL divider values producing it. */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code */
	struct dpll dpll;	/* PLL dividers for this link rate */
};
47
/* DPLL divider settings for gen4-class DP at 1.62 and 2.7 GHz. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
54
/* DPLL divider settings for PCH-attached DP ports. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
61
/* DPLL divider settings for Valleyview DP ports. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
68
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
86
87 /**
88  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
89  * @intel_dp: DP struct
90  *
91  * If a CPU or PCH DP output is attached to an eDP panel, this function
92  * will return true, and false otherwise.
93  */
94 static bool is_edp(struct intel_dp *intel_dp)
95 {
96         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
97
98         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
99 }
100
101 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
102 {
103         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
104
105         return intel_dig_port->base.base.dev;
106 }
107
/* Look up the intel_dp behind the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
112
113 static void intel_dp_link_down(struct intel_dp *intel_dp);
114 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
115 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
116
/*
 * Return the highest usable link rate code (DP_LINK_BW_*), clamping the
 * sink's advertised DP_MAX_LINK_RATE to what this source supports.
 */
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		/* HBR2 only on HSW (non-ULX) or gen8+ with a DPCD 1.2+ sink */
		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
		     INTEL_INFO(dev)->gen >= 8) &&
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
143
/*
 * Number of lanes to use: the minimum of what the source port wires up
 * and what the sink advertises in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	/* DDI port A without the DDI_A_4_LANES strap only has 2 lanes */
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
159
160 /*
161  * The units on the numbers in the next two are... bizarre.  Examples will
162  * make it clearer; this one parallels an example in the eDP spec.
163  *
164  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165  *
166  *     270000 * 1 * 8 / 10 == 216000
167  *
168  * The actual data capacity of that configuration is 2.16Gbit/s, so the
169  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
170  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171  * 119000.  At 18bpp that's 2142000 kilobits per second.
172  *
173  * Thus the strange-looking division by 10 in intel_dp_link_required, to
174  * get the result in decakilobits instead of kilobits.
175  */
176
/*
 * Bandwidth needed by a mode: pixel clock (kHz) times bpp, converted to
 * decakilobits/s, rounding up (see the units discussion above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* round up while converting kilobits -> decakilobits */
	return (kilobits + 9) / 10;
}
182
/*
 * Data capacity of a link, in decakilobits/s: 8b/10b channel coding
 * means only 8 of every 10 transmitted bits carry payload.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total_symbol_rate = max_link_clock * max_lanes;

	return total_symbol_rate * 8 / 10;
}
188
/*
 * Validate a display mode against panel limits and available DP link
 * bandwidth (checked at 18bpp, the lowest depth considered here).
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/* eDP panels cannot exceed their native (fixed) mode */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* smaller modes get scaled, so the panel clock is what counts */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
226
/*
 * Pack up to the first 4 bytes of @src into a big-endian u32 suitable
 * for an AUX channel data register. @src is read-only, so const-qualify
 * it (callers passing non-const buffers are unaffected).
 */
static uint32_t
pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	/* only 4 bytes fit in one 32-bit data register */
	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
239
/* Unpack a big-endian AUX data register value into up to 4 bytes of @dst. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes < 4 ? dst_bytes : 4;
	int shift = 24;
	int i;

	for (i = 0; i < count; i++, shift -= 8)
		dst[i] = src >> shift;
}
249
250 /* hrawclock is 1/4 the FSB frequency */
251 static int
252 intel_hrawclk(struct drm_device *dev)
253 {
254         struct drm_i915_private *dev_priv = dev->dev_private;
255         uint32_t clkcfg;
256
257         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
258         if (IS_VALLEYVIEW(dev))
259                 return 200;
260
261         clkcfg = I915_READ(CLKCFG);
262         switch (clkcfg & CLKCFG_FSB_MASK) {
263         case CLKCFG_FSB_400:
264                 return 100;
265         case CLKCFG_FSB_533:
266                 return 133;
267         case CLKCFG_FSB_667:
268                 return 166;
269         case CLKCFG_FSB_800:
270                 return 200;
271         case CLKCFG_FSB_1067:
272                 return 266;
273         case CLKCFG_FSB_1333:
274                 return 333;
275         /* these two are just a guess; one of them might be right */
276         case CLKCFG_FSB_1600:
277         case CLKCFG_FSB_1600_ALT:
278                 return 400;
279         default:
280                 return 133;
281         }
282 }
283
284 static void
285 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
286                                     struct intel_dp *intel_dp,
287                                     struct edp_power_seq *out);
288 static void
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290                                               struct intel_dp *intel_dp,
291                                               struct edp_power_seq *out);
292
/*
 * Take pps_mutex, grabbing the port's power domain reference first
 * (the get must happen outside the mutex; see comment below).
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
310
/* Release pps_mutex and drop the power domain reference taken by pps_lock(). */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	/* power domain put must happen while not holding pps_mutex */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
324
/*
 * Return the pipe whose panel power sequencer is bound to this eDP port,
 * picking and initializing a free one if none is bound yet.
 * Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	/* candidate set: only pipes A and B have power sequencers */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	struct edp_power_seq power_seq;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		return PIPE_A;

	/* lowest free pipe wins */
	intel_dp->pps_pipe = ffs(pipes) - 1;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	return intel_dp->pps_pipe;
}
377
378 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
379                                enum pipe pipe);
380
/* pipe_check: is panel power (PP_ON) asserted on this pipe's sequencer? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
386
/* pipe_check: is VDD being forced on by this pipe's sequencer? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
392
/* pipe_check: matches any pipe; used as the catch-all fallback. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
398
/*
 * Scan pipes A/B for a power sequencer whose port-select field matches
 * @port and which satisfies @pipe_check; returns INVALID_PIPE if none.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
421
/*
 * At init, bind pps_pipe to whichever power sequencer the BIOS left
 * driving this port, preferring one that is actually powered up, and
 * reprogram its registers. Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq power_seq;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);
}
460
/*
 * Invalidate every eDP encoder's cached PPS pipe binding so it gets
 * re-picked lazily by vlv_power_sequencer_pipe(). VLV-only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
489
/* Panel power control register for this port: PCH fixed reg or VLV per-pipe. */
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
499
/* Panel power status register for this port: PCH fixed reg or VLV per-pipe. */
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
509
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* only act on eDP ports, and only for an actual restart */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* wait out the panel's power cycle delay before rebooting */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
545
/* Is panel power (PP_ON) currently asserted? Caller holds pps_mutex. */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
555
/* Is VDD currently being forced on? Caller holds pps_mutex. */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
565
/*
 * Sanity check before AUX traffic: warn if the eDP panel has neither
 * panel power nor forced VDD, since AUX would then go nowhere.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
582
/*
 * Wait for the AUX channel to finish (SEND_BUSY cleared), either via the
 * AUX-done interrupt or by polling, and return the final CTL register value.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the register each evaluation, so the condition tracks hw state */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
606
/* AUX clock divider, pre-ILK style: single divider derived from hrawclk. */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
618
/*
 * AUX clock divider, ILK-style: port A (eDP) runs off the CPU-side input
 * clock, all other ports off the PCH rawclk. Only one divider (index 0).
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
636
/*
 * AUX clock divider, HSW/BDW-style: port A runs off cdclk; LPT:H needs
 * hard-coded workaround dividers; everything else uses the PCH rawclk.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
658
659 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
660 {
661         return index ? 0 : 100;
662 }
663
/*
 * Build the AUX_CH_CTL value that kicks off a transaction of @send_bytes
 * with the given clock divider, including interrupt/timeout settings.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* gen6 uses a shorter precharge time than later gens */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP AUX needs the longer 600us timeout */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
693
/*
 * Perform one raw AUX channel transaction: transmit @send_bytes from
 * @send, then read up to @recv_size reply bytes into @recv.
 * Returns the number of bytes received, or a negative errno.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* data registers immediately follow the control register */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* retry the whole transaction at each available AUX clock divider */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	/* only drop VDD if we were the ones who turned it on */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
832
833 #define BARE_ADDRESS_SIZE       3
834 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer hook: marshal a struct drm_dp_aux_msg into the
 * raw byte stream expected by intel_dp_aux_ch() and unpack the reply.
 *
 * The 4-byte header is: request nibble (shifted into the high bits of
 * byte 0), the 16-bit AUX address (big-endian in bytes 1-2), and
 * size - 1 in byte 3.  A zero-length message transmits only the first
 * BARE_ADDRESS_SIZE (3) header bytes, i.e. an address-only transaction.
 *
 * Returns the payload size on success or a negative errno on failure.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        /* 20 bytes = 4-byte header + 16-byte max AUX payload */
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        txbuf[0] = msg->request << 4;
        txbuf[1] = msg->address >> 8;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        /* Strip the i2c middle-of-transaction flag; it doesn't change
         * whether this is a read or a write. */
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 1; /* only the reply/ack byte comes back */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        /* The reply code lives in the high nibble of byte 0. */
                        msg->reply = rxbuf[0] >> 4;

                        /* Return payload size. */
                        ret = msg->size;
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                rxsize = msg->size + 1; /* reply byte + payload */

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
897
/*
 * Set up the AUX channel for this DP port: pick the AUX control
 * register for the port, register the drm_dp_aux helper, and link the
 * resulting i2c adapter into the connector's sysfs directory.
 *
 * On registration failure we only log; the encoder is left without a
 * usable AUX/i2c adapter.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        const char *name = NULL;
        int ret;

        switch (port) {
        case PORT_A:
                intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
                name = "DPDDC-A";
                break;
        case PORT_B:
                intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
                name = "DPDDC-B";
                break;
        case PORT_C:
                intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
                name = "DPDDC-C";
                break;
        case PORT_D:
                intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
                name = "DPDDC-D";
                break;
        default:
                BUG();
        }

        /* On non-DDI platforms the AUX control register sits at a fixed
         * offset from the port's output register instead. */
        if (!HAS_DDI(dev))
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

        intel_dp->aux.name = name;
        intel_dp->aux.dev = dev->dev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;

        DRM_DEBUG_KMS("registering %s bus for %s\n", name,
                      connector->base.kdev->kobj.name);

        ret = drm_dp_aux_register(&intel_dp->aux);
        if (ret < 0) {
                DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
                          name, ret);
                return;
        }

        /* Expose the AUX i2c adapter under the connector's sysfs node;
         * removed again in intel_dp_connector_unregister(). */
        ret = sysfs_create_link(&connector->base.kdev->kobj,
                                &intel_dp->aux.ddc.dev.kobj,
                                intel_dp->aux.ddc.dev.kobj.name);
        if (ret < 0) {
                DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
                drm_dp_aux_unregister(&intel_dp->aux);
        }
}
953
954 static void
955 intel_dp_connector_unregister(struct intel_connector *intel_connector)
956 {
957         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
958
959         if (!intel_connector->mst_port)
960                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
961                                   intel_dp->aux.ddc.dev.kobj.name);
962         intel_connector_unregister(intel_connector);
963 }
964
965 static void
966 hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
967 {
968         switch (link_bw) {
969         case DP_LINK_BW_1_62:
970                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
971                 break;
972         case DP_LINK_BW_2_7:
973                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
974                 break;
975         case DP_LINK_BW_5_4:
976                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
977                 break;
978         }
979 }
980
981 static void
982 intel_dp_set_clock(struct intel_encoder *encoder,
983                    struct intel_crtc_config *pipe_config, int link_bw)
984 {
985         struct drm_device *dev = encoder->base.dev;
986         const struct dp_link_dpll *divisor = NULL;
987         int i, count = 0;
988
989         if (IS_G4X(dev)) {
990                 divisor = gen4_dpll;
991                 count = ARRAY_SIZE(gen4_dpll);
992         } else if (HAS_PCH_SPLIT(dev)) {
993                 divisor = pch_dpll;
994                 count = ARRAY_SIZE(pch_dpll);
995         } else if (IS_CHERRYVIEW(dev)) {
996                 divisor = chv_dpll;
997                 count = ARRAY_SIZE(chv_dpll);
998         } else if (IS_VALLEYVIEW(dev)) {
999                 divisor = vlv_dpll;
1000                 count = ARRAY_SIZE(vlv_dpll);
1001         }
1002
1003         if (divisor && count) {
1004                 for (i = 0; i < count; i++) {
1005                         if (link_bw == divisor[i].link_bw) {
1006                                 pipe_config->dpll = divisor[i].dpll;
1007                                 pipe_config->clock_set = true;
1008                                 break;
1009                         }
1010                 }
1011         }
1012 }
1013
/*
 * Encoder compute_config hook for DP/eDP: pick a (bpp, link rate,
 * lane count) combination that can carry the adjusted mode, then fill
 * in the link m/n values and the clock selection.
 *
 * Search order: bpp walks DOWN from the pipe bpp in 2-bpc steps (to a
 * floor of 6 bpc); for each bpp, link rate walks up, and lane count
 * walks up, taking the first combination whose available bandwidth
 * covers the mode rate.  For eDP, VBT-provided minimums clamp the
 * search range.
 *
 * Returns false if the mode cannot be carried at any supported
 * configuration (or uses double-clocked timings, which DP can't do).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int min_lane_count = 1;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
        int min_clock = 0;
        int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
        static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        int link_avail, link_clock;

        /* Port A (CPU eDP) never goes through the PCH transcoder. */
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio;

        /* eDP panels run at their fixed native mode; scale the
         * requested mode to it with the appropriate panel fitter. */
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);
                if (!HAS_PCH_SPLIT(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                intel_connector->panel.fitting_mode);
        }

        /* Double-clocked modes are not supported on DP. */
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock],
                      adjusted_mode->crtc_clock);

        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
        if (is_edp(intel_dp)) {
                /* The VBT knows the panel better than we do; never drive
                 * more bpp than the BIOS says the panel takes. */
                if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                                      dev_priv->vbt.edp_bpp);
                        bpp = dev_priv->vbt.edp_bpp;
                }

                if (IS_BROADWELL(dev)) {
                        /* Yes, it's an ugly hack. */
                        min_lane_count = max_lane_count;
                        DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
                                      min_lane_count);
                } else if (dev_priv->vbt.edp_lanes) {
                        min_lane_count = min(dev_priv->vbt.edp_lanes,
                                             max_lane_count);
                        DRM_DEBUG_KMS("using min %u lanes per VBT\n",
                                      min_lane_count);
                }

                if (dev_priv->vbt.edp_rate) {
                        min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
                        DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
                                      bws[min_clock]);
                }
        }

        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);

                for (clock = min_clock; clock <= max_clock; clock++) {
                        /* NOTE(review): the <<= 1 step assumes min_lane_count
                         * is a power of two (1/2/4); a non-power-of-two VBT
                         * value would skip valid counts -- confirm VBT range. */
                        for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        goto found;
                                }
                        }
                }
        }

        return false;

found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
                        intel_dp->color_range = DP_COLOR_RANGE_16_235;
                else
                        intel_dp->color_range = 0;
        }

        if (intel_dp->color_range)
                pipe_config->limited_color_range = true;

        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        pipe_config->pipe_bpp = bpp;
        pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
                      intel_dp->link_bw, intel_dp->lane_count,
                      pipe_config->port_clock, bpp);
        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                      mode_rate, link_avail);

        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n);

        /* Seamless DRRS needs a second m/n set for the downclocked mode. */
        if (intel_connector->panel.downclock_mode != NULL &&
                intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
                        pipe_config->has_drrs = true;
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
                                &pipe_config->dp_m2_n2);
        }

        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
        else
                intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

        return true;
}
1157
/*
 * Program the CPU eDP (DP_A) PLL frequency select for the crtc's
 * negotiated port clock: 162000 kHz -> 160MHz select, anything else
 * -> 270MHz select.  The chosen bits are mirrored into intel_dp->DP
 * so later writes of the port register keep the same PLL selection.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (crtc->config.port_clock == 162000) {
                /* For a long time we've carried around a ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        /* Give the PLL time to settle before it is used. */
        udelay(500);
}
1187
/*
 * Assemble the DP port register value (cached in intel_dp->DP, not yet
 * written to hardware here) for the mode about to be set: lane count,
 * sync polarity, audio enable, enhanced framing, and pipe selection --
 * laid out differently per platform generation, see the comment below.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

        if (crtc->config.has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(&encoder->base, adjusted_mode);
        }

        /* Split out the IBX/CPU vs CPT settings */

        /* IVB CPU eDP (port A on gen7, non-VLV) uses the CPT-style
         * link-train bits plus its own pipe select field. */
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                /* NOTE(review): shift of 29 presumably targets the IVB
                 * port A pipe-select field -- verify against the PRM. */
                intel_dp->DP |= crtc->pipe << 29;
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                        intel_dp->DP |= intel_dp->color_range;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (!IS_CHERRYVIEW(dev)) {
                        if (crtc->pipe == 1)
                                intel_dp->DP |= DP_PIPEB_SELECT;
                } else {
                        /* CHV has a wider pipe-select field (3 pipes). */
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                }
        } else {
                /* CPT PCH: everything else lives in TRANS_DP_CTL. */
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }
}
1266
1267 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1268 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1269
1270 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1271 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1272
1273 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1274 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1275
/*
 * Poll the panel power-sequencer status register until
 * (status & mask) == value, with a 5 second timeout (see the
 * _wait_for() arguments).  Timeout is logged but not propagated.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));

        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
        }

        DRM_DEBUG_KMS("Wait complete\n");
}
1302
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1308
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1314
/*
 * Wait out the mandatory panel power-cycle delay: first the software-
 * tracked remainder since the last power-off, then the hardware
 * sequencer's own cycle-idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1326
/* Honour the panel's power-on -> backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1332
/* Honour the backlight-off delay before further panel operations. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1338
/* Read the current pp_control value, unlocking the register if it
 * is locked.  The returned value always has the unlock key set, so
 * writing it back is guaranteed to take effect.
 * Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;

        lockdep_assert_held(&dev_priv->pps_mutex);

        control = I915_READ(_pp_ctrl_reg(intel_dp));
        control &= ~PANEL_UNLOCK_MASK;
        control |= PANEL_UNLOCK_REGS;
        return control;
}
1356
1357 /*
1358  * Must be paired with edp_panel_vdd_off().
1359  * Must hold pps_mutex around the whole on/off sequence.
1360  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1361  */
1362 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1363 {
1364         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1365         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1366         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1367         struct drm_i915_private *dev_priv = dev->dev_private;
1368         enum intel_display_power_domain power_domain;
1369         u32 pp;
1370         u32 pp_stat_reg, pp_ctrl_reg;
1371         bool need_to_disable = !intel_dp->want_panel_vdd;
1372
1373         lockdep_assert_held(&dev_priv->pps_mutex);
1374
1375         if (!is_edp(intel_dp))
1376                 return false;
1377
1378         intel_dp->want_panel_vdd = true;
1379
1380         if (edp_have_panel_vdd(intel_dp))
1381                 return need_to_disable;
1382
1383         power_domain = intel_display_port_power_domain(intel_encoder);
1384         intel_display_power_get(dev_priv, power_domain);
1385
1386         DRM_DEBUG_KMS("Turning eDP VDD on\n");
1387
1388         if (!edp_have_panel_power(intel_dp))
1389                 wait_panel_power_cycle(intel_dp);
1390
1391         pp = ironlake_get_pp_control(intel_dp);
1392         pp |= EDP_FORCE_VDD;
1393
1394         pp_stat_reg = _pp_stat_reg(intel_dp);
1395         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1396
1397         I915_WRITE(pp_ctrl_reg, pp);
1398         POSTING_READ(pp_ctrl_reg);
1399         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1400                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1401         /*
1402          * If the panel wasn't on, delay before accessing aux channel
1403          */
1404         if (!edp_have_panel_power(intel_dp)) {
1405                 DRM_DEBUG_KMS("eDP was not running\n");
1406                 msleep(intel_dp->panel_power_up_delay);
1407         }
1408
1409         return need_to_disable;
1410 }
1411
1412 /*
1413  * Must be paired with intel_edp_panel_vdd_off() or
1414  * intel_edp_panel_off().
1415  * Nested calls to these functions are not allowed since
1416  * we drop the lock. Caller must use some higher level
1417  * locking to prevent nested calls from other threads.
1418  */
1419 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1420 {
1421         bool vdd;
1422
1423         if (!is_edp(intel_dp))
1424                 return;
1425
1426         pps_lock(intel_dp);
1427         vdd = edp_panel_vdd_on(intel_dp);
1428         pps_unlock(intel_dp);
1429
1430         WARN(!vdd, "eDP VDD already requested on\n");
1431 }
1432
/*
 * Actually drop the VDD force bit and release the associated power
 * domain reference.  No-op if VDD isn't currently forced on; warns if
 * someone still wants VDD (want_panel_vdd must already be false).
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP VDD off\n");

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* If panel power was already off, dropping VDD starts a real
         * power cycle -- record it for the cycle-delay bookkeeping. */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        /* Drop the reference taken in edp_panel_vdd_on(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1472
1473 static void edp_panel_vdd_work(struct work_struct *__work)
1474 {
1475         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1476                                                  struct intel_dp, panel_vdd_work);
1477
1478         pps_lock(intel_dp);
1479         if (!intel_dp->want_panel_vdd)
1480                 edp_panel_vdd_off_sync(intel_dp);
1481         pps_unlock(intel_dp);
1482 }
1483
1484 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1485 {
1486         unsigned long delay;
1487
1488         /*
1489          * Queue the timer to fire a long time from now (relative to the power
1490          * down delay) to keep the panel power up across a sequence of
1491          * operations.
1492          */
1493         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1494         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1495 }
1496
1497 /*
1498  * Must be paired with edp_panel_vdd_on().
1499  * Must hold pps_mutex around the whole on/off sequence.
1500  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1501  */
1502 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1503 {
1504         struct drm_i915_private *dev_priv =
1505                 intel_dp_to_dev(intel_dp)->dev_private;
1506
1507         lockdep_assert_held(&dev_priv->pps_mutex);
1508
1509         if (!is_edp(intel_dp))
1510                 return;
1511
1512         WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1513
1514         intel_dp->want_panel_vdd = false;
1515
1516         if (sync)
1517                 edp_panel_vdd_off_sync(intel_dp);
1518         else
1519                 edp_panel_vdd_schedule_off(intel_dp);
1520 }
1521
1522 /*
1523  * Must be paired with intel_edp_panel_vdd_on().
1524  * Nested calls to these functions are not allowed since
1525  * we drop the lock. Caller must use some higher level
1526  * locking to prevent nested calls from other threads.
1527  */
1528 static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1529 {
1530         if (!is_edp(intel_dp))
1531                 return;
1532
1533         pps_lock(intel_dp);
1534         edp_panel_vdd_off(intel_dp, sync);
1535         pps_unlock(intel_dp);
1536 }
1537
/*
 * Turn eDP panel power on via the power sequencer, observing the
 * required power-cycle delay first.  No-op for non-eDP or if power is
 * already on.  Records last_power_on for the backlight-on delay.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power on\n");

        pps_lock(intel_dp);

        if (edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                goto out;
        }

        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for wait_backlight_on()'s T8 delay. */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

 out:
        pps_unlock(intel_dp);
}
1587
/*
 * Turn eDP panel power off via the power sequencer.  Requires that
 * VDD is currently forced on (warns otherwise); the VDD force bit is
 * cleared in the same register write, and the power-domain reference
 * taken when VDD was enabled is released here.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power off\n");

        pps_lock(intel_dp);

        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Timestamp for wait_panel_power_cycle()'s manual delay. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);

        pps_unlock(intel_dp);
}
1629
/* Enable backlight in the panel power control.
 *
 * Sets EDP_BLC_ENABLE in the PP control register under the pps lock.
 * This only flips the panel-power-sequencer backlight enable; the
 * backlight PWM itself is handled separately (see intel_edp_backlight_on()).
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
1659
1660 /* Enable backlight PWM and backlight PP control. */
1661 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1662 {
1663         if (!is_edp(intel_dp))
1664                 return;
1665
1666         DRM_DEBUG_KMS("\n");
1667
1668         intel_panel_enable_backlight(intel_dp->attached_connector);
1669         _intel_edp_backlight_on(intel_dp);
1670 }
1671
/* Disable backlight in the panel power control.
 *
 * Clears EDP_BLC_ENABLE in the PP control register under the pps lock and
 * records the time so a later backlight-on can honor the panel's required
 * backlight off delay.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Stamp after the write so the off delay is measured from hw state. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
1698
1699 /* Disable backlight PP control and backlight PWM. */
1700 void intel_edp_backlight_off(struct intel_dp *intel_dp)
1701 {
1702         if (!is_edp(intel_dp))
1703                 return;
1704
1705         DRM_DEBUG_KMS("\n");
1706
1707         _intel_edp_backlight_off(intel_dp);
1708         intel_panel_disable_backlight(intel_dp->attached_connector);
1709 }
1710
1711 /*
1712  * Hook for controlling the panel power control backlight through the bl_power
1713  * sysfs attribute. Take care to handle multiple calls.
1714  */
1715 static void intel_edp_backlight_power(struct intel_connector *connector,
1716                                       bool enable)
1717 {
1718         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
1719         bool is_enabled;
1720
1721         pps_lock(intel_dp);
1722         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1723         pps_unlock(intel_dp);
1724
1725         if (is_enabled == enable)
1726                 return;
1727
1728         DRM_DEBUG_KMS("panel power control backlight %s\n",
1729                       enable ? "enable" : "disable");
1730
1731         if (enable)
1732                 _intel_edp_backlight_on(intel_dp);
1733         else
1734                 _intel_edp_backlight_off(intel_dp);
1735 }
1736
/* Enable the eDP PLL in the DP_A register (CPU eDP).
 *
 * Must be called with the pipe disabled; both the PLL and the port are
 * expected to be off on entry (WARNed below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* 200us settle time before the PLL may be used -- presumably a
         * bspec requirement; matches the pll_off path. */
        udelay(200);
}
1762
/* Disable the eDP PLL in the DP_A register (CPU eDP).
 *
 * Must be called with the pipe disabled and after the port has been
 * turned off; the PLL is expected to still be on (WARNed below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* 200us settle time after disabling -- mirrors the pll_on path. */
        udelay(200);
}
1787
/* If the sink supports it, try to set the power state appropriately.
 *
 * DPCD revisions before 1.1 have no SET_POWER register, so those sinks
 * are skipped. Failures are logged at debug level only; sink power state
 * is best-effort.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, retry the write a few times (sleeping
                 * 1ms between attempts) to give the sink time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        /* drm_dp_dpcd_writeb() returns 1 on success (one
                         * byte transferred). */
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
1818
/*
 * Read out hardware state: is this DP port enabled, and if so on which pipe?
 *
 * Returns false when the port's power domain is off or DP_PORT_EN is clear.
 * Otherwise returns true and fills *pipe where the pipe can be decoded;
 * note the two fall-through paths below that return true WITHOUT writing
 * *pipe (unknown output_reg, or no transcoder claiming the port).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        /* The pipe-select encoding in the port register differs per
         * platform/port; pick the right decode. */
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /* CPT PCH: the port register has no pipe select; the
                 * routing lives in the per-pipe TRANS_DP_CTL port select,
                 * so scan all pipes for one that claims this port. */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /* Unknown register: report enabled, *pipe untouched. */
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
1877
/*
 * Fill pipe_config from the current hardware state of this DP encoder:
 * audio enable, sync polarity flags, link M/N, port clock (CPU eDP only)
 * and the resulting dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        /* Sync polarity lives in the port register, except on CPT PCH
         * ports (other than CPU eDP on port A) where it's in the
         * per-pipe transcoder register. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->adjusted_mode.flags |= flags;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* CPU eDP: derive the port clock from the DP_A PLL frequency. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
1957
1958 static bool is_edp_psr(struct intel_dp *intel_dp)
1959 {
1960         return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1961 }
1962
1963 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1964 {
1965         struct drm_i915_private *dev_priv = dev->dev_private;
1966
1967         if (!HAS_PSR(dev))
1968                 return false;
1969
1970         return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1971 }
1972
/*
 * Program the VSC SDP (used for PSR) into the video DIP data registers
 * of the crtc's transcoder, zero-padding the remainder of the buffer,
 * then enable VSC DIP transmission.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
                                    struct edp_vsc_psr *vsc_psr)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
        u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
        uint32_t *data = (uint32_t *) vsc_psr;
        unsigned int i;

        /* As per BSPec (Pipe Video Data Island Packet), we need to disable
           the video DIP being updated before program video DIP data buffer
           registers for DIP being updated. */
        I915_WRITE(ctl_reg, 0);
        POSTING_READ(ctl_reg);

        /* Copy the packet dword by dword; pad the rest of the DIP buffer
         * with zeros. */
        for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
                if (i < sizeof(struct edp_vsc_psr))
                        I915_WRITE(data_reg + i, *data++);
                else
                        I915_WRITE(data_reg + i, 0);
        }

        I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
        POSTING_READ(ctl_reg);
}
2001
/*
 * One-time PSR setup: send the VSC SDP header identifying a PSR packet
 * and mask off events that would otherwise cause continuous PSR exits.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_vsc_psr psr_vsc;

        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
        psr_vsc.sdp_header.HB2 = 0x2;
        psr_vsc.sdp_header.HB3 = 0x8;
        intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

        /* Avoid continuous PSR exit by masking memup and hpd */
        I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
                   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
}
2020
/*
 * Enable PSR on the sink via DPCD, choosing link-standby vs. main-link-off
 * mode, and program the dedicated PSR AUX registers the hardware uses to
 * poke the sink on PSR exit.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */
        bool only_standby = false;

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        /* BDW ports other than eDP (port A) only support standby mode --
         * presumably a hw limitation; mirrors enable_source below. */
        if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
                only_standby = true;

        /* Enable PSR in sink */
        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
        else
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

        /* Setup AUX registers */
        I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
        I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
        I915_WRITE(EDP_PSR_AUX_CTL(dev),
                   DP_AUX_CH_CTL_TIME_OUT_400us |
                   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
2053
/*
 * Enable PSR on the source (host) side by programming EDP_PSR_CTL:
 * link standby vs. full link disable, entry timing, idle frame count.
 */
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t max_sleep_time = 0x1f;
        uint32_t idle_frames = 1;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
        bool only_standby = false;

        /* Must match the standby decision made in enable_sink. */
        if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
                only_standby = true;

        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
                val |= EDP_PSR_LINK_STANDBY;
                val |= EDP_PSR_TP2_TP3_TIME_0us;
                val |= EDP_PSR_TP1_TIME_0us;
                val |= EDP_PSR_SKIP_AUX_EXIT;
                val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
        } else
                val |= EDP_PSR_LINK_DISABLE;

        /* BDW has no link entry time field, hence the 0 there. */
        I915_WRITE(EDP_PSR_CTL(dev), val |
                   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
                   EDP_PSR_ENABLE);
}
2083
/*
 * Check whether the current output configuration allows PSR.
 *
 * Records the verdict in dev_priv->psr.source_ok and returns it. Must be
 * called with psr.lock, connection_mutex and the crtc lock held (asserted
 * below).
 */
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        lockdep_assert_held(&dev_priv->psr.lock);
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

        /* Pessimistic default until every check passes. */
        dev_priv->psr.source_ok = false;

        if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
                return false;
        }

        /* Module-parameter kill switch. */
        if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disable by flag\n");
                return false;
        }

        /* Below limitations aren't valid for Broadwell */
        if (IS_BROADWELL(dev))
                goto out;

        if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
            S3D_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
                return false;
        }

        if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
                return false;
        }

 out:
        dev_priv->psr.source_ok = true;
        return true;
}
2127
/*
 * Actually activate PSR: sink side first, then source side.
 *
 * Must be called with psr.lock held, PSR hardware currently off and
 * psr.active clear (all asserted below); sets psr.active on success.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);

        /* Enable PSR on the panel */
        intel_edp_psr_enable_sink(intel_dp);

        /* Enable PSR on the host */
        intel_edp_psr_enable_source(intel_dp);

        dev_priv->psr.active = true;
}
2146
/*
 * Public entry point to enable PSR for an eDP panel.
 *
 * Validates platform and panel support, claims the single PSR slot
 * (dev_priv->psr.enabled) and performs one-time setup. Actual activation
 * happens later from the delayed work / flush path.
 */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_PSR(dev)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
                return;
        }

        if (!is_edp_psr(intel_dp)) {
                DRM_DEBUG_KMS("PSR not supported by this panel\n");
                return;
        }

        mutex_lock(&dev_priv->psr.lock);
        /* Only one pipe/port can use PSR at a time. */
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        dev_priv->psr.busy_frontbuffer_bits = 0;

        /* Setup PSR once */
        intel_edp_psr_setup(intel_dp);

        if (intel_edp_psr_match_conditions(intel_dp))
                dev_priv->psr.enabled = intel_dp;
        mutex_unlock(&dev_priv->psr.lock);
}
2178
/*
 * Public entry point to disable PSR.
 *
 * Turns off the source-side PSR hardware if active, waits for the PSR
 * state machine to go idle, releases the PSR slot and cancels any pending
 * re-activation work.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        if (dev_priv->psr.active) {
                I915_WRITE(EDP_PSR_CTL(dev),
                           I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

                /* Wait till PSR is idle */
                if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
                               EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");

                dev_priv->psr.active = false;
        } else {
                /* Enabled-but-inactive means hw must already be off. */
                WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
        }

        dev_priv->psr.enabled = NULL;
        mutex_unlock(&dev_priv->psr.lock);

        /* Cancel outside the lock; the work itself takes psr.lock. */
        cancel_delayed_work_sync(&dev_priv->psr.work);
}
2209
2210 static void intel_edp_psr_work(struct work_struct *work)
2211 {
2212         struct drm_i915_private *dev_priv =
2213                 container_of(work, typeof(*dev_priv), psr.work.work);
2214         struct intel_dp *intel_dp = dev_priv->psr.enabled;
2215
2216         mutex_lock(&dev_priv->psr.lock);
2217         intel_dp = dev_priv->psr.enabled;
2218
2219         if (!intel_dp)
2220                 goto unlock;
2221
2222         /*
2223          * The delayed work can race with an invalidate hence we need to
2224          * recheck. Since psr_flush first clears this and then reschedules we
2225          * won't ever miss a flush when bailing out here.
2226          */
2227         if (dev_priv->psr.busy_frontbuffer_bits)
2228                 goto unlock;
2229
2230         intel_edp_psr_do_enable(intel_dp);
2231 unlock:
2232         mutex_unlock(&dev_priv->psr.lock);
2233 }
2234
2235 static void intel_edp_psr_do_exit(struct drm_device *dev)
2236 {
2237         struct drm_i915_private *dev_priv = dev->dev_private;
2238
2239         if (dev_priv->psr.active) {
2240                 u32 val = I915_READ(EDP_PSR_CTL(dev));
2241
2242                 WARN_ON(!(val & EDP_PSR_ENABLE));
2243
2244                 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
2245
2246                 dev_priv->psr.active = false;
2247         }
2248
2249 }
2250
/*
 * Frontbuffer invalidate hook: CPU rendering is about to dirty the
 * frontbuffer, so exit PSR and remember which bits are busy so the
 * flush path can re-activate later.
 */
void intel_edp_psr_invalidate(struct drm_device *dev,
                              unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        intel_edp_psr_do_exit(dev);

        /* Only the bits of the pipe that has PSR matter. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
        mutex_unlock(&dev_priv->psr.lock);
}
2274
/*
 * Frontbuffer flush hook: rendering to the frontbuffer has finished.
 * Clears the busy bits and, once nothing is busy and PSR is inactive,
 * schedules delayed work to re-activate PSR.
 */
void intel_edp_psr_flush(struct drm_device *dev,
                         unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
        dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /*
         * On Haswell sprite plane updates don't result in a psr invalidating
         * signal in the hardware. Which means we need to manually fake this in
         * software for all flushes, not just when we've seen a preceding
         * invalidation through frontbuffer rendering.
         */
        if (IS_HASWELL(dev) &&
            (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
                intel_edp_psr_do_exit(dev);

        /* Re-activate with a 100ms debounce once everything is idle. */
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->psr.work,
                                      msecs_to_jiffies(100));
        mutex_unlock(&dev_priv->psr.lock);
}
2307
2308 void intel_edp_psr_init(struct drm_device *dev)
2309 {
2310         struct drm_i915_private *dev_priv = dev->dev_private;
2311
2312         INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
2313         mutex_init(&dev_priv->psr.lock);
2314 }
2315
/*
 * Encoder disable hook: shut down backlight, sink power state and panel
 * power, in that order, with VDD held across the sequence.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2332
2333 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2334 {
2335         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2336         enum port port = dp_to_dig_port(intel_dp)->port;
2337
2338         intel_dp_link_down(intel_dp);
2339         if (port == PORT_A)
2340                 ironlake_edp_pll_off(intel_dp);
2341 }
2342
2343 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2344 {
2345         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2346
2347         intel_dp_link_down(intel_dp);
2348 }
2349
/*
 * CHV post-disable: tear down the link, then put the PHY data lanes into
 * reset via DPIO sideband writes (both PCS channel pairs of the port's
 * channel), under the dpio lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_link_down(intel_dp);

        mutex_lock(&dev_priv->dpio_lock);

        /* Propagate soft reset to data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        /* Assert lane reset (bits are active-low: clearing = in reset). */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2385
/*
 * Program the requested DP link training pattern (and scrambling state)
 * for this port.
 *
 * On DDI (HSW+) platforms the pattern lives in the DP_TP_CTL register,
 * which is read-modify-written here directly.  On all other platforms
 * the pattern is encoded in the port register value: this function only
 * updates *DP, and the caller is responsible for writing it out.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: pattern and scrambling controlled via DP_TP_CTL. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT-style training bits in the port register value. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No pattern 3 encoding here; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV has a pattern 3 encoding on this path. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2468
2469 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2470 {
2471         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2472         struct drm_i915_private *dev_priv = dev->dev_private;
2473
2474         intel_dp->DP |= DP_PORT_EN;
2475
2476         /* enable with pattern 1 (as per spec) */
2477         _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2478                                  DP_TRAINING_PATTERN_1);
2479
2480         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2481         POSTING_READ(intel_dp->output_reg);
2482 }
2483
/*
 * Enable the DP port and bring the link up: enable the port (with
 * training pattern 1), power up the (e)DP panel under forced vdd, wake
 * the sink and run the full link training sequence.  The call order
 * below is deliberate; do not reorder.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* Enabling an already-enabled port is a driver bug. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	intel_dp_enable_port(intel_dp);
	/* vdd is forced on only around the panel power-on step. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_panel_on(intel_dp);
	intel_edp_panel_vdd_off(intel_dp, true);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}
2503
2504 static void g4x_enable_dp(struct intel_encoder *encoder)
2505 {
2506         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2507
2508         intel_enable_dp(encoder);
2509         intel_edp_backlight_on(intel_dp);
2510 }
2511
2512 static void vlv_enable_dp(struct intel_encoder *encoder)
2513 {
2514         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2515
2516         intel_edp_backlight_on(intel_dp);
2517 }
2518
2519 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2520 {
2521         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2522         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2523
2524         intel_dp_prepare(encoder);
2525
2526         /* Only ilk+ has port A */
2527         if (dport->port == PORT_A) {
2528                 ironlake_set_pll_cpu_edp(intel_dp);
2529                 ironlake_edp_pll_on(intel_dp);
2530         }
2531 }
2532
2533 static void vlv_steal_power_sequencer(struct drm_device *dev,
2534                                       enum pipe pipe)
2535 {
2536         struct drm_i915_private *dev_priv = dev->dev_private;
2537         struct intel_encoder *encoder;
2538
2539         lockdep_assert_held(&dev_priv->pps_mutex);
2540
2541         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2542                             base.head) {
2543                 struct intel_dp *intel_dp;
2544                 enum port port;
2545
2546                 if (encoder->type != INTEL_OUTPUT_EDP)
2547                         continue;
2548
2549                 intel_dp = enc_to_intel_dp(&encoder->base);
2550                 port = dp_to_dig_port(intel_dp)->port;
2551
2552                 if (intel_dp->pps_pipe != pipe)
2553                         continue;
2554
2555                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2556                               pipe_name(pipe), port_name(port));
2557
2558                 /* make sure vdd is off before we steal it */
2559                 edp_panel_vdd_off_sync(intel_dp);
2560
2561                 intel_dp->pps_pipe = INVALID_PIPE;
2562         }
2563 }
2564
/*
 * Bind the power sequencer of this crtc's pipe to the given (e)DP port
 * and (re)initialize its registers.  Caller must hold pps_mutex
 * (asserted below).
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	/* NOTE(review): passed down uninitialized — presumably filled in
	 * by intel_dp_init_panel_power_sequencer(); confirm. */
	struct edp_power_seq power_seq;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to do if this port already owns the pipe's sequencer. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		edp_panel_vdd_off_sync(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);
}
2604
2605 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2606 {
2607         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2608         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2609         struct drm_device *dev = encoder->base.dev;
2610         struct drm_i915_private *dev_priv = dev->dev_private;
2611         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2612         enum dpio_channel port = vlv_dport_to_channel(dport);
2613         int pipe = intel_crtc->pipe;
2614         u32 val;
2615
2616         mutex_lock(&dev_priv->dpio_lock);
2617
2618         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2619         val = 0;
2620         if (pipe)
2621                 val |= (1<<21);
2622         else
2623                 val &= ~(1<<21);
2624         val |= 0x001000c4;
2625         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2626         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2627         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2628
2629         mutex_unlock(&dev_priv->dpio_lock);
2630
2631         if (is_edp(intel_dp)) {
2632                 pps_lock(intel_dp);
2633                 vlv_init_panel_power_sequencer(intel_dp);
2634                 pps_unlock(intel_dp);
2635         }
2636
2637         intel_enable_dp(encoder);
2638
2639         vlv_wait_port_ready(dev_priv, dport);
2640 }
2641
/*
 * VLV pre-PLL hook: latch the port register value, then put the PHY Tx
 * lanes into their default reset/clock configuration before the DPLL
 * is enabled.  The raw hex values are opaque PHY tuning constants —
 * presumably from BSpec; not decoded here.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2671
/*
 * CHV pre-enable hook: take the PHY data lanes out of soft reset,
 * program per-lane latency/upar bits, set up the eDP power sequencer
 * if needed, then enable the port, train the link and wait for the
 * PHY to report ready.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Setting the lane reset bits releases the lanes (they are
	 * cleared again on the disable path). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the latency optimal bit (lane 1 gets 0, others 0x6). */
		data = (i == 1) ? 0x0 : 0x6;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
				data << DPIO_FRC_LATENCY_SHFIT);

		/* Set the upar bit (lane 1 gets 0, others 1). */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		vlv_init_panel_power_sequencer(intel_dp);
		pps_unlock(intel_dp);
	}

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
2732
/*
 * CHV pre-PLL hook: latch the port register value, then program the
 * PHY left/right clock distribution and clock channel usage for this
 * port before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2798
2799 /*
2800  * Native read with retry for link status and receiver capability reads for
2801  * cases where the sink may still be asleep.
2802  *
2803  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2804  * supposed to retry 3 times per the spec.
2805  */
2806 static ssize_t
2807 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2808                         void *buffer, size_t size)
2809 {
2810         ssize_t ret;
2811         int i;
2812
2813         for (i = 0; i < 3; i++) {
2814                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2815                 if (ret == size)
2816                         return ret;
2817                 msleep(1);
2818         }
2819
2820         return ret;
2821 }
2822
2823 /*
2824  * Fetch AUX CH registers 0x202 - 0x207 which contain
2825  * link status information
2826  */
2827 static bool
2828 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2829 {
2830         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2831                                        DP_LANE0_1_STATUS,
2832                                        link_status,
2833                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2834 }
2835
2836 /* These are source-specific values. */
2837 static uint8_t
2838 intel_dp_voltage_max(struct intel_dp *intel_dp)
2839 {
2840         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2841         enum port port = dp_to_dig_port(intel_dp)->port;
2842
2843         if (INTEL_INFO(dev)->gen >= 9)
2844                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2845         else if (IS_VALLEYVIEW(dev))
2846                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2847         else if (IS_GEN7(dev) && port == PORT_A)
2848                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2849         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2850                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2851         else
2852                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2853 }
2854
/*
 * Maximum pre-emphasis level this source hardware supports for the
 * given voltage swing (both in DPCD encoding), per platform.  Higher
 * swings generally allow less pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* SKL+: swing level 3 falls through to default (no pre-emph). */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB eDP (port A): more limited mapping. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2920
/*
 * Translate the requested swing/pre-emphasis (train_set[0], DPCD
 * encoding) into VLV PHY demph/uniqtranscale/preemph register values
 * and program them via DPIO.  Unsupported swing/pre-emph combinations
 * bail out early.  Always returns 0: on VLV the levels are programmed
 * here directly rather than encoded into the port register.  The hex
 * values are opaque PHY tuning constants (presumably from BSpec).
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Outer switch: pre-emphasis level; inner: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* TX_DW5 is cleared before programming and restored after. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
3020
3021 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3022 {
3023         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3024         struct drm_i915_private *dev_priv = dev->dev_private;
3025         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3026         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3027         u32 deemph_reg_value, margin_reg_value, val;
3028         uint8_t train_set = intel_dp->train_set[0];
3029         enum dpio_channel ch = vlv_dport_to_channel(dport);
3030         enum pipe pipe = intel_crtc->pipe;
3031         int i;
3032
3033         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3034         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3035                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3036                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3037                         deemph_reg_value = 128;
3038                         margin_reg_value = 52;
3039                         break;
3040                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3041                         deemph_reg_value = 128;
3042                         margin_reg_value = 77;
3043                         break;
3044                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3045                         deemph_reg_value = 128;
3046                         margin_reg_value = 102;
3047                         break;
3048                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3049                         deemph_reg_value = 128;
3050                         margin_reg_value = 154;
3051                         /* FIXME extra to set for 1200 */
3052                         break;
3053                 default:
3054                         return 0;
3055                 }
3056                 break;
3057         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3058                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3059                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3060                         deemph_reg_value = 85;
3061                         margin_reg_value = 78;
3062                         break;
3063                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3064                         deemph_reg_value = 85;
3065                         margin_reg_value = 116;
3066                         break;
3067                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3068                         deemph_reg_value = 85;
3069                         margin_reg_value = 154;
3070                         break;
3071                 default:
3072                         return 0;
3073                 }
3074                 break;
3075         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3076                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3077                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3078                         deemph_reg_value = 64;
3079                         margin_reg_value = 104;
3080                         break;
3081                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3082                         deemph_reg_value = 64;
3083                         margin_reg_value = 154;
3084                         break;
3085                 default:
3086                         return 0;
3087                 }
3088                 break;
3089         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3090                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3091                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3092                         deemph_reg_value = 43;
3093                         margin_reg_value = 154;
3094                         break;
3095                 default:
3096                         return 0;
3097                 }
3098                 break;
3099         default:
3100                 return 0;
3101         }
3102
3103         mutex_lock(&dev_priv->dpio_lock);
3104
3105         /* Clear calc init */
3106         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3107         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3108         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3109
3110         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3111         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3112         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3113
3114         /* Program swing deemph */
3115         for (i = 0; i < 4; i++) {
3116                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3117                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3118                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3119                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3120         }
3121
3122         /* Program swing margin */
3123         for (i = 0; i < 4; i++) {
3124                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3125                 val &= ~DPIO_SWING_MARGIN000_MASK;
3126                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3127                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3128         }
3129
3130         /* Disable unique transition scale */
3131         for (i = 0; i < 4; i++) {
3132                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3133                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3134                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3135         }
3136
3137         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3138                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3139                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3140                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3141
3142                 /*
3143                  * The document said it needs to set bit 27 for ch0 and bit 26
3144                  * for ch1. Might be a typo in the doc.
3145                  * For now, for this unique transition scale selection, set bit
3146                  * 27 for ch0 and ch1.
3147                  */
3148                 for (i = 0; i < 4; i++) {
3149                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3150                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3151                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3152                 }
3153
3154                 for (i = 0; i < 4; i++) {
3155                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3156                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3157                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3158                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3159                 }
3160         }
3161
3162         /* Start swing calculation */
3163         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3164         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3165         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3166
3167         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3168         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3169         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3170
3171         /* LRC Bypass */
3172         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3173         val |= DPIO_LRC_BYPASS;
3174         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3175
3176         mutex_unlock(&dev_priv->dpio_lock);
3177
3178         return 0;
3179 }
3180
3181 static void
3182 intel_get_adjust_train(struct intel_dp *intel_dp,
3183                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3184 {
3185         uint8_t v = 0;
3186         uint8_t p = 0;
3187         int lane;
3188         uint8_t voltage_max;
3189         uint8_t preemph_max;
3190
3191         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3192                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3193                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3194
3195                 if (this_v > v)
3196                         v = this_v;
3197                 if (this_p > p)
3198                         p = this_p;
3199         }
3200
3201         voltage_max = intel_dp_voltage_max(intel_dp);
3202         if (v >= voltage_max)
3203                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3204
3205         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3206         if (p >= preemph_max)
3207                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3208
3209         for (lane = 0; lane < 4; lane++)
3210                 intel_dp->train_set[lane] = v | p;
3211 }
3212
3213 static uint32_t
3214 intel_gen4_signal_levels(uint8_t train_set)
3215 {
3216         uint32_t        signal_levels = 0;
3217
3218         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3219         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3220         default:
3221                 signal_levels |= DP_VOLTAGE_0_4;
3222                 break;
3223         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3224                 signal_levels |= DP_VOLTAGE_0_6;
3225                 break;
3226         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3227                 signal_levels |= DP_VOLTAGE_0_8;
3228                 break;
3229         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3230                 signal_levels |= DP_VOLTAGE_1_2;
3231                 break;
3232         }
3233         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3234         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3235         default:
3236                 signal_levels |= DP_PRE_EMPHASIS_0;
3237                 break;
3238         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3239                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3240                 break;
3241         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3242                 signal_levels |= DP_PRE_EMPHASIS_6;
3243                 break;
3244         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3245                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3246                 break;
3247         }
3248         return signal_levels;
3249 }
3250
3251 /* Gen6's DP voltage swing and pre-emphasis control */
3252 static uint32_t
3253 intel_gen6_edp_signal_levels(uint8_t train_set)
3254 {
3255         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3256                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3257         switch (signal_levels) {
3258         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3259         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3260                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3261         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3262                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3263         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3264         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3265                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3266         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3267         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3268                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3269         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3270         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3271                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3272         default:
3273                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3274                               "0x%x\n", signal_levels);
3275                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3276         }
3277 }
3278
3279 /* Gen7's DP voltage swing and pre-emphasis control */
3280 static uint32_t
3281 intel_gen7_edp_signal_levels(uint8_t train_set)
3282 {
3283         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3284                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3285         switch (signal_levels) {
3286         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3287                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3288         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3289                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3290         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3291                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3292
3293         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3294                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3295         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3296                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3297
3298         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3299                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3300         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3301                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3302
3303         default:
3304                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3305                               "0x%x\n", signal_levels);
3306                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3307         }
3308 }
3309
3310 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3311 static uint32_t
3312 intel_hsw_signal_levels(uint8_t train_set)
3313 {
3314         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3315                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3316         switch (signal_levels) {
3317         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3318                 return DDI_BUF_TRANS_SELECT(0);
3319         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3320                 return DDI_BUF_TRANS_SELECT(1);
3321         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3322                 return DDI_BUF_TRANS_SELECT(2);
3323         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3324                 return DDI_BUF_TRANS_SELECT(3);
3325
3326         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3327                 return DDI_BUF_TRANS_SELECT(4);
3328         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3329                 return DDI_BUF_TRANS_SELECT(5);
3330         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3331                 return DDI_BUF_TRANS_SELECT(6);
3332
3333         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3334                 return DDI_BUF_TRANS_SELECT(7);
3335         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3336                 return DDI_BUF_TRANS_SELECT(8);
3337         default:
3338                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3339                               "0x%x\n", signal_levels);
3340                 return DDI_BUF_TRANS_SELECT(0);
3341         }
3342 }
3343
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	/* Lane 0's training set is used for all lanes. */
	uint8_t train_set = intel_dp->train_set[0];

	/*
	 * Pick the platform-specific encoding of the requested voltage
	 * swing / pre-emphasis.
	 * NOTE(review): IS_CHERRYVIEW is tested before IS_VALLEYVIEW --
	 * presumably the VLV check also matches CHV, so the order matters;
	 * confirm before reordering.
	 */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/*
		 * CHV/VLV apply the levels through DPIO writes inside the
		 * helper; the helper's return value and a zero mask leave
		 * the DP register value untouched here.
		 */
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	/* Splice the new level bits into the caller's DP register value. */
	*DP = (*DP & ~mask) | signal_levels;
}
3378
/*
 * Program the requested training pattern on the source side (port register)
 * and mirror it to the sink via DPCD TRAINING_PATTERN_SET, followed by the
 * per-lane TRAINING_LANEx_SET values unless training is being disabled.
 *
 * Returns true when the full DPCD write completed.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Pattern byte plus one lane byte per possible lane. */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Platform-specific translation of dp_train_pat into *DP. */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3411
/*
 * Restart training from scratch: clear the per-lane training set, program
 * the matching signal levels into *DP, and start the requested training
 * pattern. Returns the result of intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3420
/*
 * Apply the sink's requested swing/pre-emphasis adjustments: recompute the
 * training set from link_status, reprogram the port register, then write
 * the per-lane values starting at DP_TRAINING_LANE0_SET.
 *
 * Returns true when all lane_count bytes were written to the sink.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private; /* used by I915_WRITE */
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3441
/*
 * After training, switch a DDI port to transmitting idle patterns and,
 * except on PORT_A, wait for the hardware to report the idle pattern done.
 * No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	/* Only DDI has the DP_TP_CTL link-train idle mode. */
	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3472
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training: write the link configuration
 * to the sink, enable the port, start training pattern 1, and iterate on
 * the sink's swing/pre-emphasis adjustment requests until clock recovery
 * succeeds or the retry budgets are exhausted. The final port register
 * value is saved in intel_dp->DP for the channel-equalization phase.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* No downspread; ANSI 8b/10b channel coding. */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff; /* sentinel: no voltage tried yet */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from the lowest levels. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Preserve the final port register value for the next phase. */
	intel_dp->DP = DP;
}
3563
/*
 * Channel-equalization phase of DP link training: start training pattern
 * 2 (or 3 when supported), then iterate on the sink's adjustment requests
 * until equalization succeeds, falling back to a full clock-recovery
 * restart when the link loses lock or too many attempts fail.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* cr_tries counts full clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3644
/* End link training on both source and sink by disabling the pattern. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3650
/*
 * Shut the (non-DDI) DP port down: drop to idle training, apply the IBX
 * transcoder-B deselect workaround when needed, then disable the port and
 * wait out the panel power-down delay. A no-op (with warning) on DDI
 * platforms or when the port is already disabled.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Switch to the idle training pattern before disabling the port. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
3715
3716 static bool
3717 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3718 {
3719         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3720         struct drm_device *dev = dig_port->base.base.dev;
3721         struct drm_i915_private *dev_priv = dev->dev_private;
3722
3723         if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3724                                     sizeof(intel_dp->dpcd)) < 0)
3725                 return false; /* aux transfer failed */
3726
3727         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3728
3729         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3730                 return false; /* DPCD not present */
3731
3732         /* Check if the panel supports PSR */
3733         memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3734         if (is_edp(intel_dp)) {
3735                 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3736                                         intel_dp->psr_dpcd,
3737                                         sizeof(intel_dp->psr_dpcd));
3738                 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3739                         dev_priv->psr.sink_support = true;
3740                         DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3741                 }
3742         }
3743
3744         /* Training Pattern 3 support */
3745         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3746             intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3747                 intel_dp->use_tps3 = true;
3748                 DRM_DEBUG_KMS("Displayport TPS3 supported");
3749         } else
3750                 intel_dp->use_tps3 = false;
3751
3752         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3753               DP_DWN_STRM_PORT_PRESENT))
3754                 return true; /* native DP sink */
3755
3756         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3757                 return true; /* no per-port downstream info */
3758
3759         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3760                                     intel_dp->downstream_ports,
3761                                     DP_MAX_DOWNSTREAM_PORTS) < 0)
3762                 return false; /* downstream port status fetch failed */
3763
3764         return true;
3765 }
3766
/*
 * Read and log the sink and branch device OUIs (vendor identifiers) when
 * the sink advertises OUI support. Purely informational -- the values are
 * only printed to the debug log.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	/* Bail if the sink does not advertise OUI support. */
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	/* Hold panel VDD up across the AUX reads (presumably required for
	 * AUX on eDP -- see intel_edp_panel_vdd_on). */
	intel_edp_panel_vdd_on(intel_dp);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	intel_edp_panel_vdd_off(intel_dp, false);
}
3787
3788 static bool
3789 intel_dp_probe_mst(struct intel_dp *intel_dp)
3790 {
3791         u8 buf[1];
3792
3793         if (!intel_dp->can_mst)
3794                 return false;
3795
3796         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3797                 return false;
3798
3799         intel_edp_panel_vdd_on(intel_dp);
3800         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3801                 if (buf[0] & DP_MST_CAP) {
3802                         DRM_DEBUG_KMS("Sink is MST capable\n");
3803                         intel_dp->is_mst = true;
3804                 } else {
3805                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3806                         intel_dp->is_mst = false;
3807                 }
3808         }
3809         intel_edp_panel_vdd_off(intel_dp, false);
3810
3811         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3812         return intel_dp->is_mst;
3813 }
3814
/*
 * Read a 6-byte frame CRC computed by the sink's test hardware.
 *
 * On success returns 0 with the CRC in @crc (caller must provide at least
 * 6 bytes). Returns -ENOTTY when the sink does not advertise CRC support,
 * or -EAGAIN on a DPCD transfer failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf[1];

	/* DP_TEST_SINK_MISC advertises whether sink CRC is supported. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
		return -EAGAIN;

	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Kick off CRC calculation in the sink. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       DP_TEST_SINK_START) < 0)
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EAGAIN;

	/* Stop the sink-side CRC; result ignored (best effort). */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
	return 0;
}
3843
3844 static bool
3845 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3846 {
3847         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3848                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3849                                        sink_irq_vector, 1) == 1;
3850 }
3851
3852 static bool
3853 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3854 {
3855         int ret;
3856
3857         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3858                                              DP_SINK_COUNT_ESI,
3859                                              sink_irq_vector, 14);
3860         if (ret != 14)
3861                 return false;
3862
3863         return true;
3864 }
3865
/* Automated test requests are not implemented; reject them via DPCD. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3872
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ was lost, and hand the event bits to the MST
 * manager, looping while it keeps handling new events.
 *
 * Returns the drm_dp_mst_hpd_irq() result when the sink was serviced,
 * or -EINVAL when not in MST mode or when the ESI read failed (in which
 * case MST mode is torn down and a hotplug event is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits back to the
				 * sink; the AUX write may fail transiently,
				 * so retry a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile;
				 * keep looping until the read fails or
				 * nothing further is handled. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3929
3930 /*
3931  * According to DP spec
3932  * 5.1.2:
3933  *  1. Read DPCD
3934  *  2. Configure link according to Receiver Capabilities
3935  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3936  *  4. Check link status on receipt of hot-plug interrupt
3937  */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold connection_mutex; we inspect encoder/crtc state. */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to check unless the encoder is active on a live crtc. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if channel equalization has been lost (spec 5.1.2 step 4). */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
3989
3990 /* XXX this is probably wrong for multiple downstream ports */
3991 static enum drm_connector_status
3992 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3993 {
3994         uint8_t *dpcd = intel_dp->dpcd;
3995         uint8_t type;
3996
3997         if (!intel_dp_get_dpcd(intel_dp))
3998                 return connector_status_disconnected;
3999
4000         /* if there's no downstream port, we're done */
4001         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4002                 return connector_status_connected;
4003
4004         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4005         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4006             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4007                 uint8_t reg;
4008
4009                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4010                                             &reg, 1) < 0)
4011                         return connector_status_unknown;
4012
4013                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4014                                               : connector_status_disconnected;
4015         }
4016
4017         /* If no HPD, poke DDC gently */
4018         if (drm_probe_ddc(&intel_dp->aux.ddc))
4019                 return connector_status_connected;
4020
4021         /* Well we tried, say unknown for unreliable port types */
4022         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4023                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4024                 if (type == DP_DS_PORT_TYPE_VGA ||
4025                     type == DP_DS_PORT_TYPE_NON_EDID)
4026                         return connector_status_unknown;
4027         } else {
4028                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4029                         DP_DWN_STRM_PORT_TYPE_MASK;
4030                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4031                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4032                         return connector_status_unknown;
4033         }
4034
4035         /* Anything else is out of spec, warn and ignore */
4036         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4037         return connector_status_disconnected;
4038 }
4039
4040 static enum drm_connector_status
4041 edp_detect(struct intel_dp *intel_dp)
4042 {
4043         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4044         enum drm_connector_status status;
4045
4046         status = intel_panel_detect(dev);
4047         if (status == connector_status_unknown)
4048                 status = connector_status_connected;
4049
4050         return status;
4051 }
4052
4053 static enum drm_connector_status
4054 ironlake_dp_detect(struct intel_dp *intel_dp)
4055 {
4056         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4057         struct drm_i915_private *dev_priv = dev->dev_private;
4058         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4059
4060         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4061                 return connector_status_disconnected;
4062
4063         return intel_dp_detect_dpcd(intel_dp);
4064 }
4065
4066 static int g4x_digital_port_connected(struct drm_device *dev,
4067                                        struct intel_digital_port *intel_dig_port)
4068 {
4069         struct drm_i915_private *dev_priv = dev->dev_private;
4070         uint32_t bit;
4071
4072         if (IS_VALLEYVIEW(dev)) {
4073                 switch (intel_dig_port->port) {
4074                 case PORT_B:
4075                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4076                         break;
4077                 case PORT_C:
4078                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4079                         break;
4080                 case PORT_D:
4081                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4082                         break;
4083                 default:
4084                         return -EINVAL;
4085                 }
4086         } else {
4087                 switch (intel_dig_port->port) {
4088                 case PORT_B:
4089                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4090                         break;
4091                 case PORT_C:
4092                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4093                         break;
4094                 case PORT_D:
4095                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4096                         break;
4097                 default:
4098                         return -EINVAL;
4099                 }
4100         }
4101
4102         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4103                 return 0;
4104         return 1;
4105 }
4106
4107 static enum drm_connector_status
4108 g4x_dp_detect(struct intel_dp *intel_dp)
4109 {
4110         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4112         int ret;
4113
4114         /* Can't disconnect eDP, but you can close the lid... */
4115         if (is_edp(intel_dp)) {
4116                 enum drm_connector_status status;
4117
4118                 status = intel_panel_detect(dev);
4119                 if (status == connector_status_unknown)
4120                         status = connector_status_connected;
4121                 return status;
4122         }
4123
4124         ret = g4x_digital_port_connected(dev, intel_dig_port);
4125         if (ret == -EINVAL)
4126                 return connector_status_unknown;
4127         else if (ret == 0)
4128                 return connector_status_disconnected;
4129
4130         return intel_dp_detect_dpcd(intel_dp);
4131 }
4132
4133 static struct edid *
4134 intel_dp_get_edid(struct intel_dp *intel_dp)
4135 {
4136         struct intel_connector *intel_connector = intel_dp->attached_connector;
4137
4138         /* use cached edid if we have one */
4139         if (intel_connector->edid) {
4140                 /* invalid edid */
4141                 if (IS_ERR(intel_connector->edid))
4142                         return NULL;
4143
4144                 return drm_edid_duplicate(intel_connector->edid);
4145         } else
4146                 return drm_get_edid(&intel_connector->base,
4147                                     &intel_dp->aux.ddc);
4148 }
4149
4150 static void
4151 intel_dp_set_edid(struct intel_dp *intel_dp)
4152 {
4153         struct intel_connector *intel_connector = intel_dp->attached_connector;
4154         struct edid *edid;
4155
4156         edid = intel_dp_get_edid(intel_dp);
4157         intel_connector->detect_edid = edid;
4158
4159         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4160                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4161         else
4162                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4163 }
4164
4165 static void
4166 intel_dp_unset_edid(struct intel_dp *intel_dp)
4167 {
4168         struct intel_connector *intel_connector = intel_dp->attached_connector;
4169
4170         kfree(intel_connector->detect_edid);
4171         intel_connector->detect_edid = NULL;
4172
4173         intel_dp->has_audio = false;
4174 }
4175
4176 static enum intel_display_power_domain
4177 intel_dp_power_get(struct intel_dp *dp)
4178 {
4179         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4180         enum intel_display_power_domain power_domain;
4181
4182         power_domain = intel_display_port_power_domain(encoder);
4183         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4184
4185         return power_domain;
4186 }
4187
4188 static void
4189 intel_dp_power_put(struct intel_dp *dp,
4190                    enum intel_display_power_domain power_domain)
4191 {
4192         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4193         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4194 }
4195
/*
 * drm_connector_funcs .detect hook. Re-probes the sink, refreshes the
 * cached EDID, and may flip the encoder type between DP and eDP-safe
 * DISPLAYPORT. MST-active ports always report disconnected, since their
 * monitors are exposed through separate MST connectors.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Everything below needs the port powered; released at "out". */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4252
/*
 * drm_connector_funcs .force hook: refresh the cached EDID for a
 * connector whose status is being forced, without re-running detection.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	/* Only re-read the EDID when forced to connected. */
	if (connector->status != connector_status_connected)
		return;

	/* The EDID read needs the port powered. */
	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4276
4277 static int intel_dp_get_modes(struct drm_connector *connector)
4278 {
4279         struct intel_connector *intel_connector = to_intel_connector(connector);
4280         struct edid *edid;
4281
4282         edid = intel_connector->detect_edid;
4283         if (edid) {
4284                 int ret = intel_connector_update_modes(connector, edid);
4285                 if (ret)
4286                         return ret;
4287         }
4288
4289         /* if eDP has no EDID, fall back to fixed mode */
4290         if (is_edp(intel_attached_dp(connector)) &&
4291             intel_connector->panel.fixed_mode) {
4292                 struct drm_display_mode *mode;
4293
4294                 mode = drm_mode_duplicate(connector->dev,
4295                                           intel_connector->panel.fixed_mode);
4296                 if (mode) {
4297                         drm_mode_probed_add(connector, mode);
4298                         return 1;
4299                 }
4300         }
4301
4302         return 0;
4303 }
4304
4305 static bool
4306 intel_dp_detect_audio(struct drm_connector *connector)
4307 {
4308         bool has_audio = false;
4309         struct edid *edid;
4310
4311         edid = to_intel_connector(connector)->detect_edid;
4312         if (edid)
4313                 has_audio = drm_detect_monitor_audio(edid);
4314
4315         return has_audio;
4316 }
4317
/*
 * drm_connector_funcs .set_property hook. Handles the force-audio,
 * broadcast-RGB and (eDP-only) scaling-mode properties; any change that
 * affects output triggers a modeset via intel_crtc_restore_mode().
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value in the drm core first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the override didn't change. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Only restore the mode when audio state actually flips. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset when nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the change by redoing the modeset on the current crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4405
4406 static void
4407 intel_dp_connector_destroy(struct drm_connector *connector)
4408 {
4409         struct intel_connector *intel_connector = to_intel_connector(connector);
4410
4411         intel_dp_unset_edid(intel_attached_dp(connector));
4412
4413         if (!IS_ERR_OR_NULL(intel_connector->edid))
4414                 kfree(intel_connector->edid);
4415
4416         /* Can't call is_edp() since the encoder may have been destroyed
4417          * already. */
4418         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4419                 intel_panel_fini(&intel_connector->panel);
4420
4421         drm_connector_cleanup(connector);
4422         kfree(connector);
4423 }
4424
/*
 * drm_encoder_funcs .destroy hook: unregister the AUX channel, tear
 * down MST state and (for eDP) cancel vdd work and the reboot notifier
 * before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	kfree(intel_dig_port);
}
4450
4451 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4452 {
4453         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4454
4455         if (!is_edp(intel_dp))
4456                 return;
4457
4458         /*
4459          * vdd might still be enabled do to the delayed vdd off.
4460          * Make sure vdd is actually turned off here.
4461          */
4462         pps_lock(intel_dp);
4463         edp_panel_vdd_off_sync(intel_dp);
4464         pps_unlock(intel_dp);
4465 }
4466
/* drm_encoder_funcs .reset hook: re-sanitize the eDP panel vdd state. */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
}
4471
/* Connector ops shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};
4480
/* Probe helpers: mode enumeration, validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4486
/* Encoder ops; .reset re-sanitizes the eDP vdd state. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4491
/*
 * Intentionally empty hot-plug hook; DP hotplug work is done elsewhere
 * (see intel_dp_hpd_pulse() in this file).
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4497
/*
 * Handle a hotplug pulse on a DP digital port. Long pulses re-probe the
 * sink (live status, DPCD, OUI, MST); short pulses service MST IRQs or
 * check link status. Returns false when the event was fully handled
 * here, true when the caller should run the normal hotplug processing.
 */
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	bool ret = true;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* Hold the port's power domain for the duration of the probe. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {

		/* Check the live-status bit appropriate to the platform. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}
	ret = false;
	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4567
4568 /* Return which DP Port should be selected for Transcoder DP control */
4569 int
4570 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4571 {
4572         struct drm_device *dev = crtc->dev;
4573         struct intel_encoder *intel_encoder;
4574         struct intel_dp *intel_dp;
4575
4576         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4577                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4578
4579                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4580                     intel_encoder->type == INTEL_OUTPUT_EDP)
4581                         return intel_dp->output_reg;
4582         }
4583
4584         return -1;
4585 }
4586
4587 /* check the VBT to see whether the eDP is on DP-D port */
4588 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4589 {
4590         struct drm_i915_private *dev_priv = dev->dev_private;
4591         union child_device_config *p_child;
4592         int i;
4593         static const short port_mapping[] = {
4594                 [PORT_B] = PORT_IDPB,
4595                 [PORT_C] = PORT_IDPC,
4596                 [PORT_D] = PORT_IDPD,
4597         };
4598
4599         if (port == PORT_A)
4600                 return true;
4601
4602         if (!dev_priv->vbt.child_dev_num)
4603                 return false;
4604
4605         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4606                 p_child = dev_priv->vbt.child_dev + i;
4607
4608                 if (p_child->common.dvo_port == port_mapping[port] &&
4609                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4610                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4611                         return true;
4612         }
4613         return false;
4614 }
4615
4616 void
4617 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4618 {
4619         struct intel_connector *intel_connector = to_intel_connector(connector);
4620
4621         intel_attach_force_audio_property(connector);
4622         intel_attach_broadcast_rgb_property(connector);
4623         intel_dp->color_range_auto = true;
4624
4625         if (is_edp(intel_dp)) {
4626                 drm_mode_create_scaling_mode_property(connector->dev);
4627                 drm_object_attach_property(
4628                         &connector->base,
4629                         connector->dev->mode_config.scaling_mode_property,
4630                         DRM_MODE_SCALE_ASPECT);
4631                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4632         }
4633 }
4634
4635 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4636 {
4637         intel_dp->last_power_cycle = jiffies;
4638         intel_dp->last_power_on = jiffies;
4639         intel_dp->last_backlight_off = jiffies;
4640 }
4641
/*
 * Determine the panel power sequencer delays.
 *
 * Combines three sources -- the current hardware register values, the VBT,
 * and the eDP 1.3 spec upper limits -- taking the max of hw/VBT per field
 * and falling back to the spec limit when both are unset. The chosen
 * delays are cached in @intel_dp (converted to ms) and, if @out is
 * non-NULL, also returned in hardware units (100us) so they can be
 * written back to the registers later.
 *
 * Must be called with dev_priv->pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* PCH platforms have a single PPS register block; otherwise (VLV)
	 * there is one block per pipe, selected by the sequencer pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* The hw stores t11_t12 in 100ms units; scale to the common
	 * 100us units used for the other fields. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the hw's 100us units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
4743
/*
 * Program the panel power sequencer registers from the delays in @seq
 * (hardware units of 100us, as produced by
 * intel_dp_init_panel_power_sequencer()).
 *
 * Must be called with dev_priv->pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* PP clock reference: PCH rawclk on PCH platforms, hrawclk otherwise. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* PCH platforms have a single PPS register block; otherwise (VLV)
	 * there is one block per pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	/* t11_t12 is in 100us units but the register field is in 100ms
	 * units, hence the divide by 1000 (rounded up). */
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read back so the debug output shows what actually landed in hw. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
4809
/*
 * intel_dp_set_drrs_state - switch the eDP panel refresh rate via DRRS.
 * @dev: drm device
 * @refresh_rate: requested vertical refresh rate in Hz
 *
 * Selects DRRS_LOW_RR when @refresh_rate matches the panel's downclocked
 * mode's vrefresh, DRRS_HIGH_RR otherwise, and on gen7 toggles the
 * PIPECONF_EDP_RR_MODE_SWITCH bit to perform the switch. Bails out early
 * if DRRS is unsupported, PSR is enabled (gen < 8), the CRTC is missing
 * or inactive, or the requested rate is already in effect.
 */
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;
	struct intel_crtc_config *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	struct intel_connector *intel_connector = dev_priv->drrs.connector;
	u32 reg, val;
	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	/* drrs.connector is only set by intel_dp_drrs_init() for eDP. */
	if (intel_connector == NULL) {
		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state. But really
	 * hard to tell without seeing the user of this function of this code.
	 * Check locking and ordering once that lands.
	 */
	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
		return;
	}

	encoder = intel_attached_encoder(&intel_connector->base);
	intel_dp = enc_to_intel_dp(&encoder->base);
	intel_crtc = encoder->new_crtc;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = &intel_crtc->config;

	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* The downclocked mode's refresh rate selects the low-RR state. */
	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
		index = DRRS_LOW_RR;

	if (index == intel_dp->drrs_state.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen7 only: perform the switch via the PIPECONF RR mode bit. */
	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			val |= PIPECONF_EDP_RR_MODE_SWITCH;
			intel_dp_set_m_n(intel_crtc);
		} else {
			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	/*
	 * mutex taken to ensure that there is no race between different
	 * drrs calls trying to update refresh rate. This scenario may occur
	 * in future when idleness detection based DRRS in kernel and
	 * possible calls from user space to set different RR are made.
	 */

	mutex_lock(&intel_dp->drrs_state.mutex);

	intel_dp->drrs_state.refresh_rate_type = index;

	mutex_unlock(&intel_dp->drrs_state.mutex);

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
4898
4899 static struct drm_display_mode *
4900 intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4901                         struct intel_connector *intel_connector,
4902                         struct drm_display_mode *fixed_mode)
4903 {
4904         struct drm_connector *connector = &intel_connector->base;
4905         struct intel_dp *intel_dp = &intel_dig_port->dp;
4906         struct drm_device *dev = intel_dig_port->base.base.dev;
4907         struct drm_i915_private *dev_priv = dev->dev_private;
4908         struct drm_display_mode *downclock_mode = NULL;
4909
4910         if (INTEL_INFO(dev)->gen <= 6) {
4911                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
4912                 return NULL;
4913         }
4914
4915         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4916                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4917                 return NULL;
4918         }
4919
4920         downclock_mode = intel_find_panel_downclock
4921                                         (dev, fixed_mode, connector);
4922
4923         if (!downclock_mode) {
4924                 DRM_DEBUG_KMS("DRRS not supported\n");
4925                 return NULL;
4926         }
4927
4928         dev_priv->drrs.connector = intel_connector;
4929
4930         mutex_init(&intel_dp->drrs_state.mutex);
4931
4932         intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4933
4934         intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4935         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4936         return downclock_mode;
4937 }
4938
4939 void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4940 {
4941         struct drm_device *dev = intel_encoder->base.dev;
4942         struct drm_i915_private *dev_priv = dev->dev_private;
4943         struct intel_dp *intel_dp;
4944         enum intel_display_power_domain power_domain;
4945
4946         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4947                 return;
4948
4949         intel_dp = enc_to_intel_dp(&intel_encoder->base);
4950
4951         pps_lock(intel_dp);
4952
4953         if (!edp_have_panel_vdd(intel_dp))
4954                 goto out;
4955         /*
4956          * The VDD bit needs a power domain reference, so if the bit is
4957          * already enabled when we boot or resume, grab this reference and
4958          * schedule a vdd off, so we don't hold on to the reference
4959          * indefinitely.
4960          */
4961         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4962         power_domain = intel_display_port_power_domain(intel_encoder);
4963         intel_display_power_get(dev_priv, power_domain);
4964
4965         edp_panel_vdd_schedule_off(intel_dp);
4966  out:
4967         pps_unlock(intel_dp);
4968 }
4969
/*
 * Finish connector setup for eDP: verify the panel isn't a ghost by
 * reading the DPCD, program the power sequencer registers, cache the
 * EDID, pick the fixed panel mode (EDID-preferred first, then VBT),
 * initialize DRRS and the backlight.
 *
 * Returns true on success; false means no usable eDP panel responded
 * and the caller should tear the connector down. Non-eDP connectors
 * always return true (nothing to do).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector,
				     struct edp_power_seq *power_seq)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;

	if (!is_edp(intel_dp))
		return true;

	/* Pick up a VDD reference the BIOS may have left enabled. */
	intel_edp_panel_vdd_sanitize(intel_encoder);

	/* Cache DPCD and EDID for edp. */
	intel_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	intel_edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID read OK but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_dig_port,
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	/* VLV can power-cycle eDP across reboot; catch it via notifier. */
	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector);

	return true;
}
5060
/*
 * Initialize the DRM connector for a DP/eDP digital port: select the
 * platform-specific AUX clock divider, register the connector, set up
 * hotplug, the panel power sequencer (eDP), the AUX channel, MST, and
 * finally the eDP panel itself.
 *
 * Returns false (after unwinding the connector) if eDP panel probing
 * fails, true otherwise.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	struct edp_power_seq power_seq = { 0 };
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Delayed work to drop the VDD reference after panel accesses. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* eDP: determine the panel power sequencer timings under pps_lock. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		if (IS_VALLEYVIEW(dev)) {
			vlv_initial_power_sequencer_setup(intel_dp);
		} else {
			intel_dp_init_panel_power_timestamps(intel_dp);
			intel_dp_init_panel_power_sequencer(dev, intel_dp,
							    &power_seq);
		}
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	/* Probe the eDP panel; on failure unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
5197
5198 void
5199 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5200 {
5201         struct drm_i915_private *dev_priv = dev->dev_private;
5202         struct intel_digital_port *intel_dig_port;
5203         struct intel_encoder *intel_encoder;
5204         struct drm_encoder *encoder;
5205         struct intel_connector *intel_connector;
5206
5207         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5208         if (!intel_dig_port)
5209                 return;
5210
5211         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5212         if (!intel_connector) {
5213                 kfree(intel_dig_port);
5214                 return;
5215         }
5216
5217         intel_encoder = &intel_dig_port->base;
5218         encoder = &intel_encoder->base;
5219
5220         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5221                          DRM_MODE_ENCODER_TMDS);
5222
5223         intel_encoder->compute_config = intel_dp_compute_config;
5224         intel_encoder->disable = intel_disable_dp;
5225         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5226         intel_encoder->get_config = intel_dp_get_config;
5227         intel_encoder->suspend = intel_dp_encoder_suspend;
5228         if (IS_CHERRYVIEW(dev)) {
5229                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5230                 intel_encoder->pre_enable = chv_pre_enable_dp;
5231                 intel_encoder->enable = vlv_enable_dp;
5232                 intel_encoder->post_disable = chv_post_disable_dp;
5233         } else if (IS_VALLEYVIEW(dev)) {
5234                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5235                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5236                 intel_encoder->enable = vlv_enable_dp;
5237                 intel_encoder->post_disable = vlv_post_disable_dp;
5238         } else {
5239                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5240                 intel_encoder->enable = g4x_enable_dp;
5241                 if (INTEL_INFO(dev)->gen >= 5)
5242                         intel_encoder->post_disable = ilk_post_disable_dp;
5243         }
5244
5245         intel_dig_port->port = port;
5246         intel_dig_port->dp.output_reg = output_reg;
5247
5248         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5249         if (IS_CHERRYVIEW(dev)) {
5250                 if (port == PORT_D)
5251                         intel_encoder->crtc_mask = 1 << 2;
5252                 else
5253                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5254         } else {
5255                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5256         }
5257         intel_encoder->cloneable = 0;
5258         intel_encoder->hot_plug = intel_dp_hot_plug;
5259
5260         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5261         dev_priv->hpd_irq_port[port] = intel_dig_port;
5262
5263         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5264                 drm_encoder_cleanup(encoder);
5265                 kfree(intel_dig_port);
5266                 kfree(intel_connector);
5267         }
5268 }
5269
5270 void intel_dp_mst_suspend(struct drm_device *dev)
5271 {
5272         struct drm_i915_private *dev_priv = dev->dev_private;
5273         int i;
5274
5275         /* disable MST */
5276         for (i = 0; i < I915_MAX_PORTS; i++) {
5277                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5278                 if (!intel_dig_port)
5279                         continue;
5280
5281                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5282                         if (!intel_dig_port->dp.can_mst)
5283                                 continue;
5284                         if (intel_dig_port->dp.is_mst)
5285                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5286                 }
5287         }
5288 }
5289
5290 void intel_dp_mst_resume(struct drm_device *dev)
5291 {
5292         struct drm_i915_private *dev_priv = dev->dev_private;
5293         int i;
5294
5295         for (i = 0; i < I915_MAX_PORTS; i++) {
5296                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5297                 if (!intel_dig_port)
5298                         continue;
5299                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5300                         int ret;
5301
5302                         if (!intel_dig_port->dp.can_mst)
5303                                 continue;
5304
5305                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5306                         if (ret != 0) {
5307                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5308                         }
5309                 }
5310         }
5311 }