Merge tag 'sound-fix-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[pandora-kernel.git] / drivers / gpu / drm / radeon / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include "drmP.h"
25 #include "radeon.h"
26 #include "cikd.h"
27 #include "r600_dpm.h"
28 #include "ci_dpm.h"
29 #include "atom.h"
30 #include <linux/seq_file.h>
31
/* MC arbiter register-set indices; presumably used with
 * ni_copy_and_switch_arb_sets() later in this file — not referenced in
 * the visible portion. */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* Upper bound of the SMC SRAM address space (0x40000 = 256 KiB);
 * presumably the value stored in pi->sram_end — TODO confirm. */
#define SMC_RAM_END 0x40000

/* Scale factor applied to VDDC values before VID conversion
 * (see ci_convert_to_vid()). */
#define VOLTAGE_SCALE               4
/* VID offset scale factors; not referenced in the visible portion of
 * this file. */
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100
42
/*
 * Per-ASIC PowerTune default parameter sets, selected by PCI device id in
 * ci_initialize_powertune_defaults().  The leading scalars feed the SVI
 * load line (ci_populate_svi_load_line()), the TDC limit
 * (ci_populate_tdc_limit()), the TDC waterfall control (ci_populate_dw8())
 * and the DTE ambient-temperature base / BAPM temperature gradient
 * (ci_populate_bapm_parameters_in_dpm_table()).  The two 15-entry arrays
 * are the BAPMTI_R / BAPMTI_RC thermal-coupling tables copied into the SMC
 * dpm table; exact per-field meanings follow struct ci_pt_defaults
 * (declared in ci_dpm.h, not visible here).
 */
static const struct ci_pt_defaults defaults_hawaii_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
        { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
        { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
84
/*
 * DIDT (di/dt throttling) register programming list, applied by
 * ci_program_pt_config_registers() when DIDT is enabled in
 * ci_enable_didt().  Each entry is { offset, mask, shift, value, type };
 * every entry here targets the DIDT indirect register space
 * (CISLANDS_CONFIGREG_DIDT_IND).  The list is terminated by an entry
 * whose offset is 0xFFFFFFFF.  The four groups of registers (0x0x, 0x2x,
 * 0x4x, 0x6x) presumably correspond to the SQ/DB/TD/TCP blocks toggled in
 * ci_do_enable_didt() — TODO confirm against the CIK register reference.
 */
static const struct ci_pt_config_reg didt_config_ci[] =
{
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF } /* list terminator */
};
161
162 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
163 extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
164                                                             u32 *max_clock);
165 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
166                                        u32 arb_freq_src, u32 arb_freq_dest);
167 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
168 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
169 extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
170                                                      u32 max_voltage_steps,
171                                                      struct atom_voltage_table *voltage_table);
172 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
173 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
174 extern int ci_mc_load_microcode(struct radeon_device *rdev);
175 extern void cik_update_cg(struct radeon_device *rdev,
176                           u32 block, bool enable);
177
178 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
179                                          struct atom_voltage_table_entry *voltage_table,
180                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
181 static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
182 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
183                                        u32 target_tdp);
184 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
185
186 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
187 {
188         struct ci_power_info *pi = rdev->pm.dpm.priv;
189
190         return pi;
191 }
192
193 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
194 {
195         struct ci_ps *ps = rps->ps_priv;
196
197         return ps;
198 }
199
200 static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
201 {
202         struct ci_power_info *pi = ci_get_pi(rdev);
203
204         switch (rdev->pdev->device) {
205         case 0x6650:
206         case 0x6658:
207         case 0x665C:
208         default:
209                 pi->powertune_defaults = &defaults_bonaire_xt;
210                 break;
211         case 0x6651:
212         case 0x665D:
213                 pi->powertune_defaults = &defaults_bonaire_pro;
214                 break;
215         case 0x6640:
216                 pi->powertune_defaults = &defaults_saturn_xt;
217                 break;
218         case 0x6641:
219                 pi->powertune_defaults = &defaults_saturn_pro;
220                 break;
221         case 0x67B8:
222         case 0x67B0:
223         case 0x67A0:
224         case 0x67A1:
225         case 0x67A2:
226         case 0x67A8:
227         case 0x67A9:
228         case 0x67AA:
229         case 0x67B9:
230         case 0x67BE:
231                 pi->powertune_defaults = &defaults_hawaii_xt;
232                 break;
233         case 0x67BA:
234         case 0x67B1:
235                 pi->powertune_defaults = &defaults_hawaii_pro;
236                 break;
237         }
238
239         pi->dte_tj_offset = 0;
240
241         pi->caps_power_containment = true;
242         pi->caps_cac = false;
243         pi->caps_sq_ramping = false;
244         pi->caps_db_ramping = false;
245         pi->caps_td_ramping = false;
246         pi->caps_tcp_ramping = false;
247
248         if (pi->caps_power_containment) {
249                 pi->caps_cac = true;
250                 pi->enable_bapm_feature = true;
251                 pi->enable_tdc_limit_feature = true;
252                 pi->enable_pkg_pwr_tracking_feature = true;
253         }
254 }
255
256 static u8 ci_convert_to_vid(u16 vddc)
257 {
258         return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
259 }
260
261 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
262 {
263         struct ci_power_info *pi = ci_get_pi(rdev);
264         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
265         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
266         u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
267         u32 i;
268
269         if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
270                 return -EINVAL;
271         if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
272                 return -EINVAL;
273         if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
274             rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
275                 return -EINVAL;
276
277         for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
278                 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
279                         lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
280                         hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
281                         hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
282                 } else {
283                         lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
284                         hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
285                 }
286         }
287         return 0;
288 }
289
290 static int ci_populate_vddc_vid(struct radeon_device *rdev)
291 {
292         struct ci_power_info *pi = ci_get_pi(rdev);
293         u8 *vid = pi->smc_powertune_table.VddCVid;
294         u32 i;
295
296         if (pi->vddc_voltage_table.count > 8)
297                 return -EINVAL;
298
299         for (i = 0; i < pi->vddc_voltage_table.count; i++)
300                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
301
302         return 0;
303 }
304
305 static int ci_populate_svi_load_line(struct radeon_device *rdev)
306 {
307         struct ci_power_info *pi = ci_get_pi(rdev);
308         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
309
310         pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
311         pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
312         pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
313         pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
314
315         return 0;
316 }
317
318 static int ci_populate_tdc_limit(struct radeon_device *rdev)
319 {
320         struct ci_power_info *pi = ci_get_pi(rdev);
321         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
322         u16 tdc_limit;
323
324         tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
325         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
326         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
327                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
328         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
329
330         return 0;
331 }
332
/**
 * ci_populate_dw8 - set the TdcWaterfallCtl fuse field
 * @rdev: radeon device
 *
 * Reads the TdcWaterfallCtl dword location from SMC SRAM, then overwrites
 * the field with the per-ASIC default.  Returns -EINVAL if the SRAM read
 * fails, 0 otherwise.
 *
 * NOTE(review): on success the value just read is immediately replaced by
 * pt_defaults->tdc_waterfall_ctl, so the read result is discarded — it
 * appears to serve only as an SMC-accessibility check; confirm intent.
 */
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
352
/**
 * ci_min_max_v_gnbl_pm_lid_from_bapm_vddc - derive GNB LPML min/max VIDs
 * @rdev: radeon device
 *
 * Scans the 8-entry BapmVddCVidHiSidd/LoSidd arrays (skipping zero
 * entries) for the smallest and largest VID codes and stores them in
 * GnbLPMLMinVid/GnbLPMLMaxVid.  Returns -EINVAL if either extreme ends
 * up zero (i.e. the tables were not populated).
 *
 * NOTE(review): min/max are seeded from hi_vid[0] even when it is zero;
 * if hi_vid[0] is 0 while later entries are non-zero, min never rises
 * above 0 and the function reports -EINVAL — confirm this is the
 * intended "invalid table" behavior.
 */
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}
384
385 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
386 {
387         struct ci_power_info *pi = ci_get_pi(rdev);
388         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
389         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
390         struct radeon_cac_tdp_table *cac_tdp_table =
391                 rdev->pm.dpm.dyn_state.cac_tdp_table;
392
393         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
394         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
395
396         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
397         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
398
399         return 0;
400 }
401
/**
 * ci_populate_bapm_parameters_in_dpm_table - fill the BAPM/DTE fields of
 * the SMC dpm table
 * @rdev: radeon device
 *
 * Copies TDP limits from the cac_tdp table, thermal trip points from the
 * driver's thermal settings, optional platform power-management (PPM)
 * limits, and the per-ASIC BAPMTI_R/BAPMTI_RC thermal-coupling tables
 * into pi->smc_state_table.  Always returns 0.
 *
 * NOTE(review): DefaultTdp/TargetTdp are written without cpu_to_be16(),
 * unlike the PPM and BAPM_TEMP_GRADIENT fields below — confirm the SMC
 * expects these particular fields in CPU byte order.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	/* TDP values are scaled by 256 (presumably 8.8 fixed point). */
	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	/* Thermal trip is kept in millidegrees in the driver; SMC wants degrees. */
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* PPM limits are optional; zero them when no ppm table is present. */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	/* The flat per-ASIC arrays are walked linearly in iteration/source/sink
	 * order to fill the 3-D SMC tables (big-endian). */
	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
449
450 static int ci_populate_pm_base(struct radeon_device *rdev)
451 {
452         struct ci_power_info *pi = ci_get_pi(rdev);
453         u32 pm_fuse_table_offset;
454         int ret;
455
456         if (pi->caps_power_containment) {
457                 ret = ci_read_smc_sram_dword(rdev,
458                                              SMU7_FIRMWARE_HEADER_LOCATION +
459                                              offsetof(SMU7_Firmware_Header, PmFuseTable),
460                                              &pm_fuse_table_offset, pi->sram_end);
461                 if (ret)
462                         return ret;
463                 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
464                 if (ret)
465                         return ret;
466                 ret = ci_populate_vddc_vid(rdev);
467                 if (ret)
468                         return ret;
469                 ret = ci_populate_svi_load_line(rdev);
470                 if (ret)
471                         return ret;
472                 ret = ci_populate_tdc_limit(rdev);
473                 if (ret)
474                         return ret;
475                 ret = ci_populate_dw8(rdev);
476                 if (ret)
477                         return ret;
478                 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
479                 if (ret)
480                         return ret;
481                 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
482                 if (ret)
483                         return ret;
484                 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
485                                            (u8 *)&pi->smc_powertune_table,
486                                            sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
487                 if (ret)
488                         return ret;
489         }
490
491         return 0;
492 }
493
494 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
495 {
496         struct ci_power_info *pi = ci_get_pi(rdev);
497         u32 data;
498
499         if (pi->caps_sq_ramping) {
500                 data = RREG32_DIDT(DIDT_SQ_CTRL0);
501                 if (enable)
502                         data |= DIDT_CTRL_EN;
503                 else
504                         data &= ~DIDT_CTRL_EN;
505                 WREG32_DIDT(DIDT_SQ_CTRL0, data);
506         }
507
508         if (pi->caps_db_ramping) {
509                 data = RREG32_DIDT(DIDT_DB_CTRL0);
510                 if (enable)
511                         data |= DIDT_CTRL_EN;
512                 else
513                         data &= ~DIDT_CTRL_EN;
514                 WREG32_DIDT(DIDT_DB_CTRL0, data);
515         }
516
517         if (pi->caps_td_ramping) {
518                 data = RREG32_DIDT(DIDT_TD_CTRL0);
519                 if (enable)
520                         data |= DIDT_CTRL_EN;
521                 else
522                         data &= ~DIDT_CTRL_EN;
523                 WREG32_DIDT(DIDT_TD_CTRL0, data);
524         }
525
526         if (pi->caps_tcp_ramping) {
527                 data = RREG32_DIDT(DIDT_TCP_CTRL0);
528                 if (enable)
529                         data |= DIDT_CTRL_EN;
530                 else
531                         data &= ~DIDT_CTRL_EN;
532                 WREG32_DIDT(DIDT_TCP_CTRL0, data);
533         }
534 }
535
/**
 * ci_program_pt_config_registers - apply a PowerTune register program list
 * @rdev: radeon device
 * @cac_config_regs: 0xFFFFFFFF-terminated array of {offset, mask, shift,
 *                   value, type} entries (e.g. didt_config_ci)
 *
 * Walks the list and performs read-modify-write updates through the
 * register space selected by each entry's type (SMC indirect, DIDT
 * indirect, or plain MMIO — plain offsets are dword indices, hence the
 * << 2).  Entries of type CISLANDS_CONFIGREG_CACHE touch no hardware:
 * their shifted value is accumulated in @cache and OR-ed into the next
 * hardware write, after which the cache is reset.
 *
 * Returns -EINVAL if @cac_config_regs is NULL, 0 otherwise.
 */
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			/* defer: fold this field into the next real write */
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}
583
584 static int ci_enable_didt(struct radeon_device *rdev, bool enable)
585 {
586         struct ci_power_info *pi = ci_get_pi(rdev);
587         int ret;
588
589         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
590             pi->caps_td_ramping || pi->caps_tcp_ramping) {
591                 cik_enter_rlc_safe_mode(rdev);
592
593                 if (enable) {
594                         ret = ci_program_pt_config_registers(rdev, didt_config_ci);
595                         if (ret) {
596                                 cik_exit_rlc_safe_mode(rdev);
597                                 return ret;
598                         }
599                 }
600
601                 ci_do_enable_didt(rdev, enable);
602
603                 cik_exit_rlc_safe_mode(rdev);
604         }
605
606         return 0;
607 }
608
/**
 * ci_enable_power_containment - toggle SMC power-containment features
 * @rdev: radeon device
 * @enable: true to enable, false to disable
 *
 * On enable, requests each capability-gated feature from the SMC (BAPM/
 * DTE, TDC limit, package power limit) and records the ones that were
 * accepted in pi->power_containment_features; a rejected request sets
 * the return to -EINVAL but the remaining features are still attempted.
 * When the package power limit is accepted, the default limit from the
 * cac_tdp table is programmed immediately.  On disable, only the
 * previously-recorded features are torn down (disable results ignored).
 */
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					/* limit is scaled by 256, like the other TDP values */
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}
666
667 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
668 {
669         struct ci_power_info *pi = ci_get_pi(rdev);
670         PPSMC_Result smc_result;
671         int ret = 0;
672
673         if (pi->caps_cac) {
674                 if (enable) {
675                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
676                         if (smc_result != PPSMC_Result_OK) {
677                                 ret = -EINVAL;
678                                 pi->cac_enabled = false;
679                         } else {
680                                 pi->cac_enabled = true;
681                         }
682                 } else if (pi->cac_enabled) {
683                         ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
684                         pi->cac_enabled = false;
685                 }
686         }
687
688         return ret;
689 }
690
691 static int ci_power_control_set_level(struct radeon_device *rdev)
692 {
693         struct ci_power_info *pi = ci_get_pi(rdev);
694         struct radeon_cac_tdp_table *cac_tdp_table =
695                 rdev->pm.dpm.dyn_state.cac_tdp_table;
696         s32 adjust_percent;
697         s32 target_tdp;
698         int ret = 0;
699         bool adjust_polarity = false; /* ??? */
700
701         if (pi->caps_power_containment &&
702             (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
703                 adjust_percent = adjust_polarity ?
704                         rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
705                 target_tdp = ((100 + adjust_percent) *
706                               (s32)cac_tdp_table->configurable_tdp) / 100;
707                 target_tdp *= 256;
708
709                 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
710         }
711
712         return ret;
713 }
714
715 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
716 {
717         struct ci_power_info *pi = ci_get_pi(rdev);
718
719         if (pi->uvd_power_gated == gate)
720                 return;
721
722         pi->uvd_power_gated = gate;
723
724         ci_update_uvd_dpm(rdev, gate);
725 }
726
727 bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
728 {
729         struct ci_power_info *pi = ci_get_pi(rdev);
730         u32 vblank_time = r600_dpm_get_vblank_time(rdev);
731         u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
732
733         if (vblank_time < switch_limit)
734                 return true;
735         else
736                 return false;
737
738 }
739
/*
 * Clamp and patch the requested power state before it is programmed:
 * pick VCE clocks, decide whether mclk switching is possible, apply
 * AC/DC and voltage-dependency clock limits, and force the low levels
 * up when mclk switching is disabled.  Modifies *rps/ps in place.
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	int i;

	/* pull evclk/ecclk from the current VCE state, or zero them */
	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* mclk switching is unsafe with multiple heads or a short vblank */
	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on DC, cap every performance level at the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	/* a returned limit of 0 means "no limit known" for that table */
	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
	}

	/* XXX validate the min clocks required for display */

	/* with mclk switching off, pin mclk at the highest level's value */
	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	/* VCE imposes minimum engine/memory clocks */
	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* keep the levels monotonic: level 1 must not be below level 0 */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
838
/*
 * Program the thermal interrupt trip points.  min_temp/max_temp are in
 * millidegrees C; the requested range is clamped to the hardware's
 * 0..255 C window and rejected if it collapses to an empty range.
 */
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	/* low_temp = max(0, min_temp); high_temp = min(255000, max_temp) */
	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	/* register fields are in whole degrees C, hence the / 1000 */
	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	return 0;
}
871
#if 0
/*
 * Read one dword from the SMC soft-register block (currently unused;
 * kept compiled out as the counterpart to ci_write_smc_soft_register).
 */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif
883
884 static int ci_write_smc_soft_register(struct radeon_device *rdev,
885                                       u16 reg_offset, u32 value)
886 {
887         struct ci_power_info *pi = ci_get_pi(rdev);
888
889         return ci_write_smc_sram_dword(rdev,
890                                        pi->soft_regs_start + reg_offset,
891                                        value, pi->sram_end);
892 }
893
894 static void ci_init_fps_limits(struct radeon_device *rdev)
895 {
896         struct ci_power_info *pi = ci_get_pi(rdev);
897         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
898
899         if (pi->caps_fps) {
900                 u16 tmp;
901
902                 tmp = 45;
903                 table->FpsHighT = cpu_to_be16(tmp);
904
905                 tmp = 30;
906                 table->FpsLowT = cpu_to_be16(tmp);
907         }
908 }
909
910 static int ci_update_sclk_t(struct radeon_device *rdev)
911 {
912         struct ci_power_info *pi = ci_get_pi(rdev);
913         int ret = 0;
914         u32 low_sclk_interrupt_t = 0;
915
916         if (pi->caps_sclk_throttle_low_notification) {
917                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
918
919                 ret = ci_copy_bytes_to_smc(rdev,
920                                            pi->dpm_table_start +
921                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
922                                            (u8 *)&low_sclk_interrupt_t,
923                                            sizeof(u32), pi->sram_end);
924
925         }
926
927         return ret;
928 }
929
930 static void ci_get_leakage_voltages(struct radeon_device *rdev)
931 {
932         struct ci_power_info *pi = ci_get_pi(rdev);
933         u16 leakage_id, virtual_voltage_id;
934         u16 vddc, vddci;
935         int i;
936
937         pi->vddc_leakage.count = 0;
938         pi->vddci_leakage.count = 0;
939
940         if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
941                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
942                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
943                         if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
944                                                                                  virtual_voltage_id,
945                                                                                  leakage_id) == 0) {
946                                 if (vddc != 0 && vddc != virtual_voltage_id) {
947                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
948                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
949                                         pi->vddc_leakage.count++;
950                                 }
951                                 if (vddci != 0 && vddci != virtual_voltage_id) {
952                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
953                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
954                                         pi->vddci_leakage.count++;
955                                 }
956                         }
957                 }
958         }
959 }
960
/*
 * Configure thermal protection according to the bitmask of active
 * auto-throttle sources.  With no sources, thermal protection is
 * forced off; otherwise it follows pi->thermal_protection.  Note that
 * dpm_event_src is currently only consumed by the compiled-out
 * CG_THERMAL_CTRL programming below.
 */
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		/* THERMAL_PROTECTION_DIS is a disable bit: clear to enable */
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
1009
1010 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1011                                            enum radeon_dpm_auto_throttle_src source,
1012                                            bool enable)
1013 {
1014         struct ci_power_info *pi = ci_get_pi(rdev);
1015
1016         if (enable) {
1017                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1018                         pi->active_auto_throttle_sources |= 1 << source;
1019                         ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1020                 }
1021         } else {
1022                 if (pi->active_auto_throttle_sources & (1 << source)) {
1023                         pi->active_auto_throttle_sources &= ~(1 << source);
1024                         ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1025                 }
1026         }
1027 }
1028
1029 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
1030 {
1031         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1032                 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1033 }
1034
1035 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
1036 {
1037         struct ci_power_info *pi = ci_get_pi(rdev);
1038         PPSMC_Result smc_result;
1039
1040         if (!pi->need_update_smu7_dpm_table)
1041                 return 0;
1042
1043         if ((!pi->sclk_dpm_key_disabled) &&
1044             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1045                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1046                 if (smc_result != PPSMC_Result_OK)
1047                         return -EINVAL;
1048         }
1049
1050         if ((!pi->mclk_dpm_key_disabled) &&
1051             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1052                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1053                 if (smc_result != PPSMC_Result_OK)
1054                         return -EINVAL;
1055         }
1056
1057         pi->need_update_smu7_dpm_table = 0;
1058         return 0;
1059 }
1060
/*
 * Enable or disable SCLK and MCLK DPM in the SMC.  On MCLK enable the
 * memory CAC counters are also armed via a fixed LCAC programming
 * sequence (magic values with a settle delay; order matters).
 * Returns -EINVAL on any rejected SMC message.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			/* two-step LCAC arming with a short settle delay */
			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
1106
/*
 * Bring up dynamic power management: set the global/engine PM enable
 * bits, program the voltage-change timeout soft register, release the
 * BIF link counter reset, then enable voltage control, SCLK/MCLK DPM
 * and (optionally) PCIe DPM in the SMC.  Returns 0 on success.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1142
1143 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1144 {
1145         struct ci_power_info *pi = ci_get_pi(rdev);
1146         PPSMC_Result smc_result;
1147
1148         if (!pi->need_update_smu7_dpm_table)
1149                 return 0;
1150
1151         if ((!pi->sclk_dpm_key_disabled) &&
1152             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1153                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1154                 if (smc_result != PPSMC_Result_OK)
1155                         return -EINVAL;
1156         }
1157
1158         if ((!pi->mclk_dpm_key_disabled) &&
1159             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1160                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1161                 if (smc_result != PPSMC_Result_OK)
1162                         return -EINVAL;
1163         }
1164
1165         return 0;
1166 }
1167
1168 static int ci_stop_dpm(struct radeon_device *rdev)
1169 {
1170         struct ci_power_info *pi = ci_get_pi(rdev);
1171         PPSMC_Result smc_result;
1172         int ret;
1173         u32 tmp;
1174
1175         tmp = RREG32_SMC(GENERAL_PWRMGT);
1176         tmp &= ~GLOBAL_PWRMGT_EN;
1177         WREG32_SMC(GENERAL_PWRMGT, tmp);
1178
1179         tmp = RREG32(SCLK_PWRMGT_CNTL);
1180         tmp &= ~DYNAMIC_PM_EN;
1181         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1182
1183         if (!pi->pcie_dpm_key_disabled) {
1184                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1185                 if (smc_result != PPSMC_Result_OK)
1186                         return -EINVAL;
1187         }
1188
1189         ret = ci_enable_sclk_mclk_dpm(rdev, false);
1190         if (ret)
1191                 return ret;
1192
1193         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1194         if (smc_result != PPSMC_Result_OK)
1195                 return -EINVAL;
1196
1197         return 0;
1198 }
1199
1200 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1201 {
1202         u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1203
1204         if (enable)
1205                 tmp &= ~SCLK_PWRMGT_OFF;
1206         else
1207                 tmp |= SCLK_PWRMGT_OFF;
1208         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1209 }
1210
#if 0
/*
 * Compiled-out AC/DC transition hook: pick the power limit for the new
 * source (in 1/256 W units for the SMC) and tell the SMC which source
 * is active when automatic DC transitions are supported.
 */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
1237
/*
 * Send an SMC message that takes one argument: the parameter is placed
 * in SMC_MSG_ARG_0 before the message itself is issued.
 */
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}
1244
1245 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1246                                                         PPSMC_Msg msg, u32 *parameter)
1247 {
1248         PPSMC_Result smc_result;
1249
1250         smc_result = ci_send_msg_to_smc(rdev, msg);
1251
1252         if ((smc_result == PPSMC_Result_OK) && parameter)
1253                 *parameter = RREG32(SMC_MSG_ARG_0);
1254
1255         return smc_result;
1256 }
1257
1258 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1259 {
1260         struct ci_power_info *pi = ci_get_pi(rdev);
1261
1262         if (!pi->sclk_dpm_key_disabled) {
1263                 PPSMC_Result smc_result =
1264                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1265                 if (smc_result != PPSMC_Result_OK)
1266                         return -EINVAL;
1267         }
1268
1269         return 0;
1270 }
1271
1272 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1273 {
1274         struct ci_power_info *pi = ci_get_pi(rdev);
1275
1276         if (!pi->mclk_dpm_key_disabled) {
1277                 PPSMC_Result smc_result =
1278                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1279                 if (smc_result != PPSMC_Result_OK)
1280                         return -EINVAL;
1281         }
1282
1283         return 0;
1284 }
1285
1286 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1287 {
1288         struct ci_power_info *pi = ci_get_pi(rdev);
1289
1290         if (!pi->pcie_dpm_key_disabled) {
1291                 PPSMC_Result smc_result =
1292                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1293                 if (smc_result != PPSMC_Result_OK)
1294                         return -EINVAL;
1295         }
1296
1297         return 0;
1298 }
1299
1300 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1301 {
1302         struct ci_power_info *pi = ci_get_pi(rdev);
1303
1304         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1305                 PPSMC_Result smc_result =
1306                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1307                 if (smc_result != PPSMC_Result_OK)
1308                         return -EINVAL;
1309         }
1310
1311         return 0;
1312 }
1313
1314 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1315                                        u32 target_tdp)
1316 {
1317         PPSMC_Result smc_result =
1318                 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1319         if (smc_result != PPSMC_Result_OK)
1320                 return -EINVAL;
1321         return 0;
1322 }
1323
/*
 * Return to the boot state by disabling SCLK/MCLK DPM, which drops the
 * clocks back to their boot defaults.
 */
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
1328
1329 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1330 {
1331         u32 sclk_freq;
1332         PPSMC_Result smc_result =
1333                 ci_send_msg_to_smc_return_parameter(rdev,
1334                                                     PPSMC_MSG_API_GetSclkFrequency,
1335                                                     &sclk_freq);
1336         if (smc_result != PPSMC_Result_OK)
1337                 sclk_freq = 0;
1338
1339         return sclk_freq;
1340 }
1341
1342 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1343 {
1344         u32 mclk_freq;
1345         PPSMC_Result smc_result =
1346                 ci_send_msg_to_smc_return_parameter(rdev,
1347                                                     PPSMC_MSG_API_GetMclkFrequency,
1348                                                     &mclk_freq);
1349         if (smc_result != PPSMC_Result_OK)
1350                 mclk_freq = 0;
1351
1352         return mclk_freq;
1353 }
1354
/*
 * Start the SMC firmware (jump vector, clock, release from reset) and
 * poll FIRMWARE_FLAGS until it reports interrupts enabled or the usec
 * timeout expires.  Note: the timeout case is not reported.
 */
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}
1367
/*
 * Halt the SMC firmware: put it into reset, then stop its clock.
 */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}
1373
1374 static int ci_process_firmware_header(struct radeon_device *rdev)
1375 {
1376         struct ci_power_info *pi = ci_get_pi(rdev);
1377         u32 tmp;
1378         int ret;
1379
1380         ret = ci_read_smc_sram_dword(rdev,
1381                                      SMU7_FIRMWARE_HEADER_LOCATION +
1382                                      offsetof(SMU7_Firmware_Header, DpmTable),
1383                                      &tmp, pi->sram_end);
1384         if (ret)
1385                 return ret;
1386
1387         pi->dpm_table_start = tmp;
1388
1389         ret = ci_read_smc_sram_dword(rdev,
1390                                      SMU7_FIRMWARE_HEADER_LOCATION +
1391                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1392                                      &tmp, pi->sram_end);
1393         if (ret)
1394                 return ret;
1395
1396         pi->soft_regs_start = tmp;
1397
1398         ret = ci_read_smc_sram_dword(rdev,
1399                                      SMU7_FIRMWARE_HEADER_LOCATION +
1400                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1401                                      &tmp, pi->sram_end);
1402         if (ret)
1403                 return ret;
1404
1405         pi->mc_reg_table_start = tmp;
1406
1407         ret = ci_read_smc_sram_dword(rdev,
1408                                      SMU7_FIRMWARE_HEADER_LOCATION +
1409                                      offsetof(SMU7_Firmware_Header, FanTable),
1410                                      &tmp, pi->sram_end);
1411         if (ret)
1412                 return ret;
1413
1414         pi->fan_table_start = tmp;
1415
1416         ret = ci_read_smc_sram_dword(rdev,
1417                                      SMU7_FIRMWARE_HEADER_LOCATION +
1418                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1419                                      &tmp, pi->sram_end);
1420         if (ret)
1421                 return ret;
1422
1423         pi->arb_table_start = tmp;
1424
1425         return 0;
1426 }
1427
/*
 * Snapshot the SPLL/MPLL clock control registers into pi so their boot
 * values can be restored or used as a programming baseline later.
 * SPLL registers live in SMC space; the MPLL/DLL ones are plain MMIO.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1454
/*
 * Reset the cached low-sclk interrupt threshold; pushed to the SMC
 * later by ci_update_sclk_t().
 */
static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}
1461
1462 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1463                                          bool enable)
1464 {
1465         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1466
1467         if (enable)
1468                 tmp &= ~THERMAL_PROTECTION_DIS;
1469         else
1470                 tmp |= THERMAL_PROTECTION_DIS;
1471         WREG32_SMC(GENERAL_PWRMGT, tmp);
1472 }
1473
1474 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1475 {
1476         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1477
1478         tmp |= STATIC_PM_EN;
1479
1480         WREG32_SMC(GENERAL_PWRMGT, tmp);
1481 }
1482
#if 0
/*
 * Compiled-out ULP (ultra low power) entry: request minimum power and
 * give the SMC time to act.  NOTE(review): udelay(25000)/udelay(7000)
 * exceed the usual udelay range — convert to mdelay() if this code is
 * ever enabled.
 */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

/*
 * Compiled-out ULP exit: request resume, then poll SMC_RESP_0 for an
 * acknowledgment up to the usec timeout.
 */
static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif
1511
1512 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1513                                         bool has_display)
1514 {
1515         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1516
1517         return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1518 }
1519
1520 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1521                                       bool enable)
1522 {
1523         struct ci_power_info *pi = ci_get_pi(rdev);
1524
1525         if (enable) {
1526                 if (pi->caps_sclk_ds) {
1527                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1528                                 return -EINVAL;
1529                 } else {
1530                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1531                                 return -EINVAL;
1532                 }
1533         } else {
1534                 if (pi->caps_sclk_ds) {
1535                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1536                                 return -EINVAL;
1537                 }
1538         }
1539
1540         return 0;
1541 }
1542
/*
 * Program the display-gap power-management registers from the current
 * display configuration: select vblank-based gap handling when any
 * CRTC is active, compute the pre-vblank interval for the SMC, and
 * notify the SMC whether exactly one display is active.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	/* Fall back to sane defaults when the display code can't say. */
	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	/*
	 * NOTE(review): this subtraction wraps if vblank_time exceeds
	 * frame_time_in_us - 200; presumably real vblank times are much
	 * shorter than a frame -- confirm.
	 */
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	/* ref_clock is in 10 kHz units; convert us to reference ticks. */
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
1576
1577 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1578 {
1579         struct ci_power_info *pi = ci_get_pi(rdev);
1580         u32 tmp;
1581
1582         if (enable) {
1583                 if (pi->caps_sclk_ss_support) {
1584                         tmp = RREG32_SMC(GENERAL_PWRMGT);
1585                         tmp |= DYN_SPREAD_SPECTRUM_EN;
1586                         WREG32_SMC(GENERAL_PWRMGT, tmp);
1587                 }
1588         } else {
1589                 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1590                 tmp &= ~SSEN;
1591                 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1592
1593                 tmp = RREG32_SMC(GENERAL_PWRMGT);
1594                 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1595                 WREG32_SMC(GENERAL_PWRMGT, tmp);
1596         }
1597 }
1598
1599 static void ci_program_sstp(struct radeon_device *rdev)
1600 {
1601         WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
1602 }
1603
1604 static void ci_enable_display_gap(struct radeon_device *rdev)
1605 {
1606         u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1607
1608         tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1609         tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1610                 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1611
1612         WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1613 }
1614
1615 static void ci_program_vc(struct radeon_device *rdev)
1616 {
1617         u32 tmp;
1618
1619         tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1620         tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
1621         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1622
1623         WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
1624         WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
1625         WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
1626         WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
1627         WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
1628         WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
1629         WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
1630         WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
1631 }
1632
1633 static void ci_clear_vc(struct radeon_device *rdev)
1634 {
1635         u32 tmp;
1636
1637         tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1638         tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
1639         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1640
1641         WREG32_SMC(CG_FTV_0, 0);
1642         WREG32_SMC(CG_FTV_1, 0);
1643         WREG32_SMC(CG_FTV_2, 0);
1644         WREG32_SMC(CG_FTV_3, 0);
1645         WREG32_SMC(CG_FTV_4, 0);
1646         WREG32_SMC(CG_FTV_5, 0);
1647         WREG32_SMC(CG_FTV_6, 0);
1648         WREG32_SMC(CG_FTV_7, 0);
1649 }
1650
1651 static int ci_upload_firmware(struct radeon_device *rdev)
1652 {
1653         struct ci_power_info *pi = ci_get_pi(rdev);
1654         int i, ret;
1655
1656         for (i = 0; i < rdev->usec_timeout; i++) {
1657                 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1658                         break;
1659         }
1660         WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1661
1662         ci_stop_smc_clock(rdev);
1663         ci_reset_smc(rdev);
1664
1665         ret = ci_load_smc_ucode(rdev, pi->sram_end);
1666
1667         return ret;
1668
1669 }
1670
1671 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1672                                      struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1673                                      struct atom_voltage_table *voltage_table)
1674 {
1675         u32 i;
1676
1677         if (voltage_dependency_table == NULL)
1678                 return -EINVAL;
1679
1680         voltage_table->mask_low = 0;
1681         voltage_table->phase_delay = 0;
1682
1683         voltage_table->count = voltage_dependency_table->count;
1684         for (i = 0; i < voltage_table->count; i++) {
1685                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1686                 voltage_table->entries[i].smio_low = 0;
1687         }
1688
1689         return 0;
1690 }
1691
1692 static int ci_construct_voltage_tables(struct radeon_device *rdev)
1693 {
1694         struct ci_power_info *pi = ci_get_pi(rdev);
1695         int ret;
1696
1697         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1698                 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1699                                                     VOLTAGE_OBJ_GPIO_LUT,
1700                                                     &pi->vddc_voltage_table);
1701                 if (ret)
1702                         return ret;
1703         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1704                 ret = ci_get_svi2_voltage_table(rdev,
1705                                                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1706                                                 &pi->vddc_voltage_table);
1707                 if (ret)
1708                         return ret;
1709         }
1710
1711         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1712                 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1713                                                          &pi->vddc_voltage_table);
1714
1715         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1716                 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1717                                                     VOLTAGE_OBJ_GPIO_LUT,
1718                                                     &pi->vddci_voltage_table);
1719                 if (ret)
1720                         return ret;
1721         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1722                 ret = ci_get_svi2_voltage_table(rdev,
1723                                                 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1724                                                 &pi->vddci_voltage_table);
1725                 if (ret)
1726                         return ret;
1727         }
1728
1729         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1730                 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1731                                                          &pi->vddci_voltage_table);
1732
1733         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1734                 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1735                                                     VOLTAGE_OBJ_GPIO_LUT,
1736                                                     &pi->mvdd_voltage_table);
1737                 if (ret)
1738                         return ret;
1739         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1740                 ret = ci_get_svi2_voltage_table(rdev,
1741                                                 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1742                                                 &pi->mvdd_voltage_table);
1743                 if (ret)
1744                         return ret;
1745         }
1746
1747         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1748                 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1749                                                          &pi->mvdd_voltage_table);
1750
1751         return 0;
1752 }
1753
1754 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1755                                           struct atom_voltage_table_entry *voltage_table,
1756                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
1757 {
1758         int ret;
1759
1760         ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1761                                             &smc_voltage_table->StdVoltageHiSidd,
1762                                             &smc_voltage_table->StdVoltageLoSidd);
1763
1764         if (ret) {
1765                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1766                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1767         }
1768
1769         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1770         smc_voltage_table->StdVoltageHiSidd =
1771                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1772         smc_voltage_table->StdVoltageLoSidd =
1773                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1774 }
1775
1776 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1777                                       SMU7_Discrete_DpmTable *table)
1778 {
1779         struct ci_power_info *pi = ci_get_pi(rdev);
1780         unsigned int count;
1781
1782         table->VddcLevelCount = pi->vddc_voltage_table.count;
1783         for (count = 0; count < table->VddcLevelCount; count++) {
1784                 ci_populate_smc_voltage_table(rdev,
1785                                               &pi->vddc_voltage_table.entries[count],
1786                                               &table->VddcLevel[count]);
1787
1788                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1789                         table->VddcLevel[count].Smio |=
1790                                 pi->vddc_voltage_table.entries[count].smio_low;
1791                 else
1792                         table->VddcLevel[count].Smio = 0;
1793         }
1794         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1795
1796         return 0;
1797 }
1798
1799 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1800                                        SMU7_Discrete_DpmTable *table)
1801 {
1802         unsigned int count;
1803         struct ci_power_info *pi = ci_get_pi(rdev);
1804
1805         table->VddciLevelCount = pi->vddci_voltage_table.count;
1806         for (count = 0; count < table->VddciLevelCount; count++) {
1807                 ci_populate_smc_voltage_table(rdev,
1808                                               &pi->vddci_voltage_table.entries[count],
1809                                               &table->VddciLevel[count]);
1810
1811                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1812                         table->VddciLevel[count].Smio |=
1813                                 pi->vddci_voltage_table.entries[count].smio_low;
1814                 else
1815                         table->VddciLevel[count].Smio = 0;
1816         }
1817         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1818
1819         return 0;
1820 }
1821
1822 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1823                                       SMU7_Discrete_DpmTable *table)
1824 {
1825         struct ci_power_info *pi = ci_get_pi(rdev);
1826         unsigned int count;
1827
1828         table->MvddLevelCount = pi->mvdd_voltage_table.count;
1829         for (count = 0; count < table->MvddLevelCount; count++) {
1830                 ci_populate_smc_voltage_table(rdev,
1831                                               &pi->mvdd_voltage_table.entries[count],
1832                                               &table->MvddLevel[count]);
1833
1834                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1835                         table->MvddLevel[count].Smio |=
1836                                 pi->mvdd_voltage_table.entries[count].smio_low;
1837                 else
1838                         table->MvddLevel[count].Smio = 0;
1839         }
1840         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1841
1842         return 0;
1843 }
1844
1845 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1846                                           SMU7_Discrete_DpmTable *table)
1847 {
1848         int ret;
1849
1850         ret = ci_populate_smc_vddc_table(rdev, table);
1851         if (ret)
1852                 return ret;
1853
1854         ret = ci_populate_smc_vddci_table(rdev, table);
1855         if (ret)
1856                 return ret;
1857
1858         ret = ci_populate_smc_mvdd_table(rdev, table);
1859         if (ret)
1860                 return ret;
1861
1862         return 0;
1863 }
1864
/*
 * Look up the MVDD voltage for a memory clock: picks the first
 * dependency-table entry whose clock is >= mclk and writes the
 * matching voltage-table value into @voltage.
 *
 * NOTE(review): this function returns -EINVAL on every path, even
 * after a successful lookup (voltage->Voltage is still written).
 * Callers appear to rely on the existing return behavior, so it is
 * deliberately left untouched here -- confirm before "fixing" it.
 */
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		/* No dependency entry can serve this mclk. */
		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;
	}

	return -EINVAL;
}
1885
1886 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1887                                          struct atom_voltage_table_entry *voltage_table,
1888                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1889 {
1890         u16 v_index, idx;
1891         bool voltage_found = false;
1892         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1893         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1894
1895         if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1896                 return -EINVAL;
1897
1898         if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1899                 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1900                         if (voltage_table->value ==
1901                             rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1902                                 voltage_found = true;
1903                                 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1904                                         idx = v_index;
1905                                 else
1906                                         idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1907                                 *std_voltage_lo_sidd =
1908                                         rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1909                                 *std_voltage_hi_sidd =
1910                                         rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1911                                 break;
1912                         }
1913                 }
1914
1915                 if (!voltage_found) {
1916                         for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1917                                 if (voltage_table->value <=
1918                                     rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1919                                         voltage_found = true;
1920                                         if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1921                                                 idx = v_index;
1922                                         else
1923                                                 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1924                                         *std_voltage_lo_sidd =
1925                                                 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1926                                         *std_voltage_hi_sidd =
1927                                                 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1928                                         break;
1929                                 }
1930                         }
1931                 }
1932         }
1933
1934         return 0;
1935 }
1936
1937 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1938                                                   const struct radeon_phase_shedding_limits_table *limits,
1939                                                   u32 sclk,
1940                                                   u32 *phase_shedding)
1941 {
1942         unsigned int i;
1943
1944         *phase_shedding = 1;
1945
1946         for (i = 0; i < limits->count; i++) {
1947                 if (sclk < limits->entries[i].sclk) {
1948                         *phase_shedding = i;
1949                         break;
1950                 }
1951         }
1952 }
1953
1954 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1955                                                   const struct radeon_phase_shedding_limits_table *limits,
1956                                                   u32 mclk,
1957                                                   u32 *phase_shedding)
1958 {
1959         unsigned int i;
1960
1961         *phase_shedding = 1;
1962
1963         for (i = 0; i < limits->count; i++) {
1964                 if (mclk < limits->entries[i].mclk) {
1965                         *phase_shedding = i;
1966                         break;
1967                 }
1968         }
1969 }
1970
1971 static int ci_init_arb_table_index(struct radeon_device *rdev)
1972 {
1973         struct ci_power_info *pi = ci_get_pi(rdev);
1974         u32 tmp;
1975         int ret;
1976
1977         ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1978                                      &tmp, pi->sram_end);
1979         if (ret)
1980                 return ret;
1981
1982         tmp &= 0x00FFFFFF;
1983         tmp |= MC_CG_ARB_FREQ_F1 << 24;
1984
1985         return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1986                                        tmp, pi->sram_end);
1987 }
1988
1989 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1990                                          struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1991                                          u32 clock, u32 *voltage)
1992 {
1993         u32 i = 0;
1994
1995         if (allowed_clock_voltage_table->count == 0)
1996                 return -EINVAL;
1997
1998         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1999                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2000                         *voltage = allowed_clock_voltage_table->entries[i].v;
2001                         return 0;
2002                 }
2003         }
2004
2005         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2006
2007         return 0;
2008 }
2009
2010 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2011                                              u32 sclk, u32 min_sclk_in_sr)
2012 {
2013         u32 i;
2014         u32 tmp;
2015         u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2016                 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2017
2018         if (sclk < min)
2019                 return 0;
2020
2021         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2022                 tmp = sclk / (1 << i);
2023                 if (tmp >= min || i == 0)
2024                         break;
2025         }
2026
2027         return (u8)i;
2028 }
2029
/* One-time switch of the MC arbitration registers from set F0 to F1. */
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
2034
2035 static int ci_reset_to_default(struct radeon_device *rdev)
2036 {
2037         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2038                 0 : -EINVAL;
2039 }
2040
2041 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2042 {
2043         u32 tmp;
2044
2045         tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2046
2047         if (tmp == MC_CG_ARB_FREQ_F0)
2048                 return 0;
2049
2050         return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2051 }
2052
2053 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2054                                                 u32 sclk,
2055                                                 u32 mclk,
2056                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2057 {
2058         u32 dram_timing;
2059         u32 dram_timing2;
2060         u32 burst_time;
2061
2062         radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2063
2064         dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2065         dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2066         burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2067
2068         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2069         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2070         arb_regs->McArbBurstTime = (u8)burst_time;
2071
2072         return 0;
2073 }
2074
2075 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2076 {
2077         struct ci_power_info *pi = ci_get_pi(rdev);
2078         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2079         u32 i, j;
2080         int ret =  0;
2081
2082         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2083
2084         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2085                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2086                         ret = ci_populate_memory_timing_parameters(rdev,
2087                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2088                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2089                                                                    &arb_regs.entries[i][j]);
2090                         if (ret)
2091                                 break;
2092                 }
2093         }
2094
2095         if (ret == 0)
2096                 ret = ci_copy_bytes_to_smc(rdev,
2097                                            pi->arb_table_start,
2098                                            (u8 *)&arb_regs,
2099                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2100                                            pi->sram_end);
2101
2102         return ret;
2103 }
2104
2105 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2106 {
2107         struct ci_power_info *pi = ci_get_pi(rdev);
2108
2109         if (pi->need_update_smu7_dpm_table == 0)
2110                 return 0;
2111
2112         return ci_do_program_memory_timing_parameters(rdev);
2113 }
2114
2115 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2116                                           struct radeon_ps *radeon_boot_state)
2117 {
2118         struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2119         struct ci_power_info *pi = ci_get_pi(rdev);
2120         u32 level = 0;
2121
2122         for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2123                 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2124                     boot_state->performance_levels[0].sclk) {
2125                         pi->smc_state_table.GraphicsBootLevel = level;
2126                         break;
2127                 }
2128         }
2129
2130         for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2131                 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2132                     boot_state->performance_levels[0].mclk) {
2133                         pi->smc_state_table.MemoryBootLevel = level;
2134                         break;
2135                 }
2136         }
2137 }
2138
2139 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2140 {
2141         u32 i;
2142         u32 mask_value = 0;
2143
2144         for (i = dpm_table->count; i > 0; i--) {
2145                 mask_value = mask_value << 1;
2146                 if (dpm_table->dpm_levels[i-1].enabled)
2147                         mask_value |= 0x1;
2148                 else
2149                         mask_value &= 0xFFFFFFFE;
2150         }
2151
2152         return mask_value;
2153 }
2154
2155 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2156                                        SMU7_Discrete_DpmTable *table)
2157 {
2158         struct ci_power_info *pi = ci_get_pi(rdev);
2159         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2160         u32 i;
2161
2162         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2163                 table->LinkLevel[i].PcieGenSpeed =
2164                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2165                 table->LinkLevel[i].PcieLaneCount =
2166                         r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2167                 table->LinkLevel[i].EnabledForActivity = 1;
2168                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2169                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2170         }
2171
2172         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2173         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2174                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2175 }
2176
/*
 * Build the SMC UVD level table from the uvd clock/voltage dependency
 * table: per level, the vclk/dclk frequencies, their post dividers
 * (looked up via ATOM), and the minimum VDDC, all converted to the
 * SMC's big-endian layout.  Returns 0 on success, a divider-lookup
 * error, or -EINVAL when the dependency table is empty (ret keeps its
 * initial value).
 */
static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		/* Convert to big-endian for the SMC. */
		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}
2219
/*
 * Build the SMC VCE level table from the vce clock/voltage dependency
 * table: per level, the evclk frequency, its post divider (via ATOM)
 * and the minimum voltage, converted to the SMC's big-endian layout.
 * Returns 0 on success, a divider-lookup error, or -EINVAL when the
 * dependency table is empty (ret keeps its initial value).
 */
static int ci_populate_smc_vce_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		/* Convert to big-endian for the SMC. */
		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;

}
2252
2253 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2254                                      SMU7_Discrete_DpmTable *table)
2255 {
2256         u32 count;
2257         struct atom_clock_dividers dividers;
2258         int ret = -EINVAL;
2259
2260         table->AcpLevelCount = (u8)
2261                 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2262
2263         for (count = 0; count < table->AcpLevelCount; count++) {
2264                 table->AcpLevel[count].Frequency =
2265                         rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2266                 table->AcpLevel[count].MinVoltage =
2267                         rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2268                 table->AcpLevel[count].MinPhases = 1;
2269
2270                 ret = radeon_atom_get_clock_dividers(rdev,
2271                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2272                                                      table->AcpLevel[count].Frequency, false, &dividers);
2273                 if (ret)
2274                         return ret;
2275
2276                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2277
2278                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2279                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2280         }
2281
2282         return ret;
2283 }
2284
2285 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2286                                       SMU7_Discrete_DpmTable *table)
2287 {
2288         u32 count;
2289         struct atom_clock_dividers dividers;
2290         int ret = -EINVAL;
2291
2292         table->SamuLevelCount =
2293                 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2294
2295         for (count = 0; count < table->SamuLevelCount; count++) {
2296                 table->SamuLevel[count].Frequency =
2297                         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2298                 table->SamuLevel[count].MinVoltage =
2299                         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2300                 table->SamuLevel[count].MinPhases = 1;
2301
2302                 ret = radeon_atom_get_clock_dividers(rdev,
2303                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2304                                                      table->SamuLevel[count].Frequency, false, &dividers);
2305                 if (ret)
2306                         return ret;
2307
2308                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2309
2310                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2311                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2312         }
2313
2314         return ret;
2315 }
2316
/*
 * ci_calculate_mclk_params - compute the MPLL register values for one
 * SMC memory level at the given memory clock.
 *
 * Starts from the MCLK/MPLL register values cached in pi->clock_registers,
 * asks the vbios for the MPLL dividers, patches divider/spread-spectrum/DLL
 * bits into local copies, then stores them into @mclk.  All fields are
 * left in CPU byte order; the caller converts them to big endian.
 * Returns 0 on success or the error from the vbios divider lookup.
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
                                    u32 memory_clock,
                                    SMU7_Discrete_MemoryLevel *mclk,
                                    bool strobe_mode,
                                    bool dll_state_on)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        /* local copies of the init-time register values; modified below */
        u32  dll_cntl = pi->clock_registers.dll_cntl;
        u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
        u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
        u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
        u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
        u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
        u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
        u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
        u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
        struct atom_mpll_param mpll_param;
        int ret;

        /* have the vbios compute the MPLL dividers for this memory clock */
        ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
        if (ret)
                return ret;

        mpll_func_cntl &= ~BWCTRL_MASK;
        mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

        /* feedback divider (integer + fractional) and VCO mode */
        mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
        mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
                CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

        mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
        mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

        /* the DQ yclk select/post divider is only programmed on GDDR5 */
        if (pi->mem_gddr5) {
                mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
                mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
                        YCLK_POST_DIV(mpll_param.post_div);
        }

        /* optionally program memory spread spectrum */
        if (pi->caps_mclk_ss_support) {
                struct radeon_atom_ss ss;
                u32 freq_nom;
                u32 tmp;
                u32 reference_clock = rdev->clock.mpll.reference_freq;

                /* nominal frequency for the ss lookup: 4x mclk on GDDR5,
                 * 2x otherwise
                 */
                if (pi->mem_gddr5)
                        freq_nom = memory_clock * 4;
                else
                        freq_nom = memory_clock * 2;

                /* tmp = (freq_nom / ref)^2, folded into the CLKV term below */
                tmp = (freq_nom / reference_clock);
                tmp = tmp * tmp;
                if (radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
                        u32 clks = reference_clock * 5 / ss.rate;
                        u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

                        mpll_ss1 &= ~CLKV_MASK;
                        mpll_ss1 |= CLKV(clkv);

                        mpll_ss2 &= ~CLKS_MASK;
                        mpll_ss2 |= CLKS(clks);
                }
        }

        mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
        mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

        /* MRDCK0/1 _PDNB bits follow the requested DLL state */
        if (dll_state_on)
                mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
        else
                mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

        /* store everything in the SMC level, still in CPU byte order */
        mclk->MclkFrequency = memory_clock;
        mclk->MpllFuncCntl = mpll_func_cntl;
        mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
        mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
        mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
        mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
        mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
        mclk->DllCntl = dll_cntl;
        mclk->MpllSs1 = mpll_ss1;
        mclk->MpllSs2 = mpll_ss2;

        return 0;
}
2403
/*
 * ci_populate_single_memory_level - build one SMC memory DPM level for
 * the given memory clock.
 *
 * Looks up the minimum vddc/vddci/mvdd from the mclk dependency tables,
 * decides the stutter/strobe/EDC/DLL settings from the thresholds in
 * ci_power_info and the MC_SEQ registers, fills in the MPLL parameters
 * via ci_calculate_mclk_params(), and finally converts the multi-byte
 * fields to the big-endian layout the SMC expects.
 * Returns 0 on success or a negative error from the helpers.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
                                           u32 memory_clock,
                                           SMU7_Discrete_MemoryLevel *memory_level)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret;
        bool dll_state_on;

        /* minimum voltages for this mclk, when the tables exist */
        if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
                ret = ci_get_dependency_volt_by_clk(rdev,
                                                    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                    memory_clock, &memory_level->MinVddc);
                if (ret)
                        return ret;
        }

        if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
                ret = ci_get_dependency_volt_by_clk(rdev,
                                                    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                    memory_clock, &memory_level->MinVddci);
                if (ret)
                        return ret;
        }

        if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
                ret = ci_get_dependency_volt_by_clk(rdev,
                                                    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                    memory_clock, &memory_level->MinMvdd);
                if (ret)
                        return ret;
        }

        memory_level->MinVddcPhases = 1;

        /* optionally derive the phase count from the phase shedding table */
        if (pi->vddc_phase_shed_control)
                ci_populate_phase_value_based_on_mclk(rdev,
                                                      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
                                                      memory_clock,
                                                      &memory_level->MinVddcPhases);

        memory_level->EnabledForThrottle = 1;
        memory_level->EnabledForActivity = 1;
        memory_level->UpH = 0;
        memory_level->DownH = 100;
        memory_level->VoltageDownH = 0;
        memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

        /* all features default off; enabled below when thresholds allow */
        memory_level->StutterEnable = false;
        memory_level->StrobeEnable = false;
        memory_level->EdcReadEnable = false;
        memory_level->EdcWriteEnable = false;
        memory_level->RttEnable = false;

        memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

        /* stutter only below the threshold, with UVD off, stutter enabled
         * in the display pipe, and at most two active crtcs
         */
        if (pi->mclk_stutter_mode_threshold &&
            (memory_clock <= pi->mclk_stutter_mode_threshold) &&
            (pi->uvd_enabled == false) &&
            (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
            (rdev->pm.dpm.new_active_crtc_count <= 2))
                memory_level->StutterEnable = true;

        /* strobe mode only below its threshold */
        if (pi->mclk_strobe_mode_threshold &&
            (memory_clock <= pi->mclk_strobe_mode_threshold))
                memory_level->StrobeEnable = 1;

        if (pi->mem_gddr5) {
                memory_level->StrobeRatio =
                        si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
                /* EDC read/write only above their respective thresholds */
                if (pi->mclk_edc_enable_threshold &&
                    (memory_clock > pi->mclk_edc_enable_threshold))
                        memory_level->EdcReadEnable = true;

                if (pi->mclk_edc_wr_enable_threshold &&
                    (memory_clock > pi->mclk_edc_wr_enable_threshold))
                        memory_level->EdcWriteEnable = true;

                /* DLL state comes from MC_SEQ_MISC5 or MISC6 depending on
                 * how the frequency ratio compares to the MC_SEQ_MISC7 field
                 */
                if (memory_level->StrobeEnable) {
                        if (si_get_mclk_frequency_ratio(memory_clock, true) >=
                            ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
                                dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
                        else
                                dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
                } else {
                        dll_state_on = pi->dll_default_on;
                }
        } else {
                /* DDR3 path */
                memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
                dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
        }

        ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
        if (ret)
                return ret;

        /* scale voltages and convert everything to SMC big-endian layout */
        memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
        memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
        memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
        memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

        memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
        memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
        memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
        memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
        memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
        memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
        memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
        memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
        memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
        memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
        memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

        return 0;
}
2518
/*
 * ci_populate_smc_acpi_level - fill in the sclk and mclk ACPI levels of
 * the SMC DPM table.
 *
 * The ACPI level describes the state used when the asic is effectively
 * idle: lowest vddc (from the vbios ACPI value if present), reference
 * sclk with the SPLL powered down and held in reset, and the memory
 * DLLs held in reset.  Fields are converted to big endian for the SMC.
 * Returns 0 on success or the error from the divider lookup.
 */
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
                                      SMU7_Discrete_DpmTable *table)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct atom_clock_dividers dividers;
        SMU7_Discrete_VoltageLevel voltage_level;
        u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
        u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
        u32 dll_cntl = pi->clock_registers.dll_cntl;
        u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
        int ret;

        table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

        /* use the vbios ACPI vddc if available, else the pp table minimum */
        if (pi->acpi_vddc)
                table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
        else
                table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

        table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

        /* the ACPI sclk is the bare reference clock */
        table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

        ret = radeon_atom_get_clock_dividers(rdev,
                                             COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                                             table->ACPILevel.SclkFrequency, false, &dividers);
        if (ret)
                return ret;

        table->ACPILevel.SclkDid = (u8)dividers.post_divider;
        table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
        table->ACPILevel.DeepSleepDivId = 0;

        /* power the SPLL down and hold it in reset for the ACPI state */
        spll_func_cntl &= ~SPLL_PWRON;
        spll_func_cntl |= SPLL_RESET;

        /* select sclk mux input 4 */
        spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
        spll_func_cntl_2 |= SCLK_MUX_SEL(4);

        table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
        table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
        table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
        table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
        table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
        table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
        table->ACPILevel.CcPwrDynRm = 0;
        table->ACPILevel.CcPwrDynRm1 = 0;

        /* byte-swap the sclk-side fields for the SMC */
        table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
        table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
        table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
        table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
        table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
        table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
        table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
        table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
        table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
        table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
        table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

        /* the memory ACPI level reuses the sclk ACPI vddc/phases */
        table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
        table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

        if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
                if (pi->acpi_vddci)
                        table->MemoryACPILevel.MinVddci =
                                cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
                else
                        table->MemoryACPILevel.MinVddci =
                                cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
        }

        /* best effort: fall back to 0 if no mvdd value can be populated */
        if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
                table->MemoryACPILevel.MinMvdd = 0;
        else
                table->MemoryACPILevel.MinMvdd =
                        cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

        /* hold the MRDCK DLLs in reset and clear the _PDNB bits */
        mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
        mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

        dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

        table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
        table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
        table->MemoryACPILevel.MpllAdFuncCntl =
                cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
        table->MemoryACPILevel.MpllDqFuncCntl =
                cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
        table->MemoryACPILevel.MpllFuncCntl =
                cpu_to_be32(pi->clock_registers.mpll_func_cntl);
        table->MemoryACPILevel.MpllFuncCntl_1 =
                cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
        table->MemoryACPILevel.MpllFuncCntl_2 =
                cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
        table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
        table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

        /* the ACPI memory level is excluded from throttling/activity */
        table->MemoryACPILevel.EnabledForThrottle = 0;
        table->MemoryACPILevel.EnabledForActivity = 0;
        table->MemoryACPILevel.UpH = 0;
        table->MemoryACPILevel.DownH = 100;
        table->MemoryACPILevel.VoltageDownH = 0;
        table->MemoryACPILevel.ActivityLevel =
                cpu_to_be16((u16)pi->mclk_activity_target);

        table->MemoryACPILevel.StutterEnable = false;
        table->MemoryACPILevel.StrobeEnable = false;
        table->MemoryACPILevel.EdcReadEnable = false;
        table->MemoryACPILevel.EdcWriteEnable = false;
        table->MemoryACPILevel.RttEnable = false;

        return 0;
}
2633
2634
2635 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2636 {
2637         struct ci_power_info *pi = ci_get_pi(rdev);
2638         struct ci_ulv_parm *ulv = &pi->ulv;
2639
2640         if (ulv->supported) {
2641                 if (enable)
2642                         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2643                                 0 : -EINVAL;
2644                 else
2645                         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2646                                 0 : -EINVAL;
2647         }
2648
2649         return 0;
2650 }
2651
2652 static int ci_populate_ulv_level(struct radeon_device *rdev,
2653                                  SMU7_Discrete_Ulv *state)
2654 {
2655         struct ci_power_info *pi = ci_get_pi(rdev);
2656         u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2657
2658         state->CcPwrDynRm = 0;
2659         state->CcPwrDynRm1 = 0;
2660
2661         if (ulv_voltage == 0) {
2662                 pi->ulv.supported = false;
2663                 return 0;
2664         }
2665
2666         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2667                 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2668                         state->VddcOffset = 0;
2669                 else
2670                         state->VddcOffset =
2671                                 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2672         } else {
2673                 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2674                         state->VddcOffsetVid = 0;
2675                 else
2676                         state->VddcOffsetVid = (u8)
2677                                 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2678                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2679         }
2680         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2681
2682         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2683         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2684         state->VddcOffset = cpu_to_be16(state->VddcOffset);
2685
2686         return 0;
2687 }
2688
/*
 * ci_calculate_sclk_params - compute the SPLL register values for one
 * SMC graphics level at the given engine clock.
 *
 * Asks the vbios for the reference/feedback/post dividers, optionally
 * programs engine spread spectrum, and stores the results into @sclk.
 * Fields are left in CPU byte order; the caller converts to big endian.
 * Returns 0 on success or the error from the vbios divider lookup.
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
                                    u32 engine_clock,
                                    SMU7_Discrete_GraphicsLevel *sclk)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct atom_clock_dividers dividers;
        /* start from the init-time SPLL register values */
        u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
        u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
        u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
        u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
        u32 reference_clock = rdev->clock.spll.reference_freq;
        u32 reference_divider;
        u32 fbdiv;
        int ret;

        /* have the vbios compute the sclk dividers for this engine clock */
        ret = radeon_atom_get_clock_dividers(rdev,
                                             COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                                             engine_clock, false, &dividers);
        if (ret)
                return ret;

        reference_divider = 1 + dividers.ref_div;
        fbdiv = dividers.fb_div & 0x3FFFFFF;

        /* program the feedback divider and enable dithering */
        spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
        spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
        spll_func_cntl_3 |= SPLL_DITHEN;

        /* optionally program engine spread spectrum from the vbios info */
        if (pi->caps_sclk_ss_support) {
                struct radeon_atom_ss ss;
                u32 vco_freq = engine_clock * dividers.post_div;

                if (radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
                        u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
                        u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

                        cg_spll_spread_spectrum &= ~CLK_S_MASK;
                        cg_spll_spread_spectrum |= CLK_S(clk_s);
                        cg_spll_spread_spectrum |= SSEN;

                        cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
                        cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
                }
        }

        /* store everything in the SMC level, still in CPU byte order */
        sclk->SclkFrequency = engine_clock;
        sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
        sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
        sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
        sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
        sclk->SclkDid = (u8)dividers.post_divider;

        return 0;
}
2744
/*
 * ci_populate_single_graphic_level - build one SMC graphics DPM level
 * for the given engine clock.
 *
 * Computes the SPLL parameters via ci_calculate_sclk_params(), looks up
 * the minimum vddc from the sclk dependency table, fills in the policy
 * fields (activity target, hysteresis, optional phase shedding and deep
 * sleep divider), then converts the multi-byte fields to the big-endian
 * layout the SMC expects.  Returns 0 or a negative error from a helper.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
                                            u32 engine_clock,
                                            u16 sclk_activity_level_t,
                                            SMU7_Discrete_GraphicsLevel *graphic_level)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret;

        ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
        if (ret)
                return ret;

        /* minimum vddc for this sclk */
        ret = ci_get_dependency_volt_by_clk(rdev,
                                            &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                            engine_clock, &graphic_level->MinVddc);
        if (ret)
                return ret;

        graphic_level->SclkFrequency = engine_clock;

        graphic_level->Flags =  0;
        graphic_level->MinVddcPhases = 1;

        /* optionally derive the phase count from the phase shedding table */
        if (pi->vddc_phase_shed_control)
                ci_populate_phase_value_based_on_sclk(rdev,
                                                      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
                                                      engine_clock,
                                                      &graphic_level->MinVddcPhases);

        graphic_level->ActivityLevel = sclk_activity_level_t;

        graphic_level->CcPwrDynRm = 0;
        graphic_level->CcPwrDynRm1 = 0;
        graphic_level->EnabledForActivity = 1;
        graphic_level->EnabledForThrottle = 1;
        graphic_level->UpH = 0;
        graphic_level->DownH = 0;
        graphic_level->VoltageDownH = 0;
        graphic_level->PowerThrottle = 0;

        /* deep sleep divider only when sclk deep sleep is supported */
        if (pi->caps_sclk_ds)
                graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
                                                                                   engine_clock,
                                                                                   CISLAND_MINIMUM_ENGINE_CLOCK);

        graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

        /* scale the voltage and convert to SMC big-endian layout */
        graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
        graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
        graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
        graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
        graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
        graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
        graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
        graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
        graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
        graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

        return 0;
}
2806
2807 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2808 {
2809         struct ci_power_info *pi = ci_get_pi(rdev);
2810         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2811         u32 level_array_address = pi->dpm_table_start +
2812                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2813         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2814                 SMU7_MAX_LEVELS_GRAPHICS;
2815         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2816         u32 i, ret;
2817
2818         memset(levels, 0, level_array_size);
2819
2820         for (i = 0; i < dpm_table->sclk_table.count; i++) {
2821                 ret = ci_populate_single_graphic_level(rdev,
2822                                                        dpm_table->sclk_table.dpm_levels[i].value,
2823                                                        (u16)pi->activity_target[i],
2824                                                        &pi->smc_state_table.GraphicsLevel[i]);
2825                 if (ret)
2826                         return ret;
2827                 if (i == (dpm_table->sclk_table.count - 1))
2828                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2829                                 PPSMC_DISPLAY_WATERMARK_HIGH;
2830         }
2831
2832         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2833         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2834                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2835
2836         ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2837                                    (u8 *)levels, level_array_size,
2838                                    pi->sram_end);
2839         if (ret)
2840                 return ret;
2841
2842         return 0;
2843 }
2844
2845 static int ci_populate_ulv_state(struct radeon_device *rdev,
2846                                  SMU7_Discrete_Ulv *ulv_level)
2847 {
2848         return ci_populate_ulv_level(rdev, ulv_level);
2849 }
2850
2851 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2852 {
2853         struct ci_power_info *pi = ci_get_pi(rdev);
2854         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2855         u32 level_array_address = pi->dpm_table_start +
2856                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2857         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2858                 SMU7_MAX_LEVELS_MEMORY;
2859         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2860         u32 i, ret;
2861
2862         memset(levels, 0, level_array_size);
2863
2864         for (i = 0; i < dpm_table->mclk_table.count; i++) {
2865                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2866                         return -EINVAL;
2867                 ret = ci_populate_single_memory_level(rdev,
2868                                                       dpm_table->mclk_table.dpm_levels[i].value,
2869                                                       &pi->smc_state_table.MemoryLevel[i]);
2870                 if (ret)
2871                         return ret;
2872         }
2873
2874         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2875
2876         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2877         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2878                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2879
2880         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2881                 PPSMC_DISPLAY_WATERMARK_HIGH;
2882
2883         ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2884                                    (u8 *)levels, level_array_size,
2885                                    pi->sram_end);
2886         if (ret)
2887                 return ret;
2888
2889         return 0;
2890 }
2891
2892 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2893                                       struct ci_single_dpm_table* dpm_table,
2894                                       u32 count)
2895 {
2896         u32 i;
2897
2898         dpm_table->count = count;
2899         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2900                 dpm_table->dpm_levels[i].enabled = false;
2901 }
2902
2903 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2904                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
2905 {
2906         dpm_table->dpm_levels[index].value = pcie_gen;
2907         dpm_table->dpm_levels[index].param1 = pcie_lanes;
2908         dpm_table->dpm_levels[index].enabled = true;
2909 }
2910
/*
 * Build the default PCIe speed/lane DPM table.
 *
 * If only one of the performance/powersaving PCIe level sets is usable,
 * the missing one is cloned from the other.  Six fixed entries are then
 * created, alternating powersaving and performance (gen, lanes)
 * combinations from minimum up to maximum.
 *
 * Returns -EINVAL if neither PCIe level set is usable, 0 otherwise.
 */
static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
                return -EINVAL;

        /* mirror whichever set is missing from the one that is present */
        if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
                pi->pcie_gen_powersaving = pi->pcie_gen_performance;
                pi->pcie_lane_powersaving = pi->pcie_lane_performance;
        } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
                pi->pcie_gen_performance = pi->pcie_gen_powersaving;
                pi->pcie_lane_performance = pi->pcie_lane_powersaving;
        }

        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.pcie_speed_table,
                                  SMU7_MAX_LEVELS_LINK);

        /* entries 0..5: fixed progression from lowest to highest link config */
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
                                  pi->pcie_gen_powersaving.min,
                                  pi->pcie_lane_powersaving.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
                                  pi->pcie_gen_powersaving.min,
                                  pi->pcie_lane_powersaving.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
                                  pi->pcie_gen_powersaving.max,
                                  pi->pcie_lane_powersaving.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
                                  pi->pcie_gen_performance.max,
                                  pi->pcie_lane_performance.max);

        pi->dpm_table.pcie_speed_table.count = 6;

        return 0;
}
2953
/*
 * Build the driver-side DPM tables (sclk, mclk, vddc, vddci, mvdd and
 * PCIe) from the powerplay clock-voltage dependency tables.
 *
 * Adjacent duplicate clocks in the dependency tables are collapsed so
 * each DPM level holds a distinct clock value.
 *
 * Returns -EINVAL if the sclk or mclk dependency table is empty, 0
 * otherwise.
 */
static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
        struct radeon_cac_leakage_table *std_voltage_table =
                &rdev->pm.dpm.dyn_state.cac_leakage_table;
        u32 i;

        /* NOTE(review): these pointers are addresses of embedded struct
         * members and can never be NULL; only the count checks below
         * are effective. */
        if (allowed_sclk_vddc_table == NULL)
                return -EINVAL;
        if (allowed_sclk_vddc_table->count < 1)
                return -EINVAL;
        if (allowed_mclk_table == NULL)
                return -EINVAL;
        if (allowed_mclk_table->count < 1)
                return -EINVAL;

        memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.sclk_table,
                                  SMU7_MAX_LEVELS_GRAPHICS);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.mclk_table,
                                  SMU7_MAX_LEVELS_MEMORY);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.vddc_table,
                                  SMU7_MAX_LEVELS_VDDC);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.vddci_table,
                                  SMU7_MAX_LEVELS_VDDCI);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.mvdd_table,
                                  SMU7_MAX_LEVELS_MVDD);

        /* sclk levels: skip entries whose clock equals the previous one */
        pi->dpm_table.sclk_table.count = 0;
        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
                if ((i == 0) ||
                    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
                     allowed_sclk_vddc_table->entries[i].clk)) {
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
                                allowed_sclk_vddc_table->entries[i].clk;
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
                        pi->dpm_table.sclk_table.count++;
                }
        }

        /* mclk levels: same de-duplication as for sclk */
        pi->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_mclk_table->count; i++) {
                if ((i==0) ||
                    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
                     allowed_mclk_table->entries[i].clk)) {
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
                                allowed_mclk_table->entries[i].clk;
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
                        pi->dpm_table.mclk_table.count++;
                }
        }

        /* vddc levels: voltage in 'value', CAC leakage in 'param1'.
         * NOTE(review): std_voltage_table is indexed by the sclk
         * dependency table's count — presumably cac_leakage_table has
         * at least as many entries; verify to rule out an
         * out-of-bounds read. */
        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
                pi->dpm_table.vddc_table.dpm_levels[i].value =
                        allowed_sclk_vddc_table->entries[i].v;
                pi->dpm_table.vddc_table.dpm_levels[i].param1 =
                        std_voltage_table->entries[i].leakage;
                pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
        }
        pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

        /* vddci levels (check is always true: address of embedded member) */
        allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
        if (allowed_mclk_table) {
                for (i = 0; i < allowed_mclk_table->count; i++) {
                        pi->dpm_table.vddci_table.dpm_levels[i].value =
                                allowed_mclk_table->entries[i].v;
                        pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
                }
                pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
        }

        /* mvdd levels (check is always true: address of embedded member) */
        allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
        if (allowed_mclk_table) {
                for (i = 0; i < allowed_mclk_table->count; i++) {
                        pi->dpm_table.mvdd_table.dpm_levels[i].value =
                                allowed_mclk_table->entries[i].v;
                        pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
                }
                pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
        }

        /* NOTE(review): return value ignored; a PCIe table setup
         * failure is not propagated to the caller. */
        ci_setup_default_pcie_tables(rdev);

        return 0;
}
3049
3050 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3051                               u32 value, u32 *boot_level)
3052 {
3053         u32 i;
3054         int ret = -EINVAL;
3055
3056         for(i = 0; i < table->count; i++) {
3057                 if (value == table->dpm_levels[i].value) {
3058                         *boot_level = i;
3059                         ret = 0;
3060                 }
3061         }
3062
3063         return ret;
3064 }
3065
/*
 * Populate the SMU7 discrete DPM table in pi->smc_state_table and
 * upload it to SMC RAM: system flags, ULV state, all graphics/memory/
 * link levels, the ACPI/VCE/ACP/SAMU/UVD levels, boot levels and the
 * fixed control parameters.  Multi-byte fields are converted to the
 * SMC's big-endian layout just before the final copy.
 *
 * Returns 0 on success or the first error from a populate/upload step.
 */
static int ci_init_smc_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ulv_parm *ulv = &pi->ulv;
        struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
        int ret;

        ret = ci_setup_default_dpm_tables(rdev);
        if (ret)
                return ret;

        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
                ci_populate_smc_voltage_tables(rdev, table);

        ci_init_fps_limits(rdev);

        /* platform capability flags forwarded to the SMC */
        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

        if (pi->mem_gddr5)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

        if (ulv->supported) {
                ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
                if (ret)
                        return ret;
                WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
        }

        ret = ci_populate_all_graphic_levels(rdev);
        if (ret)
                return ret;

        ret = ci_populate_all_memory_levels(rdev);
        if (ret)
                return ret;

        ci_populate_smc_link_level(rdev, table);

        ret = ci_populate_smc_acpi_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_vce_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_acp_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_samu_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_do_program_memory_timing_parameters(rdev);
        if (ret)
                return ret;

        ret = ci_populate_smc_uvd_level(rdev, table);
        if (ret)
                return ret;

        table->UvdBootLevel  = 0;
        table->VceBootLevel  = 0;
        table->AcpBootLevel  = 0;
        table->SamuBootLevel  = 0;
        table->GraphicsBootLevel  = 0;
        table->MemoryBootLevel  = 0;

        /* NOTE(review): the return values of these lookups are
         * overwritten/ignored; on a miss the boot level silently
         * stays 0. */
        ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
                                 pi->vbios_boot_state.sclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

        ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
                                 pi->vbios_boot_state.mclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.MemoryBootLevel);

        table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
        table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
        table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

        ci_populate_smc_initial_state(rdev, radeon_boot_state);

        ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
        if (ret)
                return ret;

        /* fixed control intervals and thresholds */
        table->UVDInterval = 1;
        table->VCEInterval = 1;
        table->ACPInterval = 1;
        table->SAMUInterval = 1;
        table->GraphicsVoltageChangeEnable = 1;
        table->GraphicsThermThrottleEnable = 1;
        table->GraphicsInterval = 1;
        table->VoltageInterval = 1;
        table->ThermalInterval = 1;
        /* temperature limits are converted to Q8.8 fixed point */
        table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
                                            CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->MemoryVoltageChangeEnable = 1;
        table->MemoryInterval = 1;
        table->VoltageResponseTime = 0;
        table->VddcVddciDelta = 4000;
        table->PhaseResponseTime = 0;
        table->MemoryThermThrottleEnable = 1;
        table->PCIeBootLinkLevel = 0;
        table->PCIeGenInterval = 1;
        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
                table->SVI2Enable  = 1;
        else
                table->SVI2Enable  = 0;

        table->ThermGpio = 17;
        table->SclkStepSize = 0x4000;

        /* convert multi-byte fields to the SMC's big-endian format */
        table->SystemFlags = cpu_to_be32(table->SystemFlags);
        table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
        table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
        table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
        table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
        table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
        table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
        table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
        table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
        table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
        table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
        table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
        table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
        table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

        /* upload from SystemFlags, excluding three SMU7_PIDController
         * structs' worth of bytes — presumably the PID controllers sit
         * at the end of SMU7_Discrete_DpmTable; verify against the
         * struct layout */
        ret = ci_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
                                   (u8 *)&table->SystemFlags,
                                   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
                                   pi->sram_end);
        if (ret)
                return ret;

        return 0;
}
3213
3214 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3215                                       struct ci_single_dpm_table *dpm_table,
3216                                       u32 low_limit, u32 high_limit)
3217 {
3218         u32 i;
3219
3220         for (i = 0; i < dpm_table->count; i++) {
3221                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3222                     (dpm_table->dpm_levels[i].value > high_limit))
3223                         dpm_table->dpm_levels[i].enabled = false;
3224                 else
3225                         dpm_table->dpm_levels[i].enabled = true;
3226         }
3227 }
3228
3229 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3230                                     u32 speed_low, u32 lanes_low,
3231                                     u32 speed_high, u32 lanes_high)
3232 {
3233         struct ci_power_info *pi = ci_get_pi(rdev);
3234         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3235         u32 i, j;
3236
3237         for (i = 0; i < pcie_table->count; i++) {
3238                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3239                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3240                     (pcie_table->dpm_levels[i].value > speed_high) ||
3241                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3242                         pcie_table->dpm_levels[i].enabled = false;
3243                 else
3244                         pcie_table->dpm_levels[i].enabled = true;
3245         }
3246
3247         for (i = 0; i < pcie_table->count; i++) {
3248                 if (pcie_table->dpm_levels[i].enabled) {
3249                         for (j = i + 1; j < pcie_table->count; j++) {
3250                                 if (pcie_table->dpm_levels[j].enabled) {
3251                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3252                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3253                                                 pcie_table->dpm_levels[j].enabled = false;
3254                                 }
3255                         }
3256                 }
3257         }
3258 }
3259
3260 static int ci_trim_dpm_states(struct radeon_device *rdev,
3261                               struct radeon_ps *radeon_state)
3262 {
3263         struct ci_ps *state = ci_get_ps(radeon_state);
3264         struct ci_power_info *pi = ci_get_pi(rdev);
3265         u32 high_limit_count;
3266
3267         if (state->performance_level_count < 1)
3268                 return -EINVAL;
3269
3270         if (state->performance_level_count == 1)
3271                 high_limit_count = 0;
3272         else
3273                 high_limit_count = 1;
3274
3275         ci_trim_single_dpm_states(rdev,
3276                                   &pi->dpm_table.sclk_table,
3277                                   state->performance_levels[0].sclk,
3278                                   state->performance_levels[high_limit_count].sclk);
3279
3280         ci_trim_single_dpm_states(rdev,
3281                                   &pi->dpm_table.mclk_table,
3282                                   state->performance_levels[0].mclk,
3283                                   state->performance_levels[high_limit_count].mclk);
3284
3285         ci_trim_pcie_dpm_states(rdev,
3286                                 state->performance_levels[0].pcie_gen,
3287                                 state->performance_levels[0].pcie_lane,
3288                                 state->performance_levels[high_limit_count].pcie_gen,
3289                                 state->performance_levels[high_limit_count].pcie_lane);
3290
3291         return 0;
3292 }
3293
/*
 * Ask the SMC for the minimum vddc required by the current display
 * clock: look up the voltage matching the active dispclk in the
 * dispclk dependency table, round it up to the next available level in
 * the sclk/vddc table, and send it via PPSMC_MSG_VddC_Request.
 *
 * Returns 0 on success, -EINVAL if no suitable level exists or the SMC
 * rejects the request.
 */
static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
{
        struct radeon_clock_voltage_dependency_table *disp_voltage_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
        struct radeon_clock_voltage_dependency_table *vddc_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        u32 requested_voltage = 0;
        u32 i;

        /* NOTE(review): this points at an embedded member and can never
         * be NULL; only the count check below is effective. */
        if (disp_voltage_table == NULL)
                return -EINVAL;
        if (!disp_voltage_table->count)
                return -EINVAL;

        /* voltage needed for the exact current display clock (stays 0
         * if the dispclk is not listed) */
        for (i = 0; i < disp_voltage_table->count; i++) {
                if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
                        requested_voltage = disp_voltage_table->entries[i].v;
        }

        /* round up to the first vddc level >= the requested voltage */
        for (i = 0; i < vddc_table->count; i++) {
                if (requested_voltage <= vddc_table->entries[i].v) {
                        requested_voltage = vddc_table->entries[i].v;
                        return (ci_send_msg_to_smc_with_parameter(rdev,
                                                                  PPSMC_MSG_VddC_Request,
                                                                  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
                                0 : -EINVAL;
                }
        }

        return -EINVAL;
}
3325
3326 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3327 {
3328         struct ci_power_info *pi = ci_get_pi(rdev);
3329         PPSMC_Result result;
3330
3331         if (!pi->sclk_dpm_key_disabled) {
3332                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3333                         result = ci_send_msg_to_smc_with_parameter(rdev,
3334                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3335                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3336                         if (result != PPSMC_Result_OK)
3337                                 return -EINVAL;
3338                 }
3339         }
3340
3341         if (!pi->mclk_dpm_key_disabled) {
3342                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3343                         result = ci_send_msg_to_smc_with_parameter(rdev,
3344                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3345                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3346                         if (result != PPSMC_Result_OK)
3347                                 return -EINVAL;
3348                 }
3349         }
3350
3351         if (!pi->pcie_dpm_key_disabled) {
3352                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3353                         result = ci_send_msg_to_smc_with_parameter(rdev,
3354                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3355                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3356                         if (result != PPSMC_Result_OK)
3357                                 return -EINVAL;
3358                 }
3359         }
3360
3361         ci_apply_disp_minimum_voltage_request(rdev);
3362
3363         return 0;
3364 }
3365
/*
 * Work out which parts of the SMU DPM tables must be rebuilt for the
 * requested state and record the result as DPMTABLE_* flags in
 * pi->need_update_smu7_dpm_table.
 *
 * The target clocks are taken from the state's highest performance
 * level.  A clock missing from the current table means an over-drive
 * (OD) update; a change in the number of active crtcs forces an mclk
 * table refresh.
 */
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
                                                   struct radeon_ps *radeon_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ps *state = ci_get_ps(radeon_state);
        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
        u32 i;

        pi->need_update_smu7_dpm_table = 0;

        /* does the requested sclk already exist in the table? */
        for (i = 0; i < sclk_table->count; i++) {
                if (sclk == sclk_table->dpm_levels[i].value)
                        break;
        }

        if (i >= sclk_table->count) {
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
        } else {
                /* XXX check display min clock requirements */
                if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
                        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
        }

        /* does the requested mclk already exist in the table? */
        for (i = 0; i < mclk_table->count; i++) {
                if (mclk == mclk_table->dpm_levels[i].value)
                        break;
        }

        if (i >= mclk_table->count)
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

        /* refresh the mclk table whenever the active crtc count changes */
        if (rdev->pm.dpm.current_active_crtc_count !=
            rdev->pm.dpm.new_active_crtc_count)
                pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
3404
3405 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3406                                                        struct radeon_ps *radeon_state)
3407 {
3408         struct ci_power_info *pi = ci_get_pi(rdev);
3409         struct ci_ps *state = ci_get_ps(radeon_state);
3410         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3411         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3412         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3413         int ret;
3414
3415         if (!pi->need_update_smu7_dpm_table)
3416                 return 0;
3417
3418         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3419                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3420
3421         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3422                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3423
3424         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3425                 ret = ci_populate_all_graphic_levels(rdev);
3426                 if (ret)
3427                         return ret;
3428         }
3429
3430         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3431                 ret = ci_populate_all_memory_levels(rdev);
3432                 if (ret)
3433                         return ret;
3434         }
3435
3436         return 0;
3437 }
3438
/*
 * Enable or disable UVD DPM via the SMC.
 *
 * On enable, build uvd_dpm_enable_mask by walking the UVD clock-voltage
 * dependency table from the highest level down, setting a bit for each
 * level whose voltage fits the current (AC or DC) vddc limit; without
 * caps_uvd_dpm the walk stops after the first (highest) usable level.
 * While UVD is active, memory DPM level 0 is masked off and restored on
 * disable — presumably to keep mclk from dropping during decode; verify
 * against SMC firmware documentation.
 *
 * Returns 0 if the SMC accepts the enable/disable message, -EINVAL
 * otherwise.
 */
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

                for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

                                /* without UVD DPM, enable only the highest usable level */
                                if (!pi->caps_uvd_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_UVDDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

                /* mask off memory DPM level 0 while UVD is running */
                if (pi->last_mclk_dpm_enable_mask & 0x1) {
                        pi->uvd_enabled = true;
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
                        ci_send_msg_to_smc_with_parameter(rdev,
                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
                }
        } else {
                /* restore memory DPM level 0 when UVD stops */
                if (pi->last_mclk_dpm_enable_mask & 0x1) {
                        pi->uvd_enabled = false;
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
                        ci_send_msg_to_smc_with_parameter(rdev,
                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
                }
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3487
/*
 * Enable or disable VCE DPM via the SMC.
 *
 * On enable, build vce_dpm_enable_mask from the VCE clock-voltage
 * dependency table, highest level first, keeping each level whose
 * voltage fits the current (AC or DC) vddc limit; without caps_vce_dpm
 * the walk stops after the first (highest) usable level.
 *
 * Returns 0 if the SMC accepts the enable/disable message, -EINVAL
 * otherwise.
 */
static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
                for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

                                /* without VCE DPM, enable only the highest usable level */
                                if (!pi->caps_vce_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_VCEDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3519
3520 #if 0
3521 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3522 {
3523         struct ci_power_info *pi = ci_get_pi(rdev);
3524         const struct radeon_clock_and_voltage_limits *max_limits;
3525         int i;
3526
3527         if (rdev->pm.dpm.ac_power)
3528                 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3529         else
3530                 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3531
3532         if (enable) {
3533                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3534                 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3535                         if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3536                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3537
3538                                 if (!pi->caps_samu_dpm)
3539                                         break;
3540                         }
3541                 }
3542
3543                 ci_send_msg_to_smc_with_parameter(rdev,
3544                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
3545                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3546         }
3547         return (ci_send_msg_to_smc(rdev, enable ?
3548                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3549                 0 : -EINVAL;
3550 }
3551
/*
 * Enable or disable ACP DPM via the SMC (currently compiled out).
 *
 * When enabling, build a bitmask of the ACP clock levels whose voltage
 * requirement fits under the active (AC or DC) vddc limit, hand it to
 * the SMC, then send the enable/disable message.
 *
 * Returns 0 on success, -EINVAL if the SMC rejects the message.
 */
static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	/* pick the voltage limits matching the current power source */
	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		/* scan from the highest level down; every level under the
		 * voltage limit gets its bit set */
		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				/* without ACP DPM support only the single
				 * highest usable level is enabled */
				if (!pi->caps_acp_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_ACPDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
3583 #endif
3584
3585 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3586 {
3587         struct ci_power_info *pi = ci_get_pi(rdev);
3588         u32 tmp;
3589
3590         if (!gate) {
3591                 if (pi->caps_uvd_dpm ||
3592                     (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3593                         pi->smc_state_table.UvdBootLevel = 0;
3594                 else
3595                         pi->smc_state_table.UvdBootLevel =
3596                                 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3597
3598                 tmp = RREG32_SMC(DPM_TABLE_475);
3599                 tmp &= ~UvdBootLevel_MASK;
3600                 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3601                 WREG32_SMC(DPM_TABLE_475, tmp);
3602         }
3603
3604         return ci_enable_uvd_dpm(rdev, !gate);
3605 }
3606
3607 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3608 {
3609         u8 i;
3610         u32 min_evclk = 30000; /* ??? */
3611         struct radeon_vce_clock_voltage_dependency_table *table =
3612                 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3613
3614         for (i = 0; i < table->count; i++) {
3615                 if (table->entries[i].evclk >= min_evclk)
3616                         return i;
3617         }
3618
3619         return table->count - 1;
3620 }
3621
/*
 * Enable VCE DPM when the new state requires an encode clock and disable
 * it when it no longer does; a no-op if the evclk requirement is
 * unchanged between the two states.
 *
 * Before enabling, the VCE boot level is programmed into the shared SMC
 * DPM table register (DPM_TABLE_475) and the VCE clocks are ungated;
 * when disabling, the clocks are re-gated first.
 *
 * Returns 0 on success (or no change), error from ci_enable_vce_dpm().
 */
static int ci_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 tmp;

	if (radeon_current_state->evclk != radeon_new_state->evclk) {
		if (radeon_new_state->evclk) {
			/* turn the clocks on when encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
			/* read-modify-write only the VceBootLevel field */
			tmp = RREG32_SMC(DPM_TABLE_475);
			tmp &= ~VceBootLevel_MASK;
			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
			WREG32_SMC(DPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(rdev, true);
		} else {
			/* turn the clocks off when not encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

			ret = ci_enable_vce_dpm(rdev, false);
		}
	}
	return ret;
}
3651
3652 #if 0
/*
 * Update SAMU DPM for a power-gating change (currently compiled out).
 *
 * NOTE(review): unlike ci_update_uvd_dpm()/ci_update_acp_dpm(), which
 * pass !gate to their enable helpers, this passes gate through
 * unchanged — confirm the intended polarity before enabling this code.
 */
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	return ci_enable_samu_dpm(rdev, gate);
}
3657
3658 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3659 {
3660         struct ci_power_info *pi = ci_get_pi(rdev);
3661         u32 tmp;
3662
3663         if (!gate) {
3664                 pi->smc_state_table.AcpBootLevel = 0;
3665
3666                 tmp = RREG32_SMC(DPM_TABLE_475);
3667                 tmp &= ~AcpBootLevel_MASK;
3668                 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3669                 WREG32_SMC(DPM_TABLE_475, tmp);
3670         }
3671
3672         return ci_enable_acp_dpm(rdev, !gate);
3673 }
3674 #endif
3675
3676 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3677                                              struct radeon_ps *radeon_state)
3678 {
3679         struct ci_power_info *pi = ci_get_pi(rdev);
3680         int ret;
3681
3682         ret = ci_trim_dpm_states(rdev, radeon_state);
3683         if (ret)
3684                 return ret;
3685
3686         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3687                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3688         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3689                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3690         pi->last_mclk_dpm_enable_mask =
3691                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3692         if (pi->uvd_enabled) {
3693                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3694                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3695         }
3696         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3697                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3698
3699         return 0;
3700 }
3701
3702 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3703                                        u32 level_mask)
3704 {
3705         u32 level = 0;
3706
3707         while ((level_mask & (1 << level)) == 0)
3708                 level++;
3709
3710         return level;
3711 }
3712
3713
3714 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3715                                    enum radeon_dpm_forced_level level)
3716 {
3717         struct ci_power_info *pi = ci_get_pi(rdev);
3718         PPSMC_Result smc_result;
3719         u32 tmp, levels, i;
3720         int ret;
3721
3722         if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3723                 if ((!pi->sclk_dpm_key_disabled) &&
3724                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3725                         levels = 0;
3726                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3727                         while (tmp >>= 1)
3728                                 levels++;
3729                         if (levels) {
3730                                 ret = ci_dpm_force_state_sclk(rdev, levels);
3731                                 if (ret)
3732                                         return ret;
3733                                 for (i = 0; i < rdev->usec_timeout; i++) {
3734                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3735                                                CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3736                                         if (tmp == levels)
3737                                                 break;
3738                                         udelay(1);
3739                                 }
3740                         }
3741                 }
3742                 if ((!pi->mclk_dpm_key_disabled) &&
3743                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3744                         levels = 0;
3745                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3746                         while (tmp >>= 1)
3747                                 levels++;
3748                         if (levels) {
3749                                 ret = ci_dpm_force_state_mclk(rdev, levels);
3750                                 if (ret)
3751                                         return ret;
3752                                 for (i = 0; i < rdev->usec_timeout; i++) {
3753                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3754                                                CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3755                                         if (tmp == levels)
3756                                                 break;
3757                                         udelay(1);
3758                                 }
3759                         }
3760                 }
3761                 if ((!pi->pcie_dpm_key_disabled) &&
3762                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3763                         levels = 0;
3764                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3765                         while (tmp >>= 1)
3766                                 levels++;
3767                         if (levels) {
3768                                 ret = ci_dpm_force_state_pcie(rdev, level);
3769                                 if (ret)
3770                                         return ret;
3771                                 for (i = 0; i < rdev->usec_timeout; i++) {
3772                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3773                                                CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3774                                         if (tmp == levels)
3775                                                 break;
3776                                         udelay(1);
3777                                 }
3778                         }
3779                 }
3780         } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3781                 if ((!pi->sclk_dpm_key_disabled) &&
3782                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3783                         levels = ci_get_lowest_enabled_level(rdev,
3784                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3785                         ret = ci_dpm_force_state_sclk(rdev, levels);
3786                         if (ret)
3787                                 return ret;
3788                         for (i = 0; i < rdev->usec_timeout; i++) {
3789                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3790                                        CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3791                                 if (tmp == levels)
3792                                         break;
3793                                 udelay(1);
3794                         }
3795                 }
3796                 if ((!pi->mclk_dpm_key_disabled) &&
3797                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3798                         levels = ci_get_lowest_enabled_level(rdev,
3799                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3800                         ret = ci_dpm_force_state_mclk(rdev, levels);
3801                         if (ret)
3802                                 return ret;
3803                         for (i = 0; i < rdev->usec_timeout; i++) {
3804                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3805                                        CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3806                                 if (tmp == levels)
3807                                         break;
3808                                 udelay(1);
3809                         }
3810                 }
3811                 if ((!pi->pcie_dpm_key_disabled) &&
3812                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3813                         levels = ci_get_lowest_enabled_level(rdev,
3814                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3815                         ret = ci_dpm_force_state_pcie(rdev, levels);
3816                         if (ret)
3817                                 return ret;
3818                         for (i = 0; i < rdev->usec_timeout; i++) {
3819                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3820                                        CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3821                                 if (tmp == levels)
3822                                         break;
3823                                 udelay(1);
3824                         }
3825                 }
3826         } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3827                 if (!pi->sclk_dpm_key_disabled) {
3828                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3829                         if (smc_result != PPSMC_Result_OK)
3830                                 return -EINVAL;
3831                 }
3832                 if (!pi->mclk_dpm_key_disabled) {
3833                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3834                         if (smc_result != PPSMC_Result_OK)
3835                                 return -EINVAL;
3836                 }
3837                 if (!pi->pcie_dpm_key_disabled) {
3838                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3839                         if (smc_result != PPSMC_Result_OK)
3840                                 return -EINVAL;
3841                 }
3842         }
3843
3844         rdev->pm.dpm.forced_level = level;
3845
3846         return 0;
3847 }
3848
3849 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3850                                        struct ci_mc_reg_table *table)
3851 {
3852         struct ci_power_info *pi = ci_get_pi(rdev);
3853         u8 i, j, k;
3854         u32 temp_reg;
3855
3856         for (i = 0, j = table->last; i < table->last; i++) {
3857                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3858                         return -EINVAL;
3859                 switch(table->mc_reg_address[i].s1 << 2) {
3860                 case MC_SEQ_MISC1:
3861                         temp_reg = RREG32(MC_PMG_CMD_EMRS);
3862                         table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3863                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3864                         for (k = 0; k < table->num_entries; k++) {
3865                                 table->mc_reg_table_entry[k].mc_data[j] =
3866                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3867                         }
3868                         j++;
3869                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3870                                 return -EINVAL;
3871
3872                         temp_reg = RREG32(MC_PMG_CMD_MRS);
3873                         table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3874                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3875                         for (k = 0; k < table->num_entries; k++) {
3876                                 table->mc_reg_table_entry[k].mc_data[j] =
3877                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3878                                 if (!pi->mem_gddr5)
3879                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3880                         }
3881                         j++;
3882                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3883                                 return -EINVAL;
3884
3885                         if (!pi->mem_gddr5) {
3886                                 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3887                                 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3888                                 for (k = 0; k < table->num_entries; k++) {
3889                                         table->mc_reg_table_entry[k].mc_data[j] =
3890                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3891                                 }
3892                                 j++;
3893                                 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3894                                         return -EINVAL;
3895                         }
3896                         break;
3897                 case MC_SEQ_RESERVE_M:
3898                         temp_reg = RREG32(MC_PMG_CMD_MRS1);
3899                         table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3900                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3901                         for (k = 0; k < table->num_entries; k++) {
3902                                 table->mc_reg_table_entry[k].mc_data[j] =
3903                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3904                         }
3905                         j++;
3906                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3907                                 return -EINVAL;
3908                         break;
3909                 default:
3910                         break;
3911                 }
3912
3913         }
3914
3915         table->last = j;
3916
3917         return 0;
3918 }
3919
3920 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3921 {
3922         bool result = true;
3923
3924         switch(in_reg) {
3925         case MC_SEQ_RAS_TIMING >> 2:
3926                 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3927                 break;
3928         case MC_SEQ_DLL_STBY >> 2:
3929                 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3930                 break;
3931         case MC_SEQ_G5PDX_CMD0 >> 2:
3932                 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3933                 break;
3934         case MC_SEQ_G5PDX_CMD1 >> 2:
3935                 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3936                 break;
3937         case MC_SEQ_G5PDX_CTRL >> 2:
3938                 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3939                 break;
3940         case MC_SEQ_CAS_TIMING >> 2:
3941                 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3942             break;
3943         case MC_SEQ_MISC_TIMING >> 2:
3944                 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3945                 break;
3946         case MC_SEQ_MISC_TIMING2 >> 2:
3947                 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3948                 break;
3949         case MC_SEQ_PMG_DVS_CMD >> 2:
3950                 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3951                 break;
3952         case MC_SEQ_PMG_DVS_CTL >> 2:
3953                 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3954                 break;
3955         case MC_SEQ_RD_CTL_D0 >> 2:
3956                 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3957                 break;
3958         case MC_SEQ_RD_CTL_D1 >> 2:
3959                 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3960                 break;
3961         case MC_SEQ_WR_CTL_D0 >> 2:
3962                 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3963                 break;
3964         case MC_SEQ_WR_CTL_D1 >> 2:
3965                 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3966                 break;
3967         case MC_PMG_CMD_EMRS >> 2:
3968                 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3969                 break;
3970         case MC_PMG_CMD_MRS >> 2:
3971                 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3972                 break;
3973         case MC_PMG_CMD_MRS1 >> 2:
3974                 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3975                 break;
3976         case MC_SEQ_PMG_TIMING >> 2:
3977                 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3978                 break;
3979         case MC_PMG_CMD_MRS2 >> 2:
3980                 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3981                 break;
3982         case MC_SEQ_WR_CTL_2 >> 2:
3983                 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3984                 break;
3985         default:
3986                 result = false;
3987                 break;
3988         }
3989
3990         return result;
3991 }
3992
3993 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3994 {
3995         u8 i, j;
3996
3997         for (i = 0; i < table->last; i++) {
3998                 for (j = 1; j < table->num_entries; j++) {
3999                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4000                             table->mc_reg_table_entry[j].mc_data[i]) {
4001                                 table->valid_flag |= 1 << i;
4002                                 break;
4003                         }
4004                 }
4005         }
4006 }
4007
4008 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4009 {
4010         u32 i;
4011         u16 address;
4012
4013         for (i = 0; i < table->last; i++) {
4014                 table->mc_reg_address[i].s0 =
4015                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4016                         address : table->mc_reg_address[i].s1;
4017         }
4018 }
4019
4020 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4021                                       struct ci_mc_reg_table *ci_table)
4022 {
4023         u8 i, j;
4024
4025         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4026                 return -EINVAL;
4027         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4028                 return -EINVAL;
4029
4030         for (i = 0; i < table->last; i++)
4031                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4032
4033         ci_table->last = table->last;
4034
4035         for (i = 0; i < table->num_entries; i++) {
4036                 ci_table->mc_reg_table_entry[i].mclk_max =
4037                         table->mc_reg_table_entry[i].mclk_max;
4038                 for (j = 0; j < table->last; j++)
4039                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4040                                 table->mc_reg_table_entry[i].mc_data[j];
4041         }
4042         ci_table->num_entries = table->num_entries;
4043
4044         return 0;
4045 }
4046
4047 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4048 {
4049         struct ci_power_info *pi = ci_get_pi(rdev);
4050         struct atom_mc_reg_table *table;
4051         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4052         u8 module_index = rv770_get_memory_module_index(rdev);
4053         int ret;
4054
4055         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4056         if (!table)
4057                 return -ENOMEM;
4058
4059         WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4060         WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4061         WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4062         WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4063         WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4064         WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4065         WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4066         WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4067         WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4068         WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4069         WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4070         WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4071         WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4072         WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4073         WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4074         WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4075         WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4076         WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4077         WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4078         WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4079
4080         ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4081         if (ret)
4082                 goto init_mc_done;
4083
4084         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4085         if (ret)
4086                 goto init_mc_done;
4087
4088         ci_set_s0_mc_reg_index(ci_table);
4089
4090         ret = ci_set_mc_special_registers(rdev, ci_table);
4091         if (ret)
4092                 goto init_mc_done;
4093
4094         ci_set_valid_flag(ci_table);
4095
4096 init_mc_done:
4097         kfree(table);
4098
4099         return ret;
4100 }
4101
4102 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4103                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4104 {
4105         struct ci_power_info *pi = ci_get_pi(rdev);
4106         u32 i, j;
4107
4108         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4109                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4110                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4111                                 return -EINVAL;
4112                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4113                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4114                         i++;
4115                 }
4116         }
4117
4118         mc_reg_table->last = (u8)i;
4119
4120         return 0;
4121 }
4122
4123 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4124                                     SMU7_Discrete_MCRegisterSet *data,
4125                                     u32 num_entries, u32 valid_flag)
4126 {
4127         u32 i, j;
4128
4129         for (i = 0, j = 0; j < num_entries; j++) {
4130                 if (valid_flag & (1 << j)) {
4131                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4132                         i++;
4133                 }
4134         }
4135 }
4136
4137 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4138                                                  const u32 memory_clock,
4139                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4140 {
4141         struct ci_power_info *pi = ci_get_pi(rdev);
4142         u32 i = 0;
4143
4144         for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4145                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4146                         break;
4147         }
4148
4149         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4150                 --i;
4151
4152         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4153                                 mc_reg_table_data, pi->mc_reg_table.last,
4154                                 pi->mc_reg_table.valid_flag);
4155 }
4156
4157 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4158                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4159 {
4160         struct ci_power_info *pi = ci_get_pi(rdev);
4161         u32 i;
4162
4163         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4164                 ci_convert_mc_reg_table_entry_to_smc(rdev,
4165                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4166                                                      &mc_reg_table->data[i]);
4167 }
4168
/*
 * Build the full SMU7_Discrete_MCRegisters image (register addresses
 * plus per-MCLK-level data) and upload it to the SMC at
 * mc_reg_table_start.
 *
 * Returns 0 on success, an error from address population or the SMC
 * copy otherwise.
 */
static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start,
				    (u8 *)&pi->smc_mc_reg_table,
				    sizeof(SMU7_Discrete_MCRegisters),
				    pi->sram_end);
}
4187
/*
 * Re-upload only the per-level MC register data (not the addresses)
 * after an overdrive MCLK change; a no-op unless DPMTABLE_OD_UPDATE_MCLK
 * is pending.
 *
 * Returns 0 on success or when no update is needed.
 */
static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	/* only the data[] portion changes; upload starting at its offset */
	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start +
				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
				    (u8 *)&pi->smc_mc_reg_table.data[0],
				    sizeof(SMU7_Discrete_MCRegisterSet) *
				    pi->dpm_table.mclk_table.count,
				    pi->sram_end);
}
4207
4208 static void ci_enable_voltage_control(struct radeon_device *rdev)
4209 {
4210         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4211
4212         tmp |= VOLT_PWRMGT_EN;
4213         WREG32_SMC(GENERAL_PWRMGT, tmp);
4214 }
4215
4216 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4217                                                       struct radeon_ps *radeon_state)
4218 {
4219         struct ci_ps *state = ci_get_ps(radeon_state);
4220         int i;
4221         u16 pcie_speed, max_speed = 0;
4222
4223         for (i = 0; i < state->performance_level_count; i++) {
4224                 pcie_speed = state->performance_levels[i].pcie_gen;
4225                 if (max_speed < pcie_speed)
4226                         max_speed = pcie_speed;
4227         }
4228
4229         return max_speed;
4230 }
4231
4232 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4233 {
4234         u32 speed_cntl = 0;
4235
4236         speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4237         speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4238
4239         return (u16)speed_cntl;
4240 }
4241
4242 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4243 {
4244         u32 link_width = 0;
4245
4246         link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4247         link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4248
4249         switch (link_width) {
4250         case RADEON_PCIE_LC_LINK_WIDTH_X1:
4251                 return 1;
4252         case RADEON_PCIE_LC_LINK_WIDTH_X2:
4253                 return 2;
4254         case RADEON_PCIE_LC_LINK_WIDTH_X4:
4255                 return 4;
4256         case RADEON_PCIE_LC_LINK_WIDTH_X8:
4257                 return 8;
4258         case RADEON_PCIE_LC_LINK_WIDTH_X12:
4259                 /* not actually supported */
4260                 return 12;
4261         case RADEON_PCIE_LC_LINK_WIDTH_X0:
4262         case RADEON_PCIE_LC_LINK_WIDTH_X16:
4263         default:
4264                 return 16;
4265         }
4266 }
4267
/*
 * Raise the PCIe link speed before switching to the new power state.
 *
 * Compares the fastest gen required by the new state with the gen
 * currently in force (either the last forced gen or the current
 * state's maximum).  Upgrades are requested through the ACPI PSPP
 * interface; if the platform refuses, a fallback gen is latched in
 * pi->force_pcie_gen.  Downgrades are only flagged here
 * (pi->pspp_notify_required) and applied after the state change by
 * ci_notify_link_speed_change_after_state_change().
 */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* gen3 denied: force gen2 and, unless already at gen2, retry below */
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			/* fall through - request denied, keep current speed */
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
4306
4307 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4308                                                            struct radeon_ps *radeon_new_state,
4309                                                            struct radeon_ps *radeon_current_state)
4310 {
4311         struct ci_power_info *pi = ci_get_pi(rdev);
4312         enum radeon_pcie_gen target_link_speed =
4313                 ci_get_maximum_link_speed(rdev, radeon_new_state);
4314         u8 request;
4315
4316         if (pi->pspp_notify_required) {
4317                 if (target_link_speed == RADEON_PCIE_GEN3)
4318                         request = PCIE_PERF_REQ_PECI_GEN3;
4319                 else if (target_link_speed == RADEON_PCIE_GEN2)
4320                         request = PCIE_PERF_REQ_PECI_GEN2;
4321                 else
4322                         request = PCIE_PERF_REQ_PECI_GEN1;
4323
4324                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4325                     (ci_get_current_pcie_speed(rdev) > 0))
4326                         return;
4327
4328 #ifdef CONFIG_ACPI
4329                 radeon_acpi_pcie_performance_request(rdev, request, false);
4330 #endif
4331         }
4332 }
4333
4334 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4335 {
4336         struct ci_power_info *pi = ci_get_pi(rdev);
4337         struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4338                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4339         struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4340                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4341         struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4342                 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4343
4344         if (allowed_sclk_vddc_table == NULL)
4345                 return -EINVAL;
4346         if (allowed_sclk_vddc_table->count < 1)
4347                 return -EINVAL;
4348         if (allowed_mclk_vddc_table == NULL)
4349                 return -EINVAL;
4350         if (allowed_mclk_vddc_table->count < 1)
4351                 return -EINVAL;
4352         if (allowed_mclk_vddci_table == NULL)
4353                 return -EINVAL;
4354         if (allowed_mclk_vddci_table->count < 1)
4355                 return -EINVAL;
4356
4357         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4358         pi->max_vddc_in_pp_table =
4359                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4360
4361         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4362         pi->max_vddci_in_pp_table =
4363                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4364
4365         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4366                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4367         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4368                 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4369         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4370                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4371         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4372                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4373
4374         return 0;
4375 }
4376
4377 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4378 {
4379         struct ci_power_info *pi = ci_get_pi(rdev);
4380         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4381         u32 leakage_index;
4382
4383         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4384                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4385                         *vddc = leakage_table->actual_voltage[leakage_index];
4386                         break;
4387                 }
4388         }
4389 }
4390
4391 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4392 {
4393         struct ci_power_info *pi = ci_get_pi(rdev);
4394         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4395         u32 leakage_index;
4396
4397         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4398                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4399                         *vddci = leakage_table->actual_voltage[leakage_index];
4400                         break;
4401                 }
4402         }
4403 }
4404
4405 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4406                                                                       struct radeon_clock_voltage_dependency_table *table)
4407 {
4408         u32 i;
4409
4410         if (table) {
4411                 for (i = 0; i < table->count; i++)
4412                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4413         }
4414 }
4415
4416 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4417                                                                        struct radeon_clock_voltage_dependency_table *table)
4418 {
4419         u32 i;
4420
4421         if (table) {
4422                 for (i = 0; i < table->count; i++)
4423                         ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4424         }
4425 }
4426
4427 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4428                                                                           struct radeon_vce_clock_voltage_dependency_table *table)
4429 {
4430         u32 i;
4431
4432         if (table) {
4433                 for (i = 0; i < table->count; i++)
4434                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4435         }
4436 }
4437
4438 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4439                                                                           struct radeon_uvd_clock_voltage_dependency_table *table)
4440 {
4441         u32 i;
4442
4443         if (table) {
4444                 for (i = 0; i < table->count; i++)
4445                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4446         }
4447 }
4448
4449 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4450                                                                    struct radeon_phase_shedding_limits_table *table)
4451 {
4452         u32 i;
4453
4454         if (table) {
4455                 for (i = 0; i < table->count; i++)
4456                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4457         }
4458 }
4459
4460 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4461                                                             struct radeon_clock_and_voltage_limits *table)
4462 {
4463         if (table) {
4464                 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4465                 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4466         }
4467 }
4468
4469 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4470                                                          struct radeon_cac_leakage_table *table)
4471 {
4472         u32 i;
4473
4474         if (table) {
4475                 for (i = 0; i < table->count; i++)
4476                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4477         }
4478 }
4479
4480 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4481 {
4482
4483         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4484                                                                   &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4485         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4486                                                                   &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4487         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4488                                                                   &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4489         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4490                                                                    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4491         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4492                                                                       &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4493         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4494                                                                       &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4495         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4496                                                                   &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4497         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4498                                                                   &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4499         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4500                                                                &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4501         ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4502                                                         &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4503         ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4504                                                         &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4505         ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4506                                                      &rdev->pm.dpm.dyn_state.cac_leakage_table);
4507
4508 }
4509
4510 static void ci_get_memory_type(struct radeon_device *rdev)
4511 {
4512         struct ci_power_info *pi = ci_get_pi(rdev);
4513         u32 tmp;
4514
4515         tmp = RREG32(MC_SEQ_MISC0);
4516
4517         if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4518             MC_SEQ_MISC0_GDDR5_VALUE)
4519                 pi->mem_gddr5 = true;
4520         else
4521                 pi->mem_gddr5 = false;
4522
4523 }
4524
4525 static void ci_update_current_ps(struct radeon_device *rdev,
4526                                  struct radeon_ps *rps)
4527 {
4528         struct ci_ps *new_ps = ci_get_ps(rps);
4529         struct ci_power_info *pi = ci_get_pi(rdev);
4530
4531         pi->current_rps = *rps;
4532         pi->current_ps = *new_ps;
4533         pi->current_rps.ps_priv = &pi->current_ps;
4534 }
4535
4536 static void ci_update_requested_ps(struct radeon_device *rdev,
4537                                    struct radeon_ps *rps)
4538 {
4539         struct ci_ps *new_ps = ci_get_ps(rps);
4540         struct ci_power_info *pi = ci_get_pi(rdev);
4541
4542         pi->requested_rps = *rps;
4543         pi->requested_ps = *new_ps;
4544         pi->requested_rps.ps_priv = &pi->requested_ps;
4545 }
4546
4547 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4548 {
4549         struct ci_power_info *pi = ci_get_pi(rdev);
4550         struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4551         struct radeon_ps *new_ps = &requested_ps;
4552
4553         ci_update_requested_ps(rdev, new_ps);
4554
4555         ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4556
4557         return 0;
4558 }
4559
4560 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4561 {
4562         struct ci_power_info *pi = ci_get_pi(rdev);
4563         struct radeon_ps *new_ps = &pi->requested_rps;
4564
4565         ci_update_current_ps(rdev, new_ps);
4566 }
4567
4568
/*
 * ci_dpm_setup_asic - one-time asic-side dpm setup
 *
 * Loads the MC microcode (failure is logged but not fatal), caches the
 * clock registers and memory type, and enables ACPI power management.
 */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	if (ci_mc_load_microcode(rdev))
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
4581
/*
 * ci_dpm_enable - bring up the SMC-based dpm state machine
 *
 * Programs voltage control, MC register shadowing, spread spectrum,
 * thermal protection and the display gap, uploads the SMC firmware and
 * tables, then starts the SMC and enables the individual dpm features
 * (ULV, deep sleep, DIDT, CAC, power containment).  The ordering is
 * significant: firmware and tables must be in place before
 * ci_dpm_start_smc().
 *
 * Returns 0 on success or a negative error code; -EINVAL if the SMC is
 * already running.
 */
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		/* non-fatal: fall back to static MC timings */
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	/* firmware and tables must be uploaded before the SMC is started */
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	/* start the SMC, then turn on the dpm features it manages */
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	/* until the first state switch, the boot state is the current state */
	ci_update_current_ps(rdev, boot_ps);

	return 0;
}
4692
/*
 * ci_dpm_late_enable - dpm setup that must run after irq init
 *
 * Programs the thermal interrupt temperature range and enables the dpm
 * thermal interrupt (when an internal thermal sensor is present), then
 * powergates UVD.  Returns 0 on success or a negative error code.
 */
int ci_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
		PPSMC_Result result;
#endif
		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		/* SMC-side thermal interrupt enable is currently disabled */
#if 0
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
	}

	ci_dpm_powergate_uvd(rdev, true);

	return 0;
}
4721
4722 void ci_dpm_disable(struct radeon_device *rdev)
4723 {
4724         struct ci_power_info *pi = ci_get_pi(rdev);
4725         struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4726
4727         ci_dpm_powergate_uvd(rdev, false);
4728
4729         if (!ci_is_smc_running(rdev))
4730                 return;
4731
4732         if (pi->thermal_protection)
4733                 ci_enable_thermal_protection(rdev, false);
4734         ci_enable_power_containment(rdev, false);
4735         ci_enable_smc_cac(rdev, false);
4736         ci_enable_didt(rdev, false);
4737         ci_enable_spread_spectrum(rdev, false);
4738         ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4739         ci_stop_dpm(rdev);
4740         ci_enable_ds_master_switch(rdev, true);
4741         ci_enable_ulv(rdev, false);
4742         ci_clear_vc(rdev);
4743         ci_reset_to_default(rdev);
4744         ci_dpm_stop_smc(rdev);
4745         ci_force_switch_to_arb_f0(rdev);
4746
4747         ci_update_current_ps(rdev, boot_ps);
4748 }
4749
/*
 * ci_dpm_set_power_state - switch the hw to the requested power state
 *
 * Transition sequence: resolve the new state's clocks against the dpm
 * table, optionally raise the PCIe link speed, freeze sclk/mclk dpm,
 * upload the new levels, enable masks and MC register table, then
 * unfreeze and apply the level enable mask.  A deferred PCIe downgrade
 * is issued last.  The freeze/upload/unfreeze ordering is required by
 * the SMC.  Returns 0 on success or a negative error code.
 */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	/* PCIe downgrades are deferred until after the state change */
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}
4814
/* Thin wrapper: forward the power control level request to the CI core. */
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
{
	return ci_power_control_set_level(rdev);
}
4819
/* Put the asic back into its vbios boot power state. */
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
4824
/* Reprogram the display gap after a display configuration change. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
4829
/* Overlay for the atombios PowerPlayInfo data table, all revisions. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Overlay for per-asic-family pplib clock info entries; CI uses .ci. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

/* Overlay for v1/v2 pplib power state entries. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
4852
4853 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4854                                           struct radeon_ps *rps,
4855                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4856                                           u8 table_rev)
4857 {
4858         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4859         rps->class = le16_to_cpu(non_clock_info->usClassification);
4860         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4861
4862         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4863                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4864                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4865         } else {
4866                 rps->vclk = 0;
4867                 rps->dclk = 0;
4868         }
4869
4870         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4871                 rdev->pm.dpm.boot_ps = rps;
4872         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4873                 rdev->pm.dpm.uvd_ps = rps;
4874 }
4875
/*
 * ci_parse_pplib_clock_info - fill performance level @index from vbios data
 *
 * Decodes one pplib CI clock-info entry into ps->performance_levels[index]
 * (sclk/mclk are split low-16/high-8 in the vbios tables), clamps the
 * PCIe gen/lane values to what the system supports, and harvests side
 * information: the ACPI state's gen, ULV parameters, boot-state clock
 * patch-up, and the PCIe min/max ranges for battery vs performance UI
 * classes.
 */
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* clocks are stored as 16-bit low + 8-bit high parts */
	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	/* track PCIe gen/lane ranges per UI class for pspp decisions */
	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
4944
4945 static int ci_parse_power_table(struct radeon_device *rdev)
4946 {
4947         struct radeon_mode_info *mode_info = &rdev->mode_info;
4948         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4949         union pplib_power_state *power_state;
4950         int i, j, k, non_clock_array_index, clock_array_index;
4951         union pplib_clock_info *clock_info;
4952         struct _StateArray *state_array;
4953         struct _ClockInfoArray *clock_info_array;
4954         struct _NonClockInfoArray *non_clock_info_array;
4955         union power_info *power_info;
4956         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4957         u16 data_offset;
4958         u8 frev, crev;
4959         u8 *power_state_offset;
4960         struct ci_ps *ps;
4961
4962         if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4963                                    &frev, &crev, &data_offset))
4964                 return -EINVAL;
4965         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4966
4967         state_array = (struct _StateArray *)
4968                 (mode_info->atom_context->bios + data_offset +
4969                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
4970         clock_info_array = (struct _ClockInfoArray *)
4971                 (mode_info->atom_context->bios + data_offset +
4972                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4973         non_clock_info_array = (struct _NonClockInfoArray *)
4974                 (mode_info->atom_context->bios + data_offset +
4975                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4976
4977         rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4978                                   state_array->ucNumEntries, GFP_KERNEL);
4979         if (!rdev->pm.dpm.ps)
4980                 return -ENOMEM;
4981         power_state_offset = (u8 *)state_array->states;
4982         for (i = 0; i < state_array->ucNumEntries; i++) {
4983                 u8 *idx;
4984                 power_state = (union pplib_power_state *)power_state_offset;
4985                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4986                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4987                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
4988                 if (!rdev->pm.power_state[i].clock_info)
4989                         return -EINVAL;
4990                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4991                 if (ps == NULL) {
4992                         kfree(rdev->pm.dpm.ps);
4993                         return -ENOMEM;
4994                 }
4995                 rdev->pm.dpm.ps[i].ps_priv = ps;
4996                 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4997                                               non_clock_info,
4998                                               non_clock_info_array->ucEntrySize);
4999                 k = 0;
5000                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5001                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5002                         clock_array_index = idx[j];
5003                         if (clock_array_index >= clock_info_array->ucNumEntries)
5004                                 continue;
5005                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5006                                 break;
5007                         clock_info = (union pplib_clock_info *)
5008                                 ((u8 *)&clock_info_array->clockInfo[0] +
5009                                  (clock_array_index * clock_info_array->ucEntrySize));
5010                         ci_parse_pplib_clock_info(rdev,
5011                                                   &rdev->pm.dpm.ps[i], k,
5012                                                   clock_info);
5013                         k++;
5014                 }
5015                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5016         }
5017         rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5018
5019         /* fill in the vce power states */
5020         for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5021                 u32 sclk, mclk;
5022                 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5023                 clock_info = (union pplib_clock_info *)
5024                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5025                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5026                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5027                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5028                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5029                 rdev->pm.dpm.vce_states[i].sclk = sclk;
5030                 rdev->pm.dpm.vce_states[i].mclk = mclk;
5031         }
5032
5033         return 0;
5034 }
5035
5036 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5037                                     struct ci_vbios_boot_state *boot_state)
5038 {
5039         struct radeon_mode_info *mode_info = &rdev->mode_info;
5040         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5041         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5042         u8 frev, crev;
5043         u16 data_offset;
5044
5045         if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5046                                    &frev, &crev, &data_offset)) {
5047                 firmware_info =
5048                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5049                                                     data_offset);
5050                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5051                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5052                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5053                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5054                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5055                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5056                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5057
5058                 return 0;
5059         }
5060         return -EINVAL;
5061 }
5062
5063 void ci_dpm_fini(struct radeon_device *rdev)
5064 {
5065         int i;
5066
5067         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5068                 kfree(rdev->pm.dpm.ps[i].ps_priv);
5069         }
5070         kfree(rdev->pm.dpm.ps);
5071         kfree(rdev->pm.dpm.priv);
5072         kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5073         r600_free_extended_power_table(rdev);
5074 }
5075
5076 int ci_dpm_init(struct radeon_device *rdev)
5077 {
5078         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5079         u16 data_offset, size;
5080         u8 frev, crev;
5081         struct ci_power_info *pi;
5082         int ret;
5083         u32 mask;
5084
5085         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5086         if (pi == NULL)
5087                 return -ENOMEM;
5088         rdev->pm.dpm.priv = pi;
5089
5090         ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5091         if (ret)
5092                 pi->sys_pcie_mask = 0;
5093         else
5094                 pi->sys_pcie_mask = mask;
5095         pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5096
5097         pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5098         pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5099         pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5100         pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5101
5102         pi->pcie_lane_performance.max = 0;
5103         pi->pcie_lane_performance.min = 16;
5104         pi->pcie_lane_powersaving.max = 0;
5105         pi->pcie_lane_powersaving.min = 16;
5106
5107         ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5108         if (ret) {
5109                 ci_dpm_fini(rdev);
5110                 return ret;
5111         }
5112
5113         ret = r600_get_platform_caps(rdev);
5114         if (ret) {
5115                 ci_dpm_fini(rdev);
5116                 return ret;
5117         }
5118
5119         ret = r600_parse_extended_power_table(rdev);
5120         if (ret) {
5121                 ci_dpm_fini(rdev);
5122                 return ret;
5123         }
5124
5125         ret = ci_parse_power_table(rdev);
5126         if (ret) {
5127                 ci_dpm_fini(rdev);
5128                 return ret;
5129         }
5130
5131         pi->dll_default_on = false;
5132         pi->sram_end = SMC_RAM_END;
5133
5134         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5135         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5136         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5137         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5138         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5139         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5140         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5141         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5142
5143         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5144
5145         pi->sclk_dpm_key_disabled = 0;
5146         pi->mclk_dpm_key_disabled = 0;
5147         pi->pcie_dpm_key_disabled = 0;
5148
5149         pi->caps_sclk_ds = true;
5150
5151         pi->mclk_strobe_mode_threshold = 40000;
5152         pi->mclk_stutter_mode_threshold = 40000;
5153         pi->mclk_edc_enable_threshold = 40000;
5154         pi->mclk_edc_wr_enable_threshold = 40000;
5155
5156         ci_initialize_powertune_defaults(rdev);
5157
5158         pi->caps_fps = false;
5159
5160         pi->caps_sclk_throttle_low_notification = false;
5161
5162         pi->caps_uvd_dpm = true;
5163         pi->caps_vce_dpm = true;
5164
5165         ci_get_leakage_voltages(rdev);
5166         ci_patch_dependency_tables_with_leakage(rdev);
5167         ci_set_private_data_variables_based_on_pptable(rdev);
5168
5169         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5170                 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5171         if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5172                 ci_dpm_fini(rdev);
5173                 return -ENOMEM;
5174         }
5175         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5176         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5177         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5178         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5179         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5180         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5181         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5182         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5183         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5184
5185         rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5186         rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5187         rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5188
5189         rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5190         rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5191         rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5192         rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5193
5194         if (rdev->family == CHIP_HAWAII) {
5195                 pi->thermal_temp_setting.temperature_low = 94500;
5196                 pi->thermal_temp_setting.temperature_high = 95000;
5197                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5198         } else {
5199                 pi->thermal_temp_setting.temperature_low = 99500;
5200                 pi->thermal_temp_setting.temperature_high = 100000;
5201                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5202         }
5203
5204         pi->uvd_enabled = false;
5205
5206         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5207         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5208         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5209         if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5210                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5211         else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5212                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5213
5214         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5215                 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5216                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5217                 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5218                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5219                 else
5220                         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5221         }
5222
5223         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5224                 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5225                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5226                 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5227                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5228                 else
5229                         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5230         }
5231
5232         pi->vddc_phase_shed_control = true;
5233
5234 #if defined(CONFIG_ACPI)
5235         pi->pcie_performance_request =
5236                 radeon_acpi_is_pcie_performance_request_supported(rdev);
5237 #else
5238         pi->pcie_performance_request = false;
5239 #endif
5240
5241         if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5242                                    &frev, &crev, &data_offset)) {
5243                 pi->caps_sclk_ss_support = true;
5244                 pi->caps_mclk_ss_support = true;
5245                 pi->dynamic_ss = true;
5246         } else {
5247                 pi->caps_sclk_ss_support = false;
5248                 pi->caps_mclk_ss_support = false;
5249                 pi->dynamic_ss = true;
5250         }
5251
5252         if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5253                 pi->thermal_protection = true;
5254         else
5255                 pi->thermal_protection = false;
5256
5257         pi->caps_dynamic_ac_timing = true;
5258
5259         pi->uvd_power_gated = false;
5260
5261         /* make sure dc limits are valid */
5262         if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5263             (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5264                 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5265                         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5266
5267         return 0;
5268 }
5269
/* Dump the averaged engine/memory clocks for debugfs consumers. */
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
		   ci_get_average_sclk_freq(rdev),
		   ci_get_average_mclk_freq(rdev));
}
5279
5280 void ci_dpm_print_power_state(struct radeon_device *rdev,
5281                               struct radeon_ps *rps)
5282 {
5283         struct ci_ps *ps = ci_get_ps(rps);
5284         struct ci_pl *pl;
5285         int i;
5286
5287         r600_dpm_print_class_info(rps->class, rps->class2);
5288         r600_dpm_print_cap_info(rps->caps);
5289         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5290         for (i = 0; i < ps->performance_level_count; i++) {
5291                 pl = &ps->performance_levels[i];
5292                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5293                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5294         }
5295         r600_dpm_print_ps_status(rdev, rps);
5296 }
5297
5298 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5299 {
5300         struct ci_power_info *pi = ci_get_pi(rdev);
5301         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5302
5303         if (low)
5304                 return requested_state->performance_levels[0].sclk;
5305         else
5306                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5307 }
5308
5309 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5310 {
5311         struct ci_power_info *pi = ci_get_pi(rdev);
5312         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5313
5314         if (low)
5315                 return requested_state->performance_levels[0].mclk;
5316         else
5317                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5318 }