drivers/gpu/drm/radeon/ci_dpm.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "radeon.h"
27 #include "radeon_asic.h"
28 #include "radeon_ucode.h"
29 #include "cikd.h"
30 #include "r600_dpm.h"
31 #include "ci_dpm.h"
32 #include "atom.h"
33 #include <linux/seq_file.h>
34
35 #define MC_CG_ARB_FREQ_F0 0x0a
36 #define MC_CG_ARB_FREQ_F1 0x0b
37 #define MC_CG_ARB_FREQ_F2 0x0c
38 #define MC_CG_ARB_FREQ_F3 0x0d
39
40 #define SMC_RAM_END 0x40000
41
42 #define VOLTAGE_SCALE 4
43 #define VOLTAGE_VID_OFFSET_SCALE1 625
44 #define VOLTAGE_VID_OFFSET_SCALE2 100
45
46 static const struct ci_pt_defaults defaults_hawaii_xt =
47 {
48 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
49 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
50 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
51 };
52
53 static const struct ci_pt_defaults defaults_hawaii_pro =
54 {
55 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
56 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
57 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
58 };
59
60 static const struct ci_pt_defaults defaults_bonaire_xt =
61 {
62 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
63 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
64 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
65 };
66
67 static const struct ci_pt_defaults defaults_bonaire_pro =
68 {
69 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
70 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
71 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
72 };
73
74 static const struct ci_pt_defaults defaults_saturn_xt =
75 {
76 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
77 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
78 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
79 };
80
81 static const struct ci_pt_defaults defaults_saturn_pro =
82 {
83 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
84 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
85 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
86 };
87
88 static const struct ci_pt_config_reg didt_config_ci[] =
89 {
90 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
91 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
92 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
93 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
94 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
95 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
96 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
97 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
98 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
99 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
100 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
101 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
102 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
103 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
104 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
105 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
106 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
107 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
108 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
109 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
147 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
148 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
149 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
150 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
151 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
157 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
158 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
159 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
160 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
161 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
162 { 0xFFFFFFFF }
163 };
164
165 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
166 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
167 u32 arb_freq_src, u32 arb_freq_dest);
168 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
169 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
170 extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
171 u32 max_voltage_steps,
172 struct atom_voltage_table *voltage_table);
173 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
174 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
175 extern int ci_mc_load_microcode(struct radeon_device *rdev);
176 extern void cik_update_cg(struct radeon_device *rdev,
177 u32 block, bool enable);
178
179 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
180 struct atom_voltage_table_entry *voltage_table,
181 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
182 static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
183 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
184 u32 target_tdp);
185 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
186
187 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
188 PPSMC_Msg msg, u32 parameter);
189
190 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
191 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
192
193 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
194 {
195 struct ci_power_info *pi = rdev->pm.dpm.priv;
196
197 return pi;
198 }
199
200 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
201 {
202 struct ci_ps *ps = rps->ps_priv;
203
204 return ps;
205 }
206
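/*
 * Select the per-ASIC powertune defaults based on the PCI device ID
 * (Bonaire, Saturn and Hawaii variants) and seed the power-containment
 * capability flags.  BAPM is kept disabled on Hawaii.
 */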
207 static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
208 {
209 struct ci_power_info *pi = ci_get_pi(rdev);
210
211 switch (rdev->pdev->device) {
212 case 0x6649:
213 case 0x6650:
214 case 0x6651:
215 case 0x6658:
216 case 0x665C:
217 case 0x665D:
218 default:
219 pi->powertune_defaults = &defaults_bonaire_xt;
220 break;
221 case 0x6640:
222 case 0x6641:
223 case 0x6646:
224 case 0x6647:
225 pi->powertune_defaults = &defaults_saturn_xt;
226 break;
227 case 0x67B8:
228 case 0x67B0:
229 pi->powertune_defaults = &defaults_hawaii_xt;
230 break;
231 case 0x67BA:
232 case 0x67B1:
233 pi->powertune_defaults = &defaults_hawaii_pro;
234 break;
235 case 0x67A0:
236 case 0x67A1:
237 case 0x67A2:
238 case 0x67A8:
239 case 0x67A9:
240 case 0x67AA:
241 case 0x67B9:
242 case 0x67BE:
243 pi->powertune_defaults = &defaults_bonaire_xt;
244 break;
245 }
246
247 pi->dte_tj_offset = 0;
248
249 pi->caps_power_containment = true;
250 pi->caps_cac = false;
251 pi->caps_sq_ramping = false;
252 pi->caps_db_ramping = false;
253 pi->caps_td_ramping = false;
254 pi->caps_tcp_ramping = false;
255
256 if (pi->caps_power_containment) {
257 pi->caps_cac = true;
258 if (rdev->family == CHIP_HAWAII)
259 pi->enable_bapm_feature = false;
260 else
261 pi->enable_bapm_feature = true;
262 pi->enable_tdc_limit_feature = true;
263 pi->enable_pkg_pwr_tracking_feature = true;
264 }
265 }
266
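/*
 * Convert a VDDC value in mV to an 8-bit VID code for the voltage
 * regulator.  The math is done in 0.25 mV units, i.e. roughly
 * VID = (1550 mV - vddc) / 6.25 mV.
 */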
267 static u8 ci_convert_to_vid(u16 vddc)
268 {
269 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
270 }
271
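/*
 * Fill the BAPM VDDC VID leakage tables in the SMC powertune fuse table
 * from the pplib CAC leakage table (at most 8 entries).  With EVV the
 * table supplies explicit lo/hi/hi2 voltages, otherwise the hi value is
 * derived from the reported leakage.
 */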
272 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
273 {
274 struct ci_power_info *pi = ci_get_pi(rdev);
275 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
276 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
277 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
278 u32 i;
279
280 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
281 return -EINVAL;
282 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
283 return -EINVAL;
284 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
285 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
286 return -EINVAL;
287
288 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
289 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
290 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
291 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
292 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
293 } else {
294 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
295 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
296 }
297 }
298 return 0;
299 }
300
301 static int ci_populate_vddc_vid(struct radeon_device *rdev)
302 {
303 struct ci_power_info *pi = ci_get_pi(rdev);
304 u8 *vid = pi->smc_powertune_table.VddCVid;
305 u32 i;
306
307 if (pi->vddc_voltage_table.count > 8)
308 return -EINVAL;
309
310 for (i = 0; i < pi->vddc_voltage_table.count; i++)
311 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
312
313 return 0;
314 }
315
316 static int ci_populate_svi_load_line(struct radeon_device *rdev)
317 {
318 struct ci_power_info *pi = ci_get_pi(rdev);
319 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
320
321 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
322 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
323 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
324 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
325
326 return 0;
327 }
328
329 static int ci_populate_tdc_limit(struct radeon_device *rdev)
330 {
331 struct ci_power_info *pi = ci_get_pi(rdev);
332 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
333 u16 tdc_limit;
334
335 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
336 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
337 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
338 pt_defaults->tdc_vddc_throttle_release_limit_perc;
339 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
340
341 return 0;
342 }
343
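/*
 * The SRAM read below only verifies that the PM fuse table is reachable;
 * on success TdcWaterfallCtl is overwritten with the per-ASIC default
 * rather than the value that was read back.
 */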
344 static int ci_populate_dw8(struct radeon_device *rdev)
345 {
346 struct ci_power_info *pi = ci_get_pi(rdev);
347 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
348 int ret;
349
350 ret = ci_read_smc_sram_dword(rdev,
351 SMU7_FIRMWARE_HEADER_LOCATION +
352 offsetof(SMU7_Firmware_Header, PmFuseTable) +
353 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
354 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
355 pi->sram_end);
356 if (ret)
357 return -EINVAL;
358 else
359 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
360
361 return 0;
362 }
363
364 static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
365 {
366 struct ci_power_info *pi = ci_get_pi(rdev);
367
368 if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
369 (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
370 rdev->pm.dpm.fan.fan_output_sensitivity =
371 rdev->pm.dpm.fan.default_fan_output_sensitivity;
372
373 pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
374 cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
375
376 return 0;
377 }
378
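/*
 * Derive the GNB LPML min/max VID bounds from the non-zero entries of
 * the BAPM hi/lo VID tables populated above.
 */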
379 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
380 {
381 struct ci_power_info *pi = ci_get_pi(rdev);
382 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
383 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
384 int i, min, max;
385
386 min = max = hi_vid[0];
387 for (i = 0; i < 8; i++) {
388 if (0 != hi_vid[i]) {
389 if (min > hi_vid[i])
390 min = hi_vid[i];
391 if (max < hi_vid[i])
392 max = hi_vid[i];
393 }
394
395 if (0 != lo_vid[i]) {
396 if (min > lo_vid[i])
397 min = lo_vid[i];
398 if (max < lo_vid[i])
399 max = lo_vid[i];
400 }
401 }
402
403 if ((min == 0) || (max == 0))
404 return -EINVAL;
405 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
406 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
407
408 return 0;
409 }
410
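/*
 * Scale the high/low CAC leakage values from the CAC TDP table for the
 * SMC (divide by 100, then x256 fixed point) and store them big-endian
 * in the fuse table.
 */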
411 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
412 {
413 struct ci_power_info *pi = ci_get_pi(rdev);
414 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
415 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
416 struct radeon_cac_tdp_table *cac_tdp_table =
417 rdev->pm.dpm.dyn_state.cac_tdp_table;
418
419 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
420 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
421
422 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
423 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
424
425 return 0;
426 }
427
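/*
 * Fill the BAPM related fields of the SMC DPM table: default/target TDP,
 * thermal limits, the optional PPM package power/temperature limits and
 * the BAPMTI_R/RC thermal coupling matrices from the per-ASIC defaults.
 */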
428 static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
429 {
430 struct ci_power_info *pi = ci_get_pi(rdev);
431 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
432 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
433 struct radeon_cac_tdp_table *cac_tdp_table =
434 rdev->pm.dpm.dyn_state.cac_tdp_table;
435 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
436 int i, j, k;
437 const u16 *def1;
438 const u16 *def2;
439
440 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
441 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
442
443 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
444 dpm_table->GpuTjMax =
445 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
446 dpm_table->GpuTjHyst = 8;
447
448 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
449
450 if (ppm) {
451 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
452 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
453 } else {
454 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
455 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
456 }
457
458 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
459 def1 = pt_defaults->bapmti_r;
460 def2 = pt_defaults->bapmti_rc;
461
462 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
463 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
464 for (k = 0; k < SMU7_DTE_SINKS; k++) {
465 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
466 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
467 def1++;
468 def2++;
469 }
470 }
471 }
472
473 return 0;
474 }
475
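/*
 * Locate the PM fuse table in SMC SRAM via the firmware header, populate
 * all of its sub-tables and upload the result to the SMC.  Only done when
 * power containment is enabled.
 */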
476 static int ci_populate_pm_base(struct radeon_device *rdev)
477 {
478 struct ci_power_info *pi = ci_get_pi(rdev);
479 u32 pm_fuse_table_offset;
480 int ret;
481
482 if (pi->caps_power_containment) {
483 ret = ci_read_smc_sram_dword(rdev,
484 SMU7_FIRMWARE_HEADER_LOCATION +
485 offsetof(SMU7_Firmware_Header, PmFuseTable),
486 &pm_fuse_table_offset, pi->sram_end);
487 if (ret)
488 return ret;
489 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
490 if (ret)
491 return ret;
492 ret = ci_populate_vddc_vid(rdev);
493 if (ret)
494 return ret;
495 ret = ci_populate_svi_load_line(rdev);
496 if (ret)
497 return ret;
498 ret = ci_populate_tdc_limit(rdev);
499 if (ret)
500 return ret;
501 ret = ci_populate_dw8(rdev);
502 if (ret)
503 return ret;
504 ret = ci_populate_fuzzy_fan(rdev);
505 if (ret)
506 return ret;
507 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
508 if (ret)
509 return ret;
510 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
511 if (ret)
512 return ret;
513 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
514 (u8 *)&pi->smc_powertune_table,
515 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
516 if (ret)
517 return ret;
518 }
519
520 return 0;
521 }
522
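/* Toggle DIDT_CTRL_EN for each DIDT block (SQ/DB/TD/TCP) with ramping enabled. */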
523 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
524 {
525 struct ci_power_info *pi = ci_get_pi(rdev);
526 u32 data;
527
528 if (pi->caps_sq_ramping) {
529 data = RREG32_DIDT(DIDT_SQ_CTRL0);
530 if (enable)
531 data |= DIDT_CTRL_EN;
532 else
533 data &= ~DIDT_CTRL_EN;
534 WREG32_DIDT(DIDT_SQ_CTRL0, data);
535 }
536
537 if (pi->caps_db_ramping) {
538 data = RREG32_DIDT(DIDT_DB_CTRL0);
539 if (enable)
540 data |= DIDT_CTRL_EN;
541 else
542 data &= ~DIDT_CTRL_EN;
543 WREG32_DIDT(DIDT_DB_CTRL0, data);
544 }
545
546 if (pi->caps_td_ramping) {
547 data = RREG32_DIDT(DIDT_TD_CTRL0);
548 if (enable)
549 data |= DIDT_CTRL_EN;
550 else
551 data &= ~DIDT_CTRL_EN;
552 WREG32_DIDT(DIDT_TD_CTRL0, data);
553 }
554
555 if (pi->caps_tcp_ramping) {
556 data = RREG32_DIDT(DIDT_TCP_CTRL0);
557 if (enable)
558 data |= DIDT_CTRL_EN;
559 else
560 data &= ~DIDT_CTRL_EN;
561 WREG32_DIDT(DIDT_TCP_CTRL0, data);
562 }
563 }
564
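/*
 * Program a 0xFFFFFFFF-terminated list of read-modify-write register
 * updates.  CACHE-type entries are accumulated and OR'd into the next
 * non-cache write; SMC_IND/DIDT_IND entries use the indirect register
 * spaces, everything else is a plain MMIO access (offset in dwords).
 */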
565 static int ci_program_pt_config_registers(struct radeon_device *rdev,
566 const struct ci_pt_config_reg *cac_config_regs)
567 {
568 const struct ci_pt_config_reg *config_regs = cac_config_regs;
569 u32 data;
570 u32 cache = 0;
571
572 if (config_regs == NULL)
573 return -EINVAL;
574
575 while (config_regs->offset != 0xFFFFFFFF) {
576 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
577 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
578 } else {
579 switch (config_regs->type) {
580 case CISLANDS_CONFIGREG_SMC_IND:
581 data = RREG32_SMC(config_regs->offset);
582 break;
583 case CISLANDS_CONFIGREG_DIDT_IND:
584 data = RREG32_DIDT(config_regs->offset);
585 break;
586 default:
587 data = RREG32(config_regs->offset << 2);
588 break;
589 }
590
591 data &= ~config_regs->mask;
592 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
593 data |= cache;
594
595 switch (config_regs->type) {
596 case CISLANDS_CONFIGREG_SMC_IND:
597 WREG32_SMC(config_regs->offset, data);
598 break;
599 case CISLANDS_CONFIGREG_DIDT_IND:
600 WREG32_DIDT(config_regs->offset, data);
601 break;
602 default:
603 WREG32(config_regs->offset << 2, data);
604 break;
605 }
606 cache = 0;
607 }
608 config_regs++;
609 }
610 return 0;
611 }
612
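/*
 * Enable/disable DIDT.  The config registers are only programmed on
 * enable, and all accesses are wrapped in RLC safe mode.
 */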
613 static int ci_enable_didt(struct radeon_device *rdev, bool enable)
614 {
615 struct ci_power_info *pi = ci_get_pi(rdev);
616 int ret;
617
618 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
619 pi->caps_td_ramping || pi->caps_tcp_ramping) {
620 cik_enter_rlc_safe_mode(rdev);
621
622 if (enable) {
623 ret = ci_program_pt_config_registers(rdev, didt_config_ci);
624 if (ret) {
625 cik_exit_rlc_safe_mode(rdev);
626 return ret;
627 }
628 }
629
630 ci_do_enable_didt(rdev, enable);
631
632 cik_exit_rlc_safe_mode(rdev);
633 }
634
635 return 0;
636 }
637
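/*
 * Enable or disable the SMC power-containment features (BAPM/DTE, TDC
 * limit, package power limit) and track which ones actually took effect
 * in pi->power_containment_features.
 */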
638 static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
639 {
640 struct ci_power_info *pi = ci_get_pi(rdev);
641 PPSMC_Result smc_result;
642 int ret = 0;
643
644 if (enable) {
645 pi->power_containment_features = 0;
646 if (pi->caps_power_containment) {
647 if (pi->enable_bapm_feature) {
648 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
649 if (smc_result != PPSMC_Result_OK)
650 ret = -EINVAL;
651 else
652 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
653 }
654
655 if (pi->enable_tdc_limit_feature) {
656 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
657 if (smc_result != PPSMC_Result_OK)
658 ret = -EINVAL;
659 else
660 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
661 }
662
663 if (pi->enable_pkg_pwr_tracking_feature) {
664 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
665 if (smc_result != PPSMC_Result_OK) {
666 ret = -EINVAL;
667 } else {
668 struct radeon_cac_tdp_table *cac_tdp_table =
669 rdev->pm.dpm.dyn_state.cac_tdp_table;
670 u32 default_pwr_limit =
671 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
672
673 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
674
675 ci_set_power_limit(rdev, default_pwr_limit);
676 }
677 }
678 }
679 } else {
680 if (pi->caps_power_containment && pi->power_containment_features) {
681 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
682 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
683
684 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
685 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
686
687 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
688 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
689 pi->power_containment_features = 0;
690 }
691 }
692
693 return ret;
694 }
695
696 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
697 {
698 struct ci_power_info *pi = ci_get_pi(rdev);
699 PPSMC_Result smc_result;
700 int ret = 0;
701
702 if (pi->caps_cac) {
703 if (enable) {
704 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
705 if (smc_result != PPSMC_Result_OK) {
706 ret = -EINVAL;
707 pi->cac_enabled = false;
708 } else {
709 pi->cac_enabled = true;
710 }
711 } else if (pi->cac_enabled) {
712 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
713 pi->cac_enabled = false;
714 }
715 }
716
717 return ret;
718 }
719
720 static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
721 bool enable)
722 {
723 struct ci_power_info *pi = ci_get_pi(rdev);
724 PPSMC_Result smc_result = PPSMC_Result_OK;
725
726 if (pi->thermal_sclk_dpm_enabled) {
727 if (enable)
728 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
729 else
730 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
731 }
732
733 if (smc_result == PPSMC_Result_OK)
734 return 0;
735 else
736 return -EINVAL;
737 }
738
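/*
 * Apply the user TDP adjustment (a percentage) to the configurable TDP
 * and hand the new target to the SMC as an overdrive target TDP.
 */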
739 static int ci_power_control_set_level(struct radeon_device *rdev)
740 {
741 struct ci_power_info *pi = ci_get_pi(rdev);
742 struct radeon_cac_tdp_table *cac_tdp_table =
743 rdev->pm.dpm.dyn_state.cac_tdp_table;
744 s32 adjust_percent;
745 s32 target_tdp;
746 int ret = 0;
747 bool adjust_polarity = false; /* ??? */
748
749 if (pi->caps_power_containment) {
750 adjust_percent = adjust_polarity ?
751 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
752 target_tdp = ((100 + adjust_percent) *
753 (s32)cac_tdp_table->configurable_tdp) / 100;
754
755 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
756 }
757
758 return ret;
759 }
760
761 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
762 {
763 struct ci_power_info *pi = ci_get_pi(rdev);
764
765 if (pi->uvd_power_gated == gate)
766 return;
767
768 pi->uvd_power_gated = gate;
769
770 ci_update_uvd_dpm(rdev, gate);
771 }
772
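/*
 * Return true when the vblank period is too short to hide a memory
 * reclock: roughly 450 us is needed for GDDR5 and 300 us otherwise.
 */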
773 bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
774 {
775 struct ci_power_info *pi = ci_get_pi(rdev);
776 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
777 u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
778
779 if (vblank_time < switch_limit)
780 return true;
781 else
782 return false;
783
784 }
785
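/*
 * Clamp the requested power state to the current limits: pick VCE clocks,
 * decide whether mclk switching must be disabled (multiple CRTCs or a
 * short vblank), apply the AC/DC clock/voltage caps and keep the two
 * performance levels consistent with the chosen sclk/mclk.
 */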
786 static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
787 struct radeon_ps *rps)
788 {
789 struct ci_ps *ps = ci_get_ps(rps);
790 struct ci_power_info *pi = ci_get_pi(rdev);
791 struct radeon_clock_and_voltage_limits *max_limits;
792 bool disable_mclk_switching;
793 u32 sclk, mclk;
794 int i;
795
796 if (rps->vce_active) {
797 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
798 rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
799 } else {
800 rps->evclk = 0;
801 rps->ecclk = 0;
802 }
803
804 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
805 ci_dpm_vblank_too_short(rdev))
806 disable_mclk_switching = true;
807 else
808 disable_mclk_switching = false;
809
810 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
811 pi->battery_state = true;
812 else
813 pi->battery_state = false;
814
815 if (rdev->pm.dpm.ac_power)
816 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
817 else
818 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
819
820 if (rdev->pm.dpm.ac_power == false) {
821 for (i = 0; i < ps->performance_level_count; i++) {
822 if (ps->performance_levels[i].mclk > max_limits->mclk)
823 ps->performance_levels[i].mclk = max_limits->mclk;
824 if (ps->performance_levels[i].sclk > max_limits->sclk)
825 ps->performance_levels[i].sclk = max_limits->sclk;
826 }
827 }
828
829 /* XXX validate the min clocks required for display */
830
831 if (disable_mclk_switching) {
832 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
833 sclk = ps->performance_levels[0].sclk;
834 } else {
835 mclk = ps->performance_levels[0].mclk;
836 sclk = ps->performance_levels[0].sclk;
837 }
838
839 if (rps->vce_active) {
840 if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
841 sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
842 if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
843 mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
844 }
845
846 ps->performance_levels[0].sclk = sclk;
847 ps->performance_levels[0].mclk = mclk;
848
849 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
850 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
851
852 if (disable_mclk_switching) {
853 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
854 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
855 } else {
856 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
857 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
858 }
859 }
860
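/* Program the thermal interrupt high/low trip points (in degrees C). */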
861 static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
862 int min_temp, int max_temp)
863 {
864 int low_temp = 0 * 1000;
865 int high_temp = 255 * 1000;
866 u32 tmp;
867
868 if (low_temp < min_temp)
869 low_temp = min_temp;
870 if (high_temp > max_temp)
871 high_temp = max_temp;
872 if (high_temp < low_temp) {
873 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
874 return -EINVAL;
875 }
876
877 tmp = RREG32_SMC(CG_THERMAL_INT);
878 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
879 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
880 CI_DIG_THERM_INTL(low_temp / 1000);
881 WREG32_SMC(CG_THERMAL_INT, tmp);
882
883 #if 0
884 /* XXX: need to figure out how to handle this properly */
885 tmp = RREG32_SMC(CG_THERMAL_CTRL);
886 tmp &= DIG_THERM_DPM_MASK;
887 tmp |= DIG_THERM_DPM(high_temp / 1000);
888 WREG32_SMC(CG_THERMAL_CTRL, tmp);
889 #endif
890
891 rdev->pm.dpm.thermal.min_temp = low_temp;
892 rdev->pm.dpm.thermal.max_temp = high_temp;
893
894 return 0;
895 }
896
897 static int ci_thermal_enable_alert(struct radeon_device *rdev,
898 bool enable)
899 {
900 u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
901 PPSMC_Result result;
902
903 if (enable) {
904 thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
905 WREG32_SMC(CG_THERMAL_INT, thermal_int);
906 rdev->irq.dpm_thermal = false;
907 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
908 if (result != PPSMC_Result_OK) {
909 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
910 return -EINVAL;
911 }
912 } else {
913 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
914 WREG32_SMC(CG_THERMAL_INT, thermal_int);
915 rdev->irq.dpm_thermal = true;
916 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
917 if (result != PPSMC_Result_OK) {
918 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
919 return -EINVAL;
920 }
921 }
922
923 return 0;
924 }
925
926 static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
927 {
928 struct ci_power_info *pi = ci_get_pi(rdev);
929 u32 tmp;
930
931 if (pi->fan_ctrl_is_in_default_mode) {
932 tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
933 pi->fan_ctrl_default_mode = tmp;
934 tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
935 pi->t_min = tmp;
936 pi->fan_ctrl_is_in_default_mode = false;
937 }
938
939 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
940 tmp |= TMIN(0);
941 WREG32_SMC(CG_FDO_CTRL2, tmp);
942
943 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
944 tmp |= FDO_PWM_MODE(mode);
945 WREG32_SMC(CG_FDO_CTRL2, tmp);
946 }
947
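/*
 * Build the SMU7 fan table (trip temperatures, PWM slopes, hysteresis,
 * refresh period) from the pplib fan profile and upload it to SMC SRAM.
 * Falls back to disabling ucode fan control if no table offset or a zero
 * duty100 is found.
 */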
948 static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
949 {
950 struct ci_power_info *pi = ci_get_pi(rdev);
951 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
952 u32 duty100;
953 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
954 u16 fdo_min, slope1, slope2;
955 u32 reference_clock, tmp;
956 int ret;
957 u64 tmp64;
958
959 if (!pi->fan_table_start) {
960 rdev->pm.dpm.fan.ucode_fan_control = false;
961 return 0;
962 }
963
964 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
965
966 if (duty100 == 0) {
967 rdev->pm.dpm.fan.ucode_fan_control = false;
968 return 0;
969 }
970
971 tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
972 do_div(tmp64, 10000);
973 fdo_min = (u16)tmp64;
974
975 t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
976 t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
977
978 pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
979 pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
980
981 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
982 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
983
984 fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
985 fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
986 fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
987
988 fan_table.Slope1 = cpu_to_be16(slope1);
989 fan_table.Slope2 = cpu_to_be16(slope2);
990
991 fan_table.FdoMin = cpu_to_be16(fdo_min);
992
993 fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
994
995 fan_table.HystUp = cpu_to_be16(1);
996
997 fan_table.HystSlope = cpu_to_be16(1);
998
999 fan_table.TempRespLim = cpu_to_be16(5);
1000
1001 reference_clock = radeon_get_xclk(rdev);
1002
1003 fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
1004 reference_clock) / 1600);
1005
1006 fan_table.FdoMax = cpu_to_be16((u16)duty100);
1007
1008 tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
1009 fan_table.TempSrc = (uint8_t)tmp;
1010
1011 ret = ci_copy_bytes_to_smc(rdev,
1012 pi->fan_table_start,
1013 (u8 *)(&fan_table),
1014 sizeof(fan_table),
1015 pi->sram_end);
1016
1017 if (ret) {
1018 DRM_ERROR("Failed to load fan table to the SMC.\n");
1019 rdev->pm.dpm.fan.ucode_fan_control = false;
1020 }
1021
1022 return 0;
1023 }
1024
1025 static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
1026 {
1027 struct ci_power_info *pi = ci_get_pi(rdev);
1028 PPSMC_Result ret;
1029
1030 if (pi->caps_od_fuzzy_fan_control_support) {
1031 ret = ci_send_msg_to_smc_with_parameter(rdev,
1032 PPSMC_StartFanControl,
1033 FAN_CONTROL_FUZZY);
1034 if (ret != PPSMC_Result_OK)
1035 return -EINVAL;
1036 ret = ci_send_msg_to_smc_with_parameter(rdev,
1037 PPSMC_MSG_SetFanPwmMax,
1038 rdev->pm.dpm.fan.default_max_fan_pwm);
1039 if (ret != PPSMC_Result_OK)
1040 return -EINVAL;
1041 } else {
1042 ret = ci_send_msg_to_smc_with_parameter(rdev,
1043 PPSMC_StartFanControl,
1044 FAN_CONTROL_TABLE);
1045 if (ret != PPSMC_Result_OK)
1046 return -EINVAL;
1047 }
1048
1049 pi->fan_is_controlled_by_smc = true;
1050 return 0;
1051 }
1052
1053 static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
1054 {
1055 PPSMC_Result ret;
1056 struct ci_power_info *pi = ci_get_pi(rdev);
1057
1058 ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
1059 if (ret == PPSMC_Result_OK) {
1060 pi->fan_is_controlled_by_smc = false;
1061 return 0;
1062 } else
1063 return -EINVAL;
1064 }
1065
1066 int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
1067 u32 *speed)
1068 {
1069 u32 duty, duty100;
1070 u64 tmp64;
1071
1072 if (rdev->pm.no_fan)
1073 return -ENOENT;
1074
1075 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1076 duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
1077
1078 if (duty100 == 0)
1079 return -EINVAL;
1080
1081 tmp64 = (u64)duty * 100;
1082 do_div(tmp64, duty100);
1083 *speed = (u32)tmp64;
1084
1085 if (*speed > 100)
1086 *speed = 100;
1087
1088 return 0;
1089 }
1090
1091 int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
1092 u32 speed)
1093 {
1094 u32 tmp;
1095 u32 duty, duty100;
1096 u64 tmp64;
1097 struct ci_power_info *pi = ci_get_pi(rdev);
1098
1099 if (rdev->pm.no_fan)
1100 return -ENOENT;
1101
1102 if (pi->fan_is_controlled_by_smc)
1103 return -EINVAL;
1104
1105 if (speed > 100)
1106 return -EINVAL;
1107
1108 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1109
1110 if (duty100 == 0)
1111 return -EINVAL;
1112
1113 tmp64 = (u64)speed * duty100;
1114 do_div(tmp64, 100);
1115 duty = (u32)tmp64;
1116
1117 tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
1118 tmp |= FDO_STATIC_DUTY(duty);
1119 WREG32_SMC(CG_FDO_CTRL0, tmp);
1120
1121 return 0;
1122 }
1123
1124 void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
1125 {
1126 if (mode) {
1127 /* stop auto-manage */
1128 if (rdev->pm.dpm.fan.ucode_fan_control)
1129 ci_fan_ctrl_stop_smc_fan_control(rdev);
1130 ci_fan_ctrl_set_static_mode(rdev, mode);
1131 } else {
1132 /* restart auto-manage */
1133 if (rdev->pm.dpm.fan.ucode_fan_control)
1134 ci_thermal_start_smc_fan_control(rdev);
1135 else
1136 ci_fan_ctrl_set_default_mode(rdev);
1137 }
1138 }
1139
1140 u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
1141 {
1142 struct ci_power_info *pi = ci_get_pi(rdev);
1143 u32 tmp;
1144
1145 if (pi->fan_is_controlled_by_smc)
1146 return 0;
1147
1148 tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
1149 return (tmp >> FDO_PWM_MODE_SHIFT);
1150 }
1151
1152 #if 0
1153 static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
1154 u32 *speed)
1155 {
1156 u32 tach_period;
1157 u32 xclk = radeon_get_xclk(rdev);
1158
1159 if (rdev->pm.no_fan)
1160 return -ENOENT;
1161
1162 if (rdev->pm.fan_pulses_per_revolution == 0)
1163 return -ENOENT;
1164
1165 tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
1166 if (tach_period == 0)
1167 return -ENOENT;
1168
1169 *speed = 60 * xclk * 10000 / tach_period;
1170
1171 return 0;
1172 }
1173
1174 static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
1175 u32 speed)
1176 {
1177 u32 tach_period, tmp;
1178 u32 xclk = radeon_get_xclk(rdev);
1179
1180 if (rdev->pm.no_fan)
1181 return -ENOENT;
1182
1183 if (rdev->pm.fan_pulses_per_revolution == 0)
1184 return -ENOENT;
1185
1186 if ((speed < rdev->pm.fan_min_rpm) ||
1187 (speed > rdev->pm.fan_max_rpm))
1188 return -EINVAL;
1189
1190 if (rdev->pm.dpm.fan.ucode_fan_control)
1191 ci_fan_ctrl_stop_smc_fan_control(rdev);
1192
1193 tach_period = 60 * xclk * 10000 / (8 * speed);
1194 tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
1195 tmp |= TARGET_PERIOD(tach_period);
1196 WREG32_SMC(CG_TACH_CTRL, tmp);
1197
1198 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
1199
1200 return 0;
1201 }
1202 #endif
1203
1204 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1205 {
1206 struct ci_power_info *pi = ci_get_pi(rdev);
1207 u32 tmp;
1208
1209 if (!pi->fan_ctrl_is_in_default_mode) {
1210 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
1211 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1212 WREG32_SMC(CG_FDO_CTRL2, tmp);
1213
1214 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
1215 tmp |= TMIN(pi->t_min);
1216 WREG32_SMC(CG_FDO_CTRL2, tmp);
1217 pi->fan_ctrl_is_in_default_mode = true;
1218 }
1219 }
1220
1221 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
1222 {
1223 if (rdev->pm.dpm.fan.ucode_fan_control) {
1224 ci_fan_ctrl_start_smc_fan_control(rdev);
1225 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1226 }
1227 }
1228
1229 static void ci_thermal_initialize(struct radeon_device *rdev)
1230 {
1231 u32 tmp;
1232
1233 if (rdev->pm.fan_pulses_per_revolution) {
1234 tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
1235 tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
1236 WREG32_SMC(CG_TACH_CTRL, tmp);
1237 }
1238
1239 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
1240 tmp |= TACH_PWM_RESP_RATE(0x28);
1241 WREG32_SMC(CG_FDO_CTRL2, tmp);
1242 }
1243
1244 static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
1245 {
1246 int ret;
1247
1248 ci_thermal_initialize(rdev);
1249 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1250 if (ret)
1251 return ret;
1252 ret = ci_thermal_enable_alert(rdev, true);
1253 if (ret)
1254 return ret;
1255 if (rdev->pm.dpm.fan.ucode_fan_control) {
1256 ret = ci_thermal_setup_fan_table(rdev);
1257 if (ret)
1258 return ret;
1259 ci_thermal_start_smc_fan_control(rdev);
1260 }
1261
1262 return 0;
1263 }
1264
1265 static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
1266 {
1267 if (!rdev->pm.no_fan)
1268 ci_fan_ctrl_set_default_mode(rdev);
1269 }
1270
1271 #if 0
1272 static int ci_read_smc_soft_register(struct radeon_device *rdev,
1273 u16 reg_offset, u32 *value)
1274 {
1275 struct ci_power_info *pi = ci_get_pi(rdev);
1276
1277 return ci_read_smc_sram_dword(rdev,
1278 pi->soft_regs_start + reg_offset,
1279 value, pi->sram_end);
1280 }
1281 #endif
1282
1283 static int ci_write_smc_soft_register(struct radeon_device *rdev,
1284 u16 reg_offset, u32 value)
1285 {
1286 struct ci_power_info *pi = ci_get_pi(rdev);
1287
1288 return ci_write_smc_sram_dword(rdev,
1289 pi->soft_regs_start + reg_offset,
1290 value, pi->sram_end);
1291 }
1292
1293 static void ci_init_fps_limits(struct radeon_device *rdev)
1294 {
1295 struct ci_power_info *pi = ci_get_pi(rdev);
1296 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1297
1298 if (pi->caps_fps) {
1299 u16 tmp;
1300
1301 tmp = 45;
1302 table->FpsHighT = cpu_to_be16(tmp);
1303
1304 tmp = 30;
1305 table->FpsLowT = cpu_to_be16(tmp);
1306 }
1307 }
1308
1309 static int ci_update_sclk_t(struct radeon_device *rdev)
1310 {
1311 struct ci_power_info *pi = ci_get_pi(rdev);
1312 int ret = 0;
1313 u32 low_sclk_interrupt_t = 0;
1314
1315 if (pi->caps_sclk_throttle_low_notification) {
1316 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1317
1318 ret = ci_copy_bytes_to_smc(rdev,
1319 pi->dpm_table_start +
1320 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1321 (u8 *)&low_sclk_interrupt_t,
1322 sizeof(u32), pi->sram_end);
1323
1324 }
1325
1326 return ret;
1327 }
1328
1329 static void ci_get_leakage_voltages(struct radeon_device *rdev)
1330 {
1331 struct ci_power_info *pi = ci_get_pi(rdev);
1332 u16 leakage_id, virtual_voltage_id;
1333 u16 vddc, vddci;
1334 int i;
1335
1336 pi->vddc_leakage.count = 0;
1337 pi->vddci_leakage.count = 0;
1338
1339 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1340 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1341 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1342 if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
1343 continue;
1344 if (vddc != 0 && vddc != virtual_voltage_id) {
1345 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1346 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1347 pi->vddc_leakage.count++;
1348 }
1349 }
1350 } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
1351 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1352 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1353 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
1354 virtual_voltage_id,
1355 leakage_id) == 0) {
1356 if (vddc != 0 && vddc != virtual_voltage_id) {
1357 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1358 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1359 pi->vddc_leakage.count++;
1360 }
1361 if (vddci != 0 && vddci != virtual_voltage_id) {
1362 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1363 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1364 pi->vddci_leakage.count++;
1365 }
1366 }
1367 }
1368 }
1369 }
1370
1371 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1372 {
1373 struct ci_power_info *pi = ci_get_pi(rdev);
1374 bool want_thermal_protection;
1375 enum radeon_dpm_event_src dpm_event_src;
1376 u32 tmp;
1377
1378 switch (sources) {
1379 case 0:
1380 default:
1381 want_thermal_protection = false;
1382 break;
1383 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1384 want_thermal_protection = true;
1385 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1386 break;
1387 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1388 want_thermal_protection = true;
1389 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1390 break;
1391 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1392 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1393 want_thermal_protection = true;
1394 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1395 break;
1396 }
1397
1398 if (want_thermal_protection) {
1399 #if 0
1400 /* XXX: need to figure out how to handle this properly */
1401 tmp = RREG32_SMC(CG_THERMAL_CTRL);
1402 tmp &= DPM_EVENT_SRC_MASK;
1403 tmp |= DPM_EVENT_SRC(dpm_event_src);
1404 WREG32_SMC(CG_THERMAL_CTRL, tmp);
1405 #endif
1406
1407 tmp = RREG32_SMC(GENERAL_PWRMGT);
1408 if (pi->thermal_protection)
1409 tmp &= ~THERMAL_PROTECTION_DIS;
1410 else
1411 tmp |= THERMAL_PROTECTION_DIS;
1412 WREG32_SMC(GENERAL_PWRMGT, tmp);
1413 } else {
1414 tmp = RREG32_SMC(GENERAL_PWRMGT);
1415 tmp |= THERMAL_PROTECTION_DIS;
1416 WREG32_SMC(GENERAL_PWRMGT, tmp);
1417 }
1418 }
1419
1420 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1421 enum radeon_dpm_auto_throttle_src source,
1422 bool enable)
1423 {
1424 struct ci_power_info *pi = ci_get_pi(rdev);
1425
1426 if (enable) {
1427 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1428 pi->active_auto_throttle_sources |= 1 << source;
1429 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1430 }
1431 } else {
1432 if (pi->active_auto_throttle_sources & (1 << source)) {
1433 pi->active_auto_throttle_sources &= ~(1 << source);
1434 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1435 }
1436 }
1437 }
1438
1439 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
1440 {
1441 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1442 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1443 }
1444
1445 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
1446 {
1447 struct ci_power_info *pi = ci_get_pi(rdev);
1448 PPSMC_Result smc_result;
1449
1450 if (!pi->need_update_smu7_dpm_table)
1451 return 0;
1452
1453 if ((!pi->sclk_dpm_key_disabled) &&
1454 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1455 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1456 if (smc_result != PPSMC_Result_OK)
1457 return -EINVAL;
1458 }
1459
1460 if ((!pi->mclk_dpm_key_disabled) &&
1461 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1462 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1463 if (smc_result != PPSMC_Result_OK)
1464 return -EINVAL;
1465 }
1466
1467 pi->need_update_smu7_dpm_table = 0;
1468 return 0;
1469 }
1470
1471 static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
1472 {
1473 struct ci_power_info *pi = ci_get_pi(rdev);
1474 PPSMC_Result smc_result;
1475
1476 if (enable) {
1477 if (!pi->sclk_dpm_key_disabled) {
1478 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
1479 if (smc_result != PPSMC_Result_OK)
1480 return -EINVAL;
1481 }
1482
1483 if (!pi->mclk_dpm_key_disabled) {
1484 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
1485 if (smc_result != PPSMC_Result_OK)
1486 return -EINVAL;
1487
1488 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
1489
1490 WREG32_SMC(LCAC_MC0_CNTL, 0x05);
1491 WREG32_SMC(LCAC_MC1_CNTL, 0x05);
1492 WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
1493
1494 udelay(10);
1495
1496 WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
1497 WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
1498 WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
1499 }
1500 } else {
1501 if (!pi->sclk_dpm_key_disabled) {
1502 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
1503 if (smc_result != PPSMC_Result_OK)
1504 return -EINVAL;
1505 }
1506
1507 if (!pi->mclk_dpm_key_disabled) {
1508 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1509 if (smc_result != PPSMC_Result_OK)
1510 return -EINVAL;
1511 }
1512 }
1513
1514 return 0;
1515 }
1516
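/*
 * Turn on dynamic power management: enable GLOBAL_PWRMGT/DYNAMIC_PM,
 * program the voltage change timeout soft register, then enable voltage
 * control, sclk/mclk DPM and (optionally) PCIe DPM in the SMC.
 */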
1517 static int ci_start_dpm(struct radeon_device *rdev)
1518 {
1519 struct ci_power_info *pi = ci_get_pi(rdev);
1520 PPSMC_Result smc_result;
1521 int ret;
1522 u32 tmp;
1523
1524 tmp = RREG32_SMC(GENERAL_PWRMGT);
1525 tmp |= GLOBAL_PWRMGT_EN;
1526 WREG32_SMC(GENERAL_PWRMGT, tmp);
1527
1528 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1529 tmp |= DYNAMIC_PM_EN;
1530 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1531
1532 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1533
1534 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1535
1536 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1537 if (smc_result != PPSMC_Result_OK)
1538 return -EINVAL;
1539
1540 ret = ci_enable_sclk_mclk_dpm(rdev, true);
1541 if (ret)
1542 return ret;
1543
1544 if (!pi->pcie_dpm_key_disabled) {
1545 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1546 if (smc_result != PPSMC_Result_OK)
1547 return -EINVAL;
1548 }
1549
1550 return 0;
1551 }
1552
1553 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1554 {
1555 struct ci_power_info *pi = ci_get_pi(rdev);
1556 PPSMC_Result smc_result;
1557
1558 if (!pi->need_update_smu7_dpm_table)
1559 return 0;
1560
1561 if ((!pi->sclk_dpm_key_disabled) &&
1562 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1563 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1564 if (smc_result != PPSMC_Result_OK)
1565 return -EINVAL;
1566 }
1567
1568 if ((!pi->mclk_dpm_key_disabled) &&
1569 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1570 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1571 if (smc_result != PPSMC_Result_OK)
1572 return -EINVAL;
1573 }
1574
1575 return 0;
1576 }
1577
1578 static int ci_stop_dpm(struct radeon_device *rdev)
1579 {
1580 struct ci_power_info *pi = ci_get_pi(rdev);
1581 PPSMC_Result smc_result;
1582 int ret;
1583 u32 tmp;
1584
1585 tmp = RREG32_SMC(GENERAL_PWRMGT);
1586 tmp &= ~GLOBAL_PWRMGT_EN;
1587 WREG32_SMC(GENERAL_PWRMGT, tmp);
1588
1589 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1590 tmp &= ~DYNAMIC_PM_EN;
1591 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1592
1593 if (!pi->pcie_dpm_key_disabled) {
1594 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1595 if (smc_result != PPSMC_Result_OK)
1596 return -EINVAL;
1597 }
1598
1599 ret = ci_enable_sclk_mclk_dpm(rdev, false);
1600 if (ret)
1601 return ret;
1602
1603 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1604 if (smc_result != PPSMC_Result_OK)
1605 return -EINVAL;
1606
1607 return 0;
1608 }
1609
1610 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1611 {
1612 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1613
1614 if (enable)
1615 tmp &= ~SCLK_PWRMGT_OFF;
1616 else
1617 tmp |= SCLK_PWRMGT_OFF;
1618 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1619 }
1620
1621 #if 0
1622 static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1623 bool ac_power)
1624 {
1625 struct ci_power_info *pi = ci_get_pi(rdev);
1626 struct radeon_cac_tdp_table *cac_tdp_table =
1627 rdev->pm.dpm.dyn_state.cac_tdp_table;
1628 u32 power_limit;
1629
1630 if (ac_power)
1631 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1632 else
1633 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1634
1635 ci_set_power_limit(rdev, power_limit);
1636
1637 if (pi->caps_automatic_dc_transition) {
1638 if (ac_power)
1639 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1640 else
1641 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1642 }
1643
1644 return 0;
1645 }
1646 #endif
1647
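/* The message argument is passed through the SMC_MSG_ARG_0 mailbox register. */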
1648 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1649 PPSMC_Msg msg, u32 parameter)
1650 {
1651 WREG32(SMC_MSG_ARG_0, parameter);
1652 return ci_send_msg_to_smc(rdev, msg);
1653 }
1654
1655 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1656 PPSMC_Msg msg, u32 *parameter)
1657 {
1658 PPSMC_Result smc_result;
1659
1660 smc_result = ci_send_msg_to_smc(rdev, msg);
1661
1662 if ((smc_result == PPSMC_Result_OK) && parameter)
1663 *parameter = RREG32(SMC_MSG_ARG_0);
1664
1665 return smc_result;
1666 }
1667
1668 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1669 {
1670 struct ci_power_info *pi = ci_get_pi(rdev);
1671
1672 if (!pi->sclk_dpm_key_disabled) {
1673 PPSMC_Result smc_result =
1674 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1675 if (smc_result != PPSMC_Result_OK)
1676 return -EINVAL;
1677 }
1678
1679 return 0;
1680 }
1681
1682 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1683 {
1684 struct ci_power_info *pi = ci_get_pi(rdev);
1685
1686 if (!pi->mclk_dpm_key_disabled) {
1687 PPSMC_Result smc_result =
1688 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1689 if (smc_result != PPSMC_Result_OK)
1690 return -EINVAL;
1691 }
1692
1693 return 0;
1694 }
1695
1696 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1697 {
1698 struct ci_power_info *pi = ci_get_pi(rdev);
1699
1700 if (!pi->pcie_dpm_key_disabled) {
1701 PPSMC_Result smc_result =
1702 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1703 if (smc_result != PPSMC_Result_OK)
1704 return -EINVAL;
1705 }
1706
1707 return 0;
1708 }
1709
1710 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1711 {
1712 struct ci_power_info *pi = ci_get_pi(rdev);
1713
1714 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1715 PPSMC_Result smc_result =
1716 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1717 if (smc_result != PPSMC_Result_OK)
1718 return -EINVAL;
1719 }
1720
1721 return 0;
1722 }
1723
1724 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1725 u32 target_tdp)
1726 {
1727 PPSMC_Result smc_result =
1728 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1729 if (smc_result != PPSMC_Result_OK)
1730 return -EINVAL;
1731 return 0;
1732 }
1733
1734 #if 0
1735 static int ci_set_boot_state(struct radeon_device *rdev)
1736 {
1737 return ci_enable_sclk_mclk_dpm(rdev, false);
1738 }
1739 #endif
1740
1741 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1742 {
1743 u32 sclk_freq;
1744 PPSMC_Result smc_result =
1745 ci_send_msg_to_smc_return_parameter(rdev,
1746 PPSMC_MSG_API_GetSclkFrequency,
1747 &sclk_freq);
1748 if (smc_result != PPSMC_Result_OK)
1749 sclk_freq = 0;
1750
1751 return sclk_freq;
1752 }
1753
1754 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1755 {
1756 u32 mclk_freq;
1757 PPSMC_Result smc_result =
1758 ci_send_msg_to_smc_return_parameter(rdev,
1759 PPSMC_MSG_API_GetMclkFrequency,
1760 &mclk_freq);
1761 if (smc_result != PPSMC_Result_OK)
1762 mclk_freq = 0;
1763
1764 return mclk_freq;
1765 }
1766
1767 static void ci_dpm_start_smc(struct radeon_device *rdev)
1768 {
1769 int i;
1770
1771 ci_program_jump_on_start(rdev);
1772 ci_start_smc_clock(rdev);
1773 ci_start_smc(rdev);
1774 for (i = 0; i < rdev->usec_timeout; i++) {
1775 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1776 break;
1777 }
1778 }
1779
1780 static void ci_dpm_stop_smc(struct radeon_device *rdev)
1781 {
1782 ci_reset_smc(rdev);
1783 ci_stop_smc_clock(rdev);
1784 }
1785
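/*
 * Read the SMC firmware header to discover the SRAM offsets of the DPM
 * table, soft registers, MC register table, fan table and ARB table.
 */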
1786 static int ci_process_firmware_header(struct radeon_device *rdev)
1787 {
1788 struct ci_power_info *pi = ci_get_pi(rdev);
1789 u32 tmp;
1790 int ret;
1791
1792 ret = ci_read_smc_sram_dword(rdev,
1793 SMU7_FIRMWARE_HEADER_LOCATION +
1794 offsetof(SMU7_Firmware_Header, DpmTable),
1795 &tmp, pi->sram_end);
1796 if (ret)
1797 return ret;
1798
1799 pi->dpm_table_start = tmp;
1800
1801 ret = ci_read_smc_sram_dword(rdev,
1802 SMU7_FIRMWARE_HEADER_LOCATION +
1803 offsetof(SMU7_Firmware_Header, SoftRegisters),
1804 &tmp, pi->sram_end);
1805 if (ret)
1806 return ret;
1807
1808 pi->soft_regs_start = tmp;
1809
1810 ret = ci_read_smc_sram_dword(rdev,
1811 SMU7_FIRMWARE_HEADER_LOCATION +
1812 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1813 &tmp, pi->sram_end);
1814 if (ret)
1815 return ret;
1816
1817 pi->mc_reg_table_start = tmp;
1818
1819 ret = ci_read_smc_sram_dword(rdev,
1820 SMU7_FIRMWARE_HEADER_LOCATION +
1821 offsetof(SMU7_Firmware_Header, FanTable),
1822 &tmp, pi->sram_end);
1823 if (ret)
1824 return ret;
1825
1826 pi->fan_table_start = tmp;
1827
1828 ret = ci_read_smc_sram_dword(rdev,
1829 SMU7_FIRMWARE_HEADER_LOCATION +
1830 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1831 &tmp, pi->sram_end);
1832 if (ret)
1833 return ret;
1834
1835 pi->arb_table_start = tmp;
1836
1837 return 0;
1838 }
1839
1840 static void ci_read_clock_registers(struct radeon_device *rdev)
1841 {
1842 struct ci_power_info *pi = ci_get_pi(rdev);
1843
1844 pi->clock_registers.cg_spll_func_cntl =
1845 RREG32_SMC(CG_SPLL_FUNC_CNTL);
1846 pi->clock_registers.cg_spll_func_cntl_2 =
1847 RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1848 pi->clock_registers.cg_spll_func_cntl_3 =
1849 RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1850 pi->clock_registers.cg_spll_func_cntl_4 =
1851 RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1852 pi->clock_registers.cg_spll_spread_spectrum =
1853 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1854 pi->clock_registers.cg_spll_spread_spectrum_2 =
1855 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1856 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1857 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1858 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1859 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1860 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1861 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1862 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1863 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1864 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1865 }
1866
1867 static void ci_init_sclk_t(struct radeon_device *rdev)
1868 {
1869 struct ci_power_info *pi = ci_get_pi(rdev);
1870
1871 pi->low_sclk_interrupt_t = 0;
1872 }
1873
1874 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1875 bool enable)
1876 {
1877 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1878
1879 if (enable)
1880 tmp &= ~THERMAL_PROTECTION_DIS;
1881 else
1882 tmp |= THERMAL_PROTECTION_DIS;
1883 WREG32_SMC(GENERAL_PWRMGT, tmp);
1884 }
1885
1886 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1887 {
1888 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1889
1890 tmp |= STATIC_PM_EN;
1891
1892 WREG32_SMC(GENERAL_PWRMGT, tmp);
1893 }
1894
1895 #if 0
1896 static int ci_enter_ulp_state(struct radeon_device *rdev)
1897 {
1898
1899 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1900
1901 udelay(25000);
1902
1903 return 0;
1904 }
1905
1906 static int ci_exit_ulp_state(struct radeon_device *rdev)
1907 {
1908 int i;
1909
1910 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1911
1912 udelay(7000);
1913
1914 for (i = 0; i < rdev->usec_timeout; i++) {
1915 if (RREG32(SMC_RESP_0) == 1)
1916 break;
1917 udelay(1000);
1918 }
1919
1920 return 0;
1921 }
1922 #endif
1923
1924 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1925 bool has_display)
1926 {
1927 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1928
1929 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1930 }
1931
1932 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1933 bool enable)
1934 {
1935 struct ci_power_info *pi = ci_get_pi(rdev);
1936
1937 if (enable) {
1938 if (pi->caps_sclk_ds) {
1939 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1940 return -EINVAL;
1941 } else {
1942 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1943 return -EINVAL;
1944 }
1945 } else {
1946 if (pi->caps_sclk_ds) {
1947 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1948 return -EINVAL;
1949 }
1950 }
1951
1952 return 0;
1953 }
1954
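/*
 * Program the display gap control based on the number of active CRTCs and
 * derive the pre-vblank interval from the current refresh rate and vblank
 * time, then tell the SMC whether exactly one display is active.
 */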
1955 static void ci_program_display_gap(struct radeon_device *rdev)
1956 {
1957 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1958 u32 pre_vbi_time_in_us;
1959 u32 frame_time_in_us;
1960 u32 ref_clock = rdev->clock.spll.reference_freq;
1961 u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1962 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1963
1964 tmp &= ~DISP_GAP_MASK;
1965 if (rdev->pm.dpm.new_active_crtc_count > 0)
1966 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1967 else
1968 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1969 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1970
1971 if (refresh_rate == 0)
1972 refresh_rate = 60;
1973 if (vblank_time == 0xffffffff)
1974 vblank_time = 500;
1975 frame_time_in_us = 1000000 / refresh_rate;
1976 pre_vbi_time_in_us =
1977 frame_time_in_us - 200 - vblank_time;
1978 tmp = pre_vbi_time_in_us * (ref_clock / 100);
1979
1980 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1981 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1982 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1983
1985 	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
1987 }
1988
1989 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1990 {
1991 struct ci_power_info *pi = ci_get_pi(rdev);
1992 u32 tmp;
1993
1994 if (enable) {
1995 if (pi->caps_sclk_ss_support) {
1996 tmp = RREG32_SMC(GENERAL_PWRMGT);
1997 tmp |= DYN_SPREAD_SPECTRUM_EN;
1998 WREG32_SMC(GENERAL_PWRMGT, tmp);
1999 }
2000 } else {
2001 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
2002 tmp &= ~SSEN;
2003 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
2004
2005 tmp = RREG32_SMC(GENERAL_PWRMGT);
2006 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
2007 WREG32_SMC(GENERAL_PWRMGT, tmp);
2008 }
2009 }
2010
2011 static void ci_program_sstp(struct radeon_device *rdev)
2012 {
2013 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
2014 }
2015
2016 static void ci_enable_display_gap(struct radeon_device *rdev)
2017 {
2018 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2019
2020 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2021 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2022 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2023
2024 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2025 }
2026
2027 static void ci_program_vc(struct radeon_device *rdev)
2028 {
2029 u32 tmp;
2030
2031 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2032 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
2033 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2034
2035 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
2036 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
2037 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
2038 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
2039 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
2040 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
2041 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
2042 WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
2043 }
2044
2045 static void ci_clear_vc(struct radeon_device *rdev)
2046 {
2047 u32 tmp;
2048
2049 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2050 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
2051 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2052
2053 WREG32_SMC(CG_FTV_0, 0);
2054 WREG32_SMC(CG_FTV_1, 0);
2055 WREG32_SMC(CG_FTV_2, 0);
2056 WREG32_SMC(CG_FTV_3, 0);
2057 WREG32_SMC(CG_FTV_4, 0);
2058 WREG32_SMC(CG_FTV_5, 0);
2059 WREG32_SMC(CG_FTV_6, 0);
2060 WREG32_SMC(CG_FTV_7, 0);
2061 }
2062
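/*
 * Wait for the SMC boot sequence to finish, stop the SMC clock and reset
 * the SMC, then load the SMC microcode into SMC SRAM.
 */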
2063 static int ci_upload_firmware(struct radeon_device *rdev)
2064 {
2065 struct ci_power_info *pi = ci_get_pi(rdev);
2066 int i, ret;
2067
2068 for (i = 0; i < rdev->usec_timeout; i++) {
2069 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2070 break;
2071 }
2072 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2073
2074 ci_stop_smc_clock(rdev);
2075 ci_reset_smc(rdev);
2076
2077 ret = ci_load_smc_ucode(rdev, pi->sram_end);
2078
2079 return ret;
2081 }
2082
2083 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2084 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2085 struct atom_voltage_table *voltage_table)
2086 {
2087 u32 i;
2088
2089 if (voltage_dependency_table == NULL)
2090 return -EINVAL;
2091
2092 voltage_table->mask_low = 0;
2093 voltage_table->phase_delay = 0;
2094
2095 voltage_table->count = voltage_dependency_table->count;
2096 for (i = 0; i < voltage_table->count; i++) {
2097 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2098 voltage_table->entries[i].smio_low = 0;
2099 }
2100
2101 return 0;
2102 }
2103
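/*
 * Build the VDDC, VDDCI and MVDD voltage tables from either the ATOM GPIO
 * LUT or the SVI2 clock/voltage dependency tables, trimming each table to
 * the corresponding SMU7 level limit.
 */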
2104 static int ci_construct_voltage_tables(struct radeon_device *rdev)
2105 {
2106 struct ci_power_info *pi = ci_get_pi(rdev);
2107 int ret;
2108
2109 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2110 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2111 VOLTAGE_OBJ_GPIO_LUT,
2112 &pi->vddc_voltage_table);
2113 if (ret)
2114 return ret;
2115 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2116 ret = ci_get_svi2_voltage_table(rdev,
2117 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2118 &pi->vddc_voltage_table);
2119 if (ret)
2120 return ret;
2121 }
2122
2123 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2124 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2125 &pi->vddc_voltage_table);
2126
2127 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2128 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2129 VOLTAGE_OBJ_GPIO_LUT,
2130 &pi->vddci_voltage_table);
2131 if (ret)
2132 return ret;
2133 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2134 ret = ci_get_svi2_voltage_table(rdev,
2135 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2136 &pi->vddci_voltage_table);
2137 if (ret)
2138 return ret;
2139 }
2140
2141 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2142 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2143 &pi->vddci_voltage_table);
2144
2145 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2146 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2147 VOLTAGE_OBJ_GPIO_LUT,
2148 &pi->mvdd_voltage_table);
2149 if (ret)
2150 return ret;
2151 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2152 ret = ci_get_svi2_voltage_table(rdev,
2153 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2154 &pi->mvdd_voltage_table);
2155 if (ret)
2156 return ret;
2157 }
2158
2159 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2160 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2161 &pi->mvdd_voltage_table);
2162
2163 return 0;
2164 }
2165
2166 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
2167 struct atom_voltage_table_entry *voltage_table,
2168 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2169 {
2170 int ret;
2171
2172 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
2173 &smc_voltage_table->StdVoltageHiSidd,
2174 &smc_voltage_table->StdVoltageLoSidd);
2175
2176 if (ret) {
2177 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2178 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2179 }
2180
2181 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2182 smc_voltage_table->StdVoltageHiSidd =
2183 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2184 smc_voltage_table->StdVoltageLoSidd =
2185 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2186 }
2187
2188 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2189 SMU7_Discrete_DpmTable *table)
2190 {
2191 struct ci_power_info *pi = ci_get_pi(rdev);
2192 unsigned int count;
2193
2194 table->VddcLevelCount = pi->vddc_voltage_table.count;
2195 for (count = 0; count < table->VddcLevelCount; count++) {
2196 ci_populate_smc_voltage_table(rdev,
2197 &pi->vddc_voltage_table.entries[count],
2198 &table->VddcLevel[count]);
2199
2200 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2201 table->VddcLevel[count].Smio |=
2202 pi->vddc_voltage_table.entries[count].smio_low;
2203 else
2204 table->VddcLevel[count].Smio = 0;
2205 }
2206 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2207
2208 return 0;
2209 }
2210
2211 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2212 SMU7_Discrete_DpmTable *table)
2213 {
2214 unsigned int count;
2215 struct ci_power_info *pi = ci_get_pi(rdev);
2216
2217 table->VddciLevelCount = pi->vddci_voltage_table.count;
2218 for (count = 0; count < table->VddciLevelCount; count++) {
2219 ci_populate_smc_voltage_table(rdev,
2220 &pi->vddci_voltage_table.entries[count],
2221 &table->VddciLevel[count]);
2222
2223 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2224 table->VddciLevel[count].Smio |=
2225 pi->vddci_voltage_table.entries[count].smio_low;
2226 else
2227 table->VddciLevel[count].Smio = 0;
2228 }
2229 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2230
2231 return 0;
2232 }
2233
2234 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2235 SMU7_Discrete_DpmTable *table)
2236 {
2237 struct ci_power_info *pi = ci_get_pi(rdev);
2238 unsigned int count;
2239
2240 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2241 for (count = 0; count < table->MvddLevelCount; count++) {
2242 ci_populate_smc_voltage_table(rdev,
2243 &pi->mvdd_voltage_table.entries[count],
2244 &table->MvddLevel[count]);
2245
2246 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2247 table->MvddLevel[count].Smio |=
2248 pi->mvdd_voltage_table.entries[count].smio_low;
2249 else
2250 table->MvddLevel[count].Smio = 0;
2251 }
2252 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2253
2254 return 0;
2255 }
2256
2257 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2258 SMU7_Discrete_DpmTable *table)
2259 {
2260 int ret;
2261
2262 ret = ci_populate_smc_vddc_table(rdev, table);
2263 if (ret)
2264 return ret;
2265
2266 ret = ci_populate_smc_vddci_table(rdev, table);
2267 if (ret)
2268 return ret;
2269
2270 ret = ci_populate_smc_mvdd_table(rdev, table);
2271 if (ret)
2272 return ret;
2273
2274 return 0;
2275 }
2276
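/*
 * Look up the MVDD level for the given memory clock. Note that, as written,
 * the function returns -EINVAL even when a matching entry is found, so
 * callers such as ci_populate_smc_acpi_level() fall back to a zero MVDD.
 */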
2277 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2278 SMU7_Discrete_VoltageLevel *voltage)
2279 {
2280 struct ci_power_info *pi = ci_get_pi(rdev);
2281 u32 i = 0;
2282
2283 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2284 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2285 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2286 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2287 break;
2288 }
2289 }
2290
2291 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2292 return -EINVAL;
2293 }
2294
2295 return -EINVAL;
2296 }
2297
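/*
 * Derive the standard hi/lo SIDD voltages for a VDDC value from the CAC
 * leakage table; if no suitable entry is found the raw voltage (scaled) is
 * used for both.
 */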
2298 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
2299 struct atom_voltage_table_entry *voltage_table,
2300 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2301 {
2302 u16 v_index, idx;
2303 bool voltage_found = false;
2304 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2305 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2306
2307 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2308 return -EINVAL;
2309
2310 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2311 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2312 if (voltage_table->value ==
2313 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2314 voltage_found = true;
2315 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2316 idx = v_index;
2317 else
2318 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2319 *std_voltage_lo_sidd =
2320 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2321 *std_voltage_hi_sidd =
2322 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2323 break;
2324 }
2325 }
2326
2327 if (!voltage_found) {
2328 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2329 if (voltage_table->value <=
2330 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2331 voltage_found = true;
2332 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2333 idx = v_index;
2334 else
2335 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2336 *std_voltage_lo_sidd =
2337 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2338 *std_voltage_hi_sidd =
2339 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2340 break;
2341 }
2342 }
2343 }
2344 }
2345
2346 return 0;
2347 }
2348
2349 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2350 const struct radeon_phase_shedding_limits_table *limits,
2351 u32 sclk,
2352 u32 *phase_shedding)
2353 {
2354 unsigned int i;
2355
2356 *phase_shedding = 1;
2357
2358 for (i = 0; i < limits->count; i++) {
2359 if (sclk < limits->entries[i].sclk) {
2360 *phase_shedding = i;
2361 break;
2362 }
2363 }
2364 }
2365
2366 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2367 const struct radeon_phase_shedding_limits_table *limits,
2368 u32 mclk,
2369 u32 *phase_shedding)
2370 {
2371 unsigned int i;
2372
2373 *phase_shedding = 1;
2374
2375 for (i = 0; i < limits->count; i++) {
2376 if (mclk < limits->entries[i].mclk) {
2377 *phase_shedding = i;
2378 break;
2379 }
2380 }
2381 }
2382
2383 static int ci_init_arb_table_index(struct radeon_device *rdev)
2384 {
2385 struct ci_power_info *pi = ci_get_pi(rdev);
2386 u32 tmp;
2387 int ret;
2388
2389 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2390 &tmp, pi->sram_end);
2391 if (ret)
2392 return ret;
2393
2394 tmp &= 0x00FFFFFF;
2395 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2396
2397 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2398 tmp, pi->sram_end);
2399 }
2400
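/*
 * Return the voltage of the first dependency entry whose clock is at or
 * above the requested clock, or the voltage of the highest entry when the
 * clock exceeds the whole table.
 */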
2401 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2402 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2403 u32 clock, u32 *voltage)
2404 {
2405 u32 i = 0;
2406
2407 if (allowed_clock_voltage_table->count == 0)
2408 return -EINVAL;
2409
2410 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2411 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2412 *voltage = allowed_clock_voltage_table->entries[i].v;
2413 return 0;
2414 }
2415 }
2416
2417 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2418
2419 return 0;
2420 }
2421
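/*
 * Pick the largest deep sleep divider id such that sclk / (1 << id) stays
 * at or above the minimum engine clock (or the caller-supplied floor).
 */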
2422 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2423 u32 sclk, u32 min_sclk_in_sr)
2424 {
2425 u32 i;
2426 u32 tmp;
2427 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2428 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2429
2430 if (sclk < min)
2431 return 0;
2432
2433 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2434 tmp = sclk / (1 << i);
2435 if (tmp >= min || i == 0)
2436 break;
2437 }
2438
2439 return (u8)i;
2440 }
2441
2442 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2443 {
2444 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2445 }
2446
2447 static int ci_reset_to_default(struct radeon_device *rdev)
2448 {
2449 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2450 0 : -EINVAL;
2451 }
2452
2453 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2454 {
2455 u32 tmp;
2456
2457 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2458
2459 if (tmp == MC_CG_ARB_FREQ_F0)
2460 return 0;
2461
2462 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2463 }
2464
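/*
 * On Hawaii devices 0x67B0/0x67B1 with the affected memory revision
 * (detected via MC_SEQ_MISC0), patch the DRAM timing2 value for two
 * specific memory clock ranges.
 */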
2465 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2466 const u32 engine_clock,
2467 const u32 memory_clock,
2468 u32 *dram_timing2)
2469 {
2470 bool patch;
2471 u32 tmp, tmp2;
2472
2473 tmp = RREG32(MC_SEQ_MISC0);
2474 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2475
2476 if (patch &&
2477 ((rdev->pdev->device == 0x67B0) ||
2478 (rdev->pdev->device == 0x67B1))) {
2479 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2480 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2481 *dram_timing2 &= ~0x00ff0000;
2482 *dram_timing2 |= tmp2 << 16;
2483 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2484 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2485 *dram_timing2 &= ~0x00ff0000;
2486 *dram_timing2 |= tmp2 << 16;
2487 }
2488 }
2489 }
2490
2492 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2493 u32 sclk,
2494 u32 mclk,
2495 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2496 {
2497 u32 dram_timing;
2498 u32 dram_timing2;
2499 u32 burst_time;
2500
2501 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2502
2503 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
2504 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2505 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2506
2507 ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2508
2509 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2510 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2511 arb_regs->McArbBurstTime = (u8)burst_time;
2512
2513 return 0;
2514 }
2515
2516 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2517 {
2518 struct ci_power_info *pi = ci_get_pi(rdev);
2519 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2520 u32 i, j;
2521 int ret = 0;
2522
2523 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2524
2525 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2526 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2527 ret = ci_populate_memory_timing_parameters(rdev,
2528 pi->dpm_table.sclk_table.dpm_levels[i].value,
2529 pi->dpm_table.mclk_table.dpm_levels[j].value,
2530 &arb_regs.entries[i][j]);
2531 if (ret)
2532 break;
2533 }
2534 }
2535
2536 if (ret == 0)
2537 ret = ci_copy_bytes_to_smc(rdev,
2538 pi->arb_table_start,
2539 (u8 *)&arb_regs,
2540 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2541 pi->sram_end);
2542
2543 return ret;
2544 }
2545
2546 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2547 {
2548 struct ci_power_info *pi = ci_get_pi(rdev);
2549
2550 if (pi->need_update_smu7_dpm_table == 0)
2551 return 0;
2552
2553 return ci_do_program_memory_timing_parameters(rdev);
2554 }
2555
2556 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2557 struct radeon_ps *radeon_boot_state)
2558 {
2559 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2560 struct ci_power_info *pi = ci_get_pi(rdev);
2561 u32 level = 0;
2562
2563 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2564 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2565 boot_state->performance_levels[0].sclk) {
2566 pi->smc_state_table.GraphicsBootLevel = level;
2567 break;
2568 }
2569 }
2570
2571 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2572 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2573 boot_state->performance_levels[0].mclk) {
2574 pi->smc_state_table.MemoryBootLevel = level;
2575 break;
2576 }
2577 }
2578 }
2579
2580 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2581 {
2582 u32 i;
2583 u32 mask_value = 0;
2584
2585 for (i = dpm_table->count; i > 0; i--) {
2586 mask_value = mask_value << 1;
2587 if (dpm_table->dpm_levels[i-1].enabled)
2588 mask_value |= 0x1;
2589 else
2590 mask_value &= 0xFFFFFFFE;
2591 }
2592
2593 return mask_value;
2594 }
2595
2596 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2597 SMU7_Discrete_DpmTable *table)
2598 {
2599 struct ci_power_info *pi = ci_get_pi(rdev);
2600 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2601 u32 i;
2602
2603 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2604 table->LinkLevel[i].PcieGenSpeed =
2605 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2606 table->LinkLevel[i].PcieLaneCount =
2607 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2608 table->LinkLevel[i].EnabledForActivity = 1;
2609 table->LinkLevel[i].DownT = cpu_to_be32(5);
2610 table->LinkLevel[i].UpT = cpu_to_be32(30);
2611 }
2612
2613 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2614 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2615 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2616 }
2617
2618 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2619 SMU7_Discrete_DpmTable *table)
2620 {
2621 u32 count;
2622 struct atom_clock_dividers dividers;
2623 int ret = -EINVAL;
2624
2625 table->UvdLevelCount =
2626 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2627
2628 for (count = 0; count < table->UvdLevelCount; count++) {
2629 table->UvdLevel[count].VclkFrequency =
2630 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2631 table->UvdLevel[count].DclkFrequency =
2632 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2633 table->UvdLevel[count].MinVddc =
2634 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2635 table->UvdLevel[count].MinVddcPhases = 1;
2636
2637 ret = radeon_atom_get_clock_dividers(rdev,
2638 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2639 table->UvdLevel[count].VclkFrequency, false, &dividers);
2640 if (ret)
2641 return ret;
2642
2643 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2644
2645 ret = radeon_atom_get_clock_dividers(rdev,
2646 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2647 table->UvdLevel[count].DclkFrequency, false, &dividers);
2648 if (ret)
2649 return ret;
2650
2651 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2652
2653 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2654 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2655 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2656 }
2657
2658 return ret;
2659 }
2660
2661 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2662 SMU7_Discrete_DpmTable *table)
2663 {
2664 u32 count;
2665 struct atom_clock_dividers dividers;
2666 int ret = -EINVAL;
2667
2668 table->VceLevelCount =
2669 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2670
2671 for (count = 0; count < table->VceLevelCount; count++) {
2672 table->VceLevel[count].Frequency =
2673 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2674 table->VceLevel[count].MinVoltage =
2675 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2676 table->VceLevel[count].MinPhases = 1;
2677
2678 ret = radeon_atom_get_clock_dividers(rdev,
2679 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2680 table->VceLevel[count].Frequency, false, &dividers);
2681 if (ret)
2682 return ret;
2683
2684 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2685
2686 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2687 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2688 }
2689
2690 return ret;
2692 }
2693
2694 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2695 SMU7_Discrete_DpmTable *table)
2696 {
2697 u32 count;
2698 struct atom_clock_dividers dividers;
2699 int ret = -EINVAL;
2700
2701 table->AcpLevelCount = (u8)
2702 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2703
2704 for (count = 0; count < table->AcpLevelCount; count++) {
2705 table->AcpLevel[count].Frequency =
2706 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2707 table->AcpLevel[count].MinVoltage =
2708 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2709 table->AcpLevel[count].MinPhases = 1;
2710
2711 ret = radeon_atom_get_clock_dividers(rdev,
2712 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2713 table->AcpLevel[count].Frequency, false, &dividers);
2714 if (ret)
2715 return ret;
2716
2717 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2718
2719 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2720 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2721 }
2722
2723 return ret;
2724 }
2725
2726 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2727 SMU7_Discrete_DpmTable *table)
2728 {
2729 u32 count;
2730 struct atom_clock_dividers dividers;
2731 int ret = -EINVAL;
2732
2733 table->SamuLevelCount =
2734 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2735
2736 for (count = 0; count < table->SamuLevelCount; count++) {
2737 table->SamuLevel[count].Frequency =
2738 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2739 table->SamuLevel[count].MinVoltage =
2740 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2741 table->SamuLevel[count].MinPhases = 1;
2742
2743 ret = radeon_atom_get_clock_dividers(rdev,
2744 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2745 table->SamuLevel[count].Frequency, false, &dividers);
2746 if (ret)
2747 return ret;
2748
2749 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2750
2751 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2752 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2753 }
2754
2755 return ret;
2756 }
2757
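/*
 * Compute the MPLL register settings (bandwidth control, feedback/post
 * dividers, DLL speed and optional memory spread spectrum) for a memory
 * clock using the ATOM MPLL divider query.
 */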
2758 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2759 u32 memory_clock,
2760 SMU7_Discrete_MemoryLevel *mclk,
2761 bool strobe_mode,
2762 bool dll_state_on)
2763 {
2764 struct ci_power_info *pi = ci_get_pi(rdev);
2765 u32 dll_cntl = pi->clock_registers.dll_cntl;
2766 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2767 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2768 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2769 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2770 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2771 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2772 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2773 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2774 struct atom_mpll_param mpll_param;
2775 int ret;
2776
2777 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2778 if (ret)
2779 return ret;
2780
2781 mpll_func_cntl &= ~BWCTRL_MASK;
2782 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2783
2784 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2785 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2786 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2787
2788 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2789 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2790
2791 if (pi->mem_gddr5) {
2792 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2793 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2794 YCLK_POST_DIV(mpll_param.post_div);
2795 }
2796
2797 if (pi->caps_mclk_ss_support) {
2798 struct radeon_atom_ss ss;
2799 u32 freq_nom;
2800 u32 tmp;
2801 u32 reference_clock = rdev->clock.mpll.reference_freq;
2802
2803 if (mpll_param.qdr == 1)
2804 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2805 else
2806 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2807
2808 tmp = (freq_nom / reference_clock);
2809 tmp = tmp * tmp;
2810 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2811 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2812 u32 clks = reference_clock * 5 / ss.rate;
2813 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2814
2815 mpll_ss1 &= ~CLKV_MASK;
2816 mpll_ss1 |= CLKV(clkv);
2817
2818 mpll_ss2 &= ~CLKS_MASK;
2819 mpll_ss2 |= CLKS(clks);
2820 }
2821 }
2822
2823 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2824 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2825
2826 if (dll_state_on)
2827 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2828 else
2829 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2830
2831 mclk->MclkFrequency = memory_clock;
2832 mclk->MpllFuncCntl = mpll_func_cntl;
2833 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2834 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2835 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2836 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2837 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2838 mclk->DllCntl = dll_cntl;
2839 mclk->MpllSs1 = mpll_ss1;
2840 mclk->MpllSs2 = mpll_ss2;
2841
2842 return 0;
2843 }
2844
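/*
 * Populate one SMC memory level: minimum VDDC/VDDCI/MVDD from the
 * dependency tables, stutter/strobe/EDC flags from the configured
 * thresholds, and the MPLL parameters; multi-byte fields are converted to
 * the SMC's big-endian layout at the end.
 */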
2845 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2846 u32 memory_clock,
2847 SMU7_Discrete_MemoryLevel *memory_level)
2848 {
2849 struct ci_power_info *pi = ci_get_pi(rdev);
2850 int ret;
2851 bool dll_state_on;
2852
2853 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2854 ret = ci_get_dependency_volt_by_clk(rdev,
2855 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2856 memory_clock, &memory_level->MinVddc);
2857 if (ret)
2858 return ret;
2859 }
2860
2861 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2862 ret = ci_get_dependency_volt_by_clk(rdev,
2863 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2864 memory_clock, &memory_level->MinVddci);
2865 if (ret)
2866 return ret;
2867 }
2868
2869 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2870 ret = ci_get_dependency_volt_by_clk(rdev,
2871 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2872 memory_clock, &memory_level->MinMvdd);
2873 if (ret)
2874 return ret;
2875 }
2876
2877 memory_level->MinVddcPhases = 1;
2878
2879 if (pi->vddc_phase_shed_control)
2880 ci_populate_phase_value_based_on_mclk(rdev,
2881 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2882 memory_clock,
2883 &memory_level->MinVddcPhases);
2884
2885 memory_level->EnabledForThrottle = 1;
2886 memory_level->UpH = 0;
2887 memory_level->DownH = 100;
2888 memory_level->VoltageDownH = 0;
2889 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2890
2891 memory_level->StutterEnable = false;
2892 memory_level->StrobeEnable = false;
2893 memory_level->EdcReadEnable = false;
2894 memory_level->EdcWriteEnable = false;
2895 memory_level->RttEnable = false;
2896
2897 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2898
2899 if (pi->mclk_stutter_mode_threshold &&
2900 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2901 !pi->uvd_enabled &&
2902 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2903 (rdev->pm.dpm.new_active_crtc_count <= 2))
2904 memory_level->StutterEnable = true;
2905
2906 if (pi->mclk_strobe_mode_threshold &&
2907 (memory_clock <= pi->mclk_strobe_mode_threshold))
2908 memory_level->StrobeEnable = true;
2909
2910 if (pi->mem_gddr5) {
2911 memory_level->StrobeRatio =
2912 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2913 if (pi->mclk_edc_enable_threshold &&
2914 (memory_clock > pi->mclk_edc_enable_threshold))
2915 memory_level->EdcReadEnable = true;
2916
2917 if (pi->mclk_edc_wr_enable_threshold &&
2918 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2919 memory_level->EdcWriteEnable = true;
2920
2921 if (memory_level->StrobeEnable) {
2922 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2923 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2924 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2925 else
2926 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2927 } else {
2928 dll_state_on = pi->dll_default_on;
2929 }
2930 } else {
2931 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2932 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2933 }
2934
2935 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2936 if (ret)
2937 return ret;
2938
2939 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2940 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2941 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2942 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2943
2944 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2945 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2946 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2947 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2948 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2949 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2950 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2951 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2952 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2953 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2954 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2955
2956 return 0;
2957 }
2958
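/*
 * Populate the ACPI (lowest power) graphics and memory levels: the SPLL is
 * powered down and held in reset, the memory DLLs are reset, and the
 * minimum voltages come from the ACPI values or the power play table.
 */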
2959 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2960 SMU7_Discrete_DpmTable *table)
2961 {
2962 struct ci_power_info *pi = ci_get_pi(rdev);
2963 struct atom_clock_dividers dividers;
2964 SMU7_Discrete_VoltageLevel voltage_level;
2965 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2966 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2967 u32 dll_cntl = pi->clock_registers.dll_cntl;
2968 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2969 int ret;
2970
2971 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2972
2973 if (pi->acpi_vddc)
2974 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2975 else
2976 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2977
2978 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2979
2980 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2981
2982 ret = radeon_atom_get_clock_dividers(rdev,
2983 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2984 table->ACPILevel.SclkFrequency, false, &dividers);
2985 if (ret)
2986 return ret;
2987
2988 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2989 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2990 table->ACPILevel.DeepSleepDivId = 0;
2991
2992 spll_func_cntl &= ~SPLL_PWRON;
2993 spll_func_cntl |= SPLL_RESET;
2994
2995 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2996 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2997
2998 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2999 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3000 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3001 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3002 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3003 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3004 table->ACPILevel.CcPwrDynRm = 0;
3005 table->ACPILevel.CcPwrDynRm1 = 0;
3006
3007 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3008 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3009 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3010 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3011 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3012 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3013 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3014 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3015 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3016 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3017 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3018
3019 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3020 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3021
3022 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3023 if (pi->acpi_vddci)
3024 table->MemoryACPILevel.MinVddci =
3025 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3026 else
3027 table->MemoryACPILevel.MinVddci =
3028 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3029 }
3030
3031 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
3032 table->MemoryACPILevel.MinMvdd = 0;
3033 else
3034 table->MemoryACPILevel.MinMvdd =
3035 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3036
3037 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3038 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3039
3040 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3041
3042 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3043 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3044 table->MemoryACPILevel.MpllAdFuncCntl =
3045 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3046 table->MemoryACPILevel.MpllDqFuncCntl =
3047 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3048 table->MemoryACPILevel.MpllFuncCntl =
3049 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3050 table->MemoryACPILevel.MpllFuncCntl_1 =
3051 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3052 table->MemoryACPILevel.MpllFuncCntl_2 =
3053 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3054 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3055 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3056
3057 table->MemoryACPILevel.EnabledForThrottle = 0;
3058 table->MemoryACPILevel.EnabledForActivity = 0;
3059 table->MemoryACPILevel.UpH = 0;
3060 table->MemoryACPILevel.DownH = 100;
3061 table->MemoryACPILevel.VoltageDownH = 0;
3062 table->MemoryACPILevel.ActivityLevel =
3063 cpu_to_be16((u16)pi->mclk_activity_target);
3064
3065 table->MemoryACPILevel.StutterEnable = false;
3066 table->MemoryACPILevel.StrobeEnable = false;
3067 table->MemoryACPILevel.EdcReadEnable = false;
3068 table->MemoryACPILevel.EdcWriteEnable = false;
3069 table->MemoryACPILevel.RttEnable = false;
3070
3071 return 0;
3072 }
3073
3074
3075 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3076 {
3077 struct ci_power_info *pi = ci_get_pi(rdev);
3078 struct ci_ulv_parm *ulv = &pi->ulv;
3079
3080 if (ulv->supported) {
3081 if (enable)
3082 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3083 0 : -EINVAL;
3084 else
3085 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3086 0 : -EINVAL;
3087 }
3088
3089 return 0;
3090 }
3091
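/*
 * Fill the ULV state. The ULV voltage is taken from
 * rdev->pm.dpm.backbias_response_time; ULV is disabled when it is zero,
 * otherwise the VDDC offset (or SVI2 VID offset) is computed against the
 * lowest sclk dependency voltage.
 */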
3092 static int ci_populate_ulv_level(struct radeon_device *rdev,
3093 SMU7_Discrete_Ulv *state)
3094 {
3095 struct ci_power_info *pi = ci_get_pi(rdev);
3096 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3097
3098 state->CcPwrDynRm = 0;
3099 state->CcPwrDynRm1 = 0;
3100
3101 if (ulv_voltage == 0) {
3102 pi->ulv.supported = false;
3103 return 0;
3104 }
3105
3106 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3107 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3108 state->VddcOffset = 0;
3109 else
3110 state->VddcOffset =
3111 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3112 } else {
3113 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3114 state->VddcOffsetVid = 0;
3115 else
3116 state->VddcOffsetVid = (u8)
3117 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3118 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3119 }
3120 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3121
3122 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3123 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3124 state->VddcOffset = cpu_to_be16(state->VddcOffset);
3125
3126 return 0;
3127 }
3128
3129 static int ci_calculate_sclk_params(struct radeon_device *rdev,
3130 u32 engine_clock,
3131 SMU7_Discrete_GraphicsLevel *sclk)
3132 {
3133 struct ci_power_info *pi = ci_get_pi(rdev);
3134 struct atom_clock_dividers dividers;
3135 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3136 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3137 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3138 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3139 u32 reference_clock = rdev->clock.spll.reference_freq;
3140 u32 reference_divider;
3141 u32 fbdiv;
3142 int ret;
3143
3144 ret = radeon_atom_get_clock_dividers(rdev,
3145 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3146 engine_clock, false, &dividers);
3147 if (ret)
3148 return ret;
3149
3150 reference_divider = 1 + dividers.ref_div;
3151 fbdiv = dividers.fb_div & 0x3FFFFFF;
3152
3153 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
3154 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
3155 spll_func_cntl_3 |= SPLL_DITHEN;
3156
3157 if (pi->caps_sclk_ss_support) {
3158 struct radeon_atom_ss ss;
3159 u32 vco_freq = engine_clock * dividers.post_div;
3160
3161 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
3162 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3163 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3164 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3165
3166 cg_spll_spread_spectrum &= ~CLK_S_MASK;
3167 cg_spll_spread_spectrum |= CLK_S(clk_s);
3168 cg_spll_spread_spectrum |= SSEN;
3169
3170 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
3171 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
3172 }
3173 }
3174
3175 sclk->SclkFrequency = engine_clock;
3176 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3177 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3178 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3179 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3180 sclk->SclkDid = (u8)dividers.post_divider;
3181
3182 return 0;
3183 }
3184
3185 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
3186 u32 engine_clock,
3187 u16 sclk_activity_level_t,
3188 SMU7_Discrete_GraphicsLevel *graphic_level)
3189 {
3190 struct ci_power_info *pi = ci_get_pi(rdev);
3191 int ret;
3192
3193 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
3194 if (ret)
3195 return ret;
3196
3197 ret = ci_get_dependency_volt_by_clk(rdev,
3198 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3199 engine_clock, &graphic_level->MinVddc);
3200 if (ret)
3201 return ret;
3202
3203 graphic_level->SclkFrequency = engine_clock;
3204
3205 graphic_level->Flags = 0;
3206 graphic_level->MinVddcPhases = 1;
3207
3208 if (pi->vddc_phase_shed_control)
3209 ci_populate_phase_value_based_on_sclk(rdev,
3210 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
3211 engine_clock,
3212 &graphic_level->MinVddcPhases);
3213
3214 graphic_level->ActivityLevel = sclk_activity_level_t;
3215
3216 graphic_level->CcPwrDynRm = 0;
3217 graphic_level->CcPwrDynRm1 = 0;
3218 graphic_level->EnabledForThrottle = 1;
3219 graphic_level->UpH = 0;
3220 graphic_level->DownH = 0;
3221 graphic_level->VoltageDownH = 0;
3222 graphic_level->PowerThrottle = 0;
3223
3224 if (pi->caps_sclk_ds)
3225 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
3226 engine_clock,
3227 CISLAND_MINIMUM_ENGINE_CLOCK);
3228
3229 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3230
3231 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3232 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3233 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3234 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3235 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3236 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3237 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3238 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3239 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3240 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3241 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3242
3243 return 0;
3244 }
3245
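/*
 * Build and upload all graphics DPM levels to SMC SRAM. Deep sleep is only
 * left enabled on the two lowest levels, and the highest level gets the
 * high display watermark.
 */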
3246 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3247 {
3248 struct ci_power_info *pi = ci_get_pi(rdev);
3249 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3250 u32 level_array_address = pi->dpm_table_start +
3251 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3252 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3253 SMU7_MAX_LEVELS_GRAPHICS;
3254 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3255 u32 i;
int ret;
3256
3257 memset(levels, 0, level_array_size);
3258
3259 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3260 ret = ci_populate_single_graphic_level(rdev,
3261 dpm_table->sclk_table.dpm_levels[i].value,
3262 (u16)pi->activity_target[i],
3263 &pi->smc_state_table.GraphicsLevel[i]);
3264 if (ret)
3265 return ret;
3266 if (i > 1)
3267 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3268 if (i == (dpm_table->sclk_table.count - 1))
3269 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3270 PPSMC_DISPLAY_WATERMARK_HIGH;
3271 }
3272 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3273
3274 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3275 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3276 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3277
3278 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3279 (u8 *)levels, level_array_size,
3280 pi->sram_end);
3281 if (ret)
3282 return ret;
3283
3284 return 0;
3285 }
3286
3287 static int ci_populate_ulv_state(struct radeon_device *rdev,
3288 SMU7_Discrete_Ulv *ulv_level)
3289 {
3290 return ci_populate_ulv_level(rdev, ulv_level);
3291 }
3292
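/*
 * Build and upload all memory DPM levels. Level 0 is enabled for activity
 * with a fixed low activity target, and on Hawaii 0x67B0/0x67B1 level 1
 * inherits the minimum VDDC settings of level 0.
 */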
3293 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3294 {
3295 struct ci_power_info *pi = ci_get_pi(rdev);
3296 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3297 u32 level_array_address = pi->dpm_table_start +
3298 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3299 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3300 SMU7_MAX_LEVELS_MEMORY;
3301 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3302 u32 i;
int ret;
3303
3304 memset(levels, 0, level_array_size);
3305
3306 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3307 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3308 return -EINVAL;
3309 ret = ci_populate_single_memory_level(rdev,
3310 dpm_table->mclk_table.dpm_levels[i].value,
3311 &pi->smc_state_table.MemoryLevel[i]);
3312 if (ret)
3313 return ret;
3314 }
3315
3316 pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3317
3318 if ((dpm_table->mclk_table.count >= 2) &&
3319 ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3320 pi->smc_state_table.MemoryLevel[1].MinVddc =
3321 pi->smc_state_table.MemoryLevel[0].MinVddc;
3322 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3323 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3324 }
3325
3326 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3327
3328 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3329 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3330 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3331
3332 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3333 PPSMC_DISPLAY_WATERMARK_HIGH;
3334
3335 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3336 (u8 *)levels, level_array_size,
3337 pi->sram_end);
3338 if (ret)
3339 return ret;
3340
3341 return 0;
3342 }
3343
3344 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3345 struct ci_single_dpm_table* dpm_table,
3346 u32 count)
3347 {
3348 u32 i;
3349
3350 dpm_table->count = count;
3351 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3352 dpm_table->dpm_levels[i].enabled = false;
3353 }
3354
3355 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3356 u32 index, u32 pcie_gen, u32 pcie_lanes)
3357 {
3358 dpm_table->dpm_levels[index].value = pcie_gen;
3359 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3360 dpm_table->dpm_levels[index].enabled = true;
3361 }
3362
3363 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3364 {
3365 struct ci_power_info *pi = ci_get_pi(rdev);
3366
3367 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3368 return -EINVAL;
3369
3370 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3371 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3372 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3373 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3374 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3375 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3376 }
3377
3378 ci_reset_single_dpm_table(rdev,
3379 &pi->dpm_table.pcie_speed_table,
3380 SMU7_MAX_LEVELS_LINK);
3381
3382 if (rdev->family == CHIP_BONAIRE)
3383 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3384 pi->pcie_gen_powersaving.min,
3385 pi->pcie_lane_powersaving.max);
3386 else
3387 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3388 pi->pcie_gen_powersaving.min,
3389 pi->pcie_lane_powersaving.min);
3390 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3391 pi->pcie_gen_performance.min,
3392 pi->pcie_lane_performance.min);
3393 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3394 pi->pcie_gen_powersaving.min,
3395 pi->pcie_lane_powersaving.max);
3396 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3397 pi->pcie_gen_performance.min,
3398 pi->pcie_lane_performance.max);
3399 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3400 pi->pcie_gen_powersaving.max,
3401 pi->pcie_lane_powersaving.max);
3402 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3403 pi->pcie_gen_performance.max,
3404 pi->pcie_lane_performance.max);
3405
3406 pi->dpm_table.pcie_speed_table.count = 6;
3407
3408 return 0;
3409 }
3410
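/*
 * Build the default sclk/mclk/voltage/PCIe DPM tables from the power play
 * dependency tables, de-duplicating consecutive identical clock entries.
 */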
3411 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3412 {
3413 struct ci_power_info *pi = ci_get_pi(rdev);
3414 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3415 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3416 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3417 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3418 struct radeon_cac_leakage_table *std_voltage_table =
3419 &rdev->pm.dpm.dyn_state.cac_leakage_table;
3420 u32 i;
3421
3422 if (allowed_sclk_vddc_table == NULL)
3423 return -EINVAL;
3424 if (allowed_sclk_vddc_table->count < 1)
3425 return -EINVAL;
3426 if (allowed_mclk_table == NULL)
3427 return -EINVAL;
3428 if (allowed_mclk_table->count < 1)
3429 return -EINVAL;
3430
3431 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3432
3433 ci_reset_single_dpm_table(rdev,
3434 &pi->dpm_table.sclk_table,
3435 SMU7_MAX_LEVELS_GRAPHICS);
3436 ci_reset_single_dpm_table(rdev,
3437 &pi->dpm_table.mclk_table,
3438 SMU7_MAX_LEVELS_MEMORY);
3439 ci_reset_single_dpm_table(rdev,
3440 &pi->dpm_table.vddc_table,
3441 SMU7_MAX_LEVELS_VDDC);
3442 ci_reset_single_dpm_table(rdev,
3443 &pi->dpm_table.vddci_table,
3444 SMU7_MAX_LEVELS_VDDCI);
3445 ci_reset_single_dpm_table(rdev,
3446 &pi->dpm_table.mvdd_table,
3447 SMU7_MAX_LEVELS_MVDD);
3448
3449 pi->dpm_table.sclk_table.count = 0;
3450 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3451 if ((i == 0) ||
3452 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3453 allowed_sclk_vddc_table->entries[i].clk)) {
3454 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3455 allowed_sclk_vddc_table->entries[i].clk;
3456 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3457 (i == 0) ? true : false;
3458 pi->dpm_table.sclk_table.count++;
3459 }
3460 }
3461
3462 pi->dpm_table.mclk_table.count = 0;
3463 for (i = 0; i < allowed_mclk_table->count; i++) {
3464 if ((i == 0) ||
3465 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3466 allowed_mclk_table->entries[i].clk)) {
3467 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3468 allowed_mclk_table->entries[i].clk;
3469 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3470 (i == 0) ? true : false;
3471 pi->dpm_table.mclk_table.count++;
3472 }
3473 }
3474
3475 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3476 pi->dpm_table.vddc_table.dpm_levels[i].value =
3477 allowed_sclk_vddc_table->entries[i].v;
3478 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3479 std_voltage_table->entries[i].leakage;
3480 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3481 }
3482 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3483
3484 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3485 if (allowed_mclk_table) {
3486 for (i = 0; i < allowed_mclk_table->count; i++) {
3487 pi->dpm_table.vddci_table.dpm_levels[i].value =
3488 allowed_mclk_table->entries[i].v;
3489 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3490 }
3491 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3492 }
3493
3494 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3495 if (allowed_mclk_table) {
3496 for (i = 0; i < allowed_mclk_table->count; i++) {
3497 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3498 allowed_mclk_table->entries[i].v;
3499 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3500 }
3501 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3502 }
3503
3504 ci_setup_default_pcie_tables(rdev);
3505
3506 return 0;
3507 }
3508
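/* Find the dpm level whose clock matches the requested boot value. */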
3509 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3510 u32 value, u32 *boot_level)
3511 {
3512 u32 i;
3513 int ret = -EINVAL;
3514
3515 for (i = 0; i < table->count; i++) {
3516 if (value == table->dpm_levels[i].value) {
3517 *boot_level = i;
3518 ret = 0;
3519 }
3520 }
3521
3522 return ret;
3523 }
3524
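/* Fill out the SMU7 discrete dpm table (graphics, memory, link and
 * UVD/VCE/ACP/SAMU levels, boot levels, thermal limits), byte swap the
 * fields for the SMC and upload the result to SMC RAM.
 */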
3525 static int ci_init_smc_table(struct radeon_device *rdev)
3526 {
3527 struct ci_power_info *pi = ci_get_pi(rdev);
3528 struct ci_ulv_parm *ulv = &pi->ulv;
3529 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3530 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3531 int ret;
3532
3533 ret = ci_setup_default_dpm_tables(rdev);
3534 if (ret)
3535 return ret;
3536
3537 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3538 ci_populate_smc_voltage_tables(rdev, table);
3539
3540 ci_init_fps_limits(rdev);
3541
3542 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3543 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3544
3545 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3546 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3547
3548 if (pi->mem_gddr5)
3549 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3550
3551 if (ulv->supported) {
3552 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3553 if (ret)
3554 return ret;
3555 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3556 }
3557
3558 ret = ci_populate_all_graphic_levels(rdev);
3559 if (ret)
3560 return ret;
3561
3562 ret = ci_populate_all_memory_levels(rdev);
3563 if (ret)
3564 return ret;
3565
3566 ci_populate_smc_link_level(rdev, table);
3567
3568 ret = ci_populate_smc_acpi_level(rdev, table);
3569 if (ret)
3570 return ret;
3571
3572 ret = ci_populate_smc_vce_level(rdev, table);
3573 if (ret)
3574 return ret;
3575
3576 ret = ci_populate_smc_acp_level(rdev, table);
3577 if (ret)
3578 return ret;
3579
3580 ret = ci_populate_smc_samu_level(rdev, table);
3581 if (ret)
3582 return ret;
3583
3584 ret = ci_do_program_memory_timing_parameters(rdev);
3585 if (ret)
3586 return ret;
3587
3588 ret = ci_populate_smc_uvd_level(rdev, table);
3589 if (ret)
3590 return ret;
3591
3592 table->UvdBootLevel = 0;
3593 table->VceBootLevel = 0;
3594 table->AcpBootLevel = 0;
3595 table->SamuBootLevel = 0;
3596 table->GraphicsBootLevel = 0;
3597 table->MemoryBootLevel = 0;
3598
3599 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3600 pi->vbios_boot_state.sclk_bootup_value,
3601 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3602
3603 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3604 pi->vbios_boot_state.mclk_bootup_value,
3605 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3606
3607 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3608 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3609 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3610
3611 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3612
3613 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3614 if (ret)
3615 return ret;
3616
3617 table->UVDInterval = 1;
3618 table->VCEInterval = 1;
3619 table->ACPInterval = 1;
3620 table->SAMUInterval = 1;
3621 table->GraphicsVoltageChangeEnable = 1;
3622 table->GraphicsThermThrottleEnable = 1;
3623 table->GraphicsInterval = 1;
3624 table->VoltageInterval = 1;
3625 table->ThermalInterval = 1;
3626 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3627 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3628 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3629 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3630 table->MemoryVoltageChangeEnable = 1;
3631 table->MemoryInterval = 1;
3632 table->VoltageResponseTime = 0;
3633 table->VddcVddciDelta = 4000;
3634 table->PhaseResponseTime = 0;
3635 table->MemoryThermThrottleEnable = 1;
3636 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3637 table->PCIeGenInterval = 1;
3638 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3639 table->SVI2Enable = 1;
3640 else
3641 table->SVI2Enable = 0;
3642
3643 table->ThermGpio = 17;
3644 table->SclkStepSize = 0x4000;
3645
3646 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3647 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3648 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3649 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3650 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3651 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3652 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3653 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3654 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3655 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3656 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3657 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3658 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3659 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3660
3661 ret = ci_copy_bytes_to_smc(rdev,
3662 pi->dpm_table_start +
3663 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3664 (u8 *)&table->SystemFlags,
3665 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3666 pi->sram_end);
3667 if (ret)
3668 return ret;
3669
3670 return 0;
3671 }
3672
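/* Enable only the dpm levels that fall within [low_limit, high_limit]. */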
3673 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3674 struct ci_single_dpm_table *dpm_table,
3675 u32 low_limit, u32 high_limit)
3676 {
3677 u32 i;
3678
3679 for (i = 0; i < dpm_table->count; i++) {
3680 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3681 (dpm_table->dpm_levels[i].value > high_limit))
3682 dpm_table->dpm_levels[i].enabled = false;
3683 else
3684 dpm_table->dpm_levels[i].enabled = true;
3685 }
3686 }
3687
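/* Enable only the pcie levels inside the requested speed/lane window
 * and disable any duplicate speed/lane combinations.
 */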
3688 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3689 u32 speed_low, u32 lanes_low,
3690 u32 speed_high, u32 lanes_high)
3691 {
3692 struct ci_power_info *pi = ci_get_pi(rdev);
3693 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3694 u32 i, j;
3695
3696 for (i = 0; i < pcie_table->count; i++) {
3697 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3698 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3699 (pcie_table->dpm_levels[i].value > speed_high) ||
3700 (pcie_table->dpm_levels[i].param1 > lanes_high))
3701 pcie_table->dpm_levels[i].enabled = false;
3702 else
3703 pcie_table->dpm_levels[i].enabled = true;
3704 }
3705
3706 for (i = 0; i < pcie_table->count; i++) {
3707 if (pcie_table->dpm_levels[i].enabled) {
3708 for (j = i + 1; j < pcie_table->count; j++) {
3709 if (pcie_table->dpm_levels[j].enabled) {
3710 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3711 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3712 pcie_table->dpm_levels[j].enabled = false;
3713 }
3714 }
3715 }
3716 }
3717 }
3718
3719 static int ci_trim_dpm_states(struct radeon_device *rdev,
3720 struct radeon_ps *radeon_state)
3721 {
3722 struct ci_ps *state = ci_get_ps(radeon_state);
3723 struct ci_power_info *pi = ci_get_pi(rdev);
3724 u32 high_limit_count;
3725
3726 if (state->performance_level_count < 1)
3727 return -EINVAL;
3728
3729 if (state->performance_level_count == 1)
3730 high_limit_count = 0;
3731 else
3732 high_limit_count = 1;
3733
3734 ci_trim_single_dpm_states(rdev,
3735 &pi->dpm_table.sclk_table,
3736 state->performance_levels[0].sclk,
3737 state->performance_levels[high_limit_count].sclk);
3738
3739 ci_trim_single_dpm_states(rdev,
3740 &pi->dpm_table.mclk_table,
3741 state->performance_levels[0].mclk,
3742 state->performance_levels[high_limit_count].mclk);
3743
3744 ci_trim_pcie_dpm_states(rdev,
3745 state->performance_levels[0].pcie_gen,
3746 state->performance_levels[0].pcie_lane,
3747 state->performance_levels[high_limit_count].pcie_gen,
3748 state->performance_levels[high_limit_count].pcie_lane);
3749
3750 return 0;
3751 }
3752
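/* Request from the SMC the lowest vddc level that satisfies the
 * current display clock.
 */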
3753 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3754 {
3755 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3756 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3757 struct radeon_clock_voltage_dependency_table *vddc_table =
3758 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3759 u32 requested_voltage = 0;
3760 u32 i;
3761
3762 if (disp_voltage_table == NULL)
3763 return -EINVAL;
3764 if (!disp_voltage_table->count)
3765 return -EINVAL;
3766
3767 for (i = 0; i < disp_voltage_table->count; i++) {
3768 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3769 requested_voltage = disp_voltage_table->entries[i].v;
3770 }
3771
3772 for (i = 0; i < vddc_table->count; i++) {
3773 if (requested_voltage <= vddc_table->entries[i].v) {
3774 requested_voltage = vddc_table->entries[i].v;
3775 return (ci_send_msg_to_smc_with_parameter(rdev,
3776 PPSMC_MSG_VddC_Request,
3777 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3778 0 : -EINVAL;
3779 }
3780 }
3781
3782 return -EINVAL;
3783 }
3784
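/* Apply the display minimum voltage request and push the sclk/mclk
 * dpm level enable masks to the SMC.
 */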
3785 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3786 {
3787 struct ci_power_info *pi = ci_get_pi(rdev);
3788 PPSMC_Result result;
3789
3790 ci_apply_disp_minimum_voltage_request(rdev);
3791
3792 if (!pi->sclk_dpm_key_disabled) {
3793 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3794 result = ci_send_msg_to_smc_with_parameter(rdev,
3795 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3796 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3797 if (result != PPSMC_Result_OK)
3798 return -EINVAL;
3799 }
3800 }
3801
3802 if (!pi->mclk_dpm_key_disabled) {
3803 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3804 result = ci_send_msg_to_smc_with_parameter(rdev,
3805 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3806 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3807 if (result != PPSMC_Result_OK)
3808 return -EINVAL;
3809 }
3810 }
3811 #if 0
3812 if (!pi->pcie_dpm_key_disabled) {
3813 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3814 result = ci_send_msg_to_smc_with_parameter(rdev,
3815 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3816 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3817 if (result != PPSMC_Result_OK)
3818 return -EINVAL;
3819 }
3820 }
3821 #endif
3822 return 0;
3823 }
3824
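/* Work out which dpm tables need to be reprogrammed for the new state:
 * flag an sclk/mclk update when the requested clocks are not already in
 * the tables, and an mclk update when the active crtc count changes.
 */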
3825 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3826 struct radeon_ps *radeon_state)
3827 {
3828 struct ci_power_info *pi = ci_get_pi(rdev);
3829 struct ci_ps *state = ci_get_ps(radeon_state);
3830 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3831 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3832 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3833 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3834 u32 i;
3835
3836 pi->need_update_smu7_dpm_table = 0;
3837
3838 for (i = 0; i < sclk_table->count; i++) {
3839 if (sclk == sclk_table->dpm_levels[i].value)
3840 break;
3841 }
3842
3843 if (i >= sclk_table->count) {
3844 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3845 } else {
3846 /* XXX The current code always reprogrammed the sclk levels,
3847 * but we don't currently handle disp sclk requirements
3848 * so just skip it.
3849 */
3850 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3851 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3852 }
3853
3854 for (i = 0; i < mclk_table->count; i++) {
3855 if (mclk == mclk_table->dpm_levels[i].value)
3856 break;
3857 }
3858
3859 if (i >= mclk_table->count)
3860 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3861
3862 if (rdev->pm.dpm.current_active_crtc_count !=
3863 rdev->pm.dpm.new_active_crtc_count)
3864 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3865 }
3866
3867 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3868 struct radeon_ps *radeon_state)
3869 {
3870 struct ci_power_info *pi = ci_get_pi(rdev);
3871 struct ci_ps *state = ci_get_ps(radeon_state);
3872 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3873 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3874 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3875 int ret;
3876
3877 if (!pi->need_update_smu7_dpm_table)
3878 return 0;
3879
3880 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3881 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3882
3883 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3884 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3885
3886 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3887 ret = ci_populate_all_graphic_levels(rdev);
3888 if (ret)
3889 return ret;
3890 }
3891
3892 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3893 ret = ci_populate_all_memory_levels(rdev);
3894 if (ret)
3895 return ret;
3896 }
3897
3898 return 0;
3899 }
3900
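/* Enable or disable UVD dpm: select the UVD levels whose voltage fits
 * the current AC/DC limit, program the enable mask, drop or restore the
 * lowest mclk level while UVD is active, then notify the SMC.
 */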
3901 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3902 {
3903 struct ci_power_info *pi = ci_get_pi(rdev);
3904 const struct radeon_clock_and_voltage_limits *max_limits;
3905 int i;
3906
3907 if (rdev->pm.dpm.ac_power)
3908 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3909 else
3910 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3911
3912 if (enable) {
3913 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3914
3915 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3916 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3917 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3918
3919 if (!pi->caps_uvd_dpm)
3920 break;
3921 }
3922 }
3923
3924 ci_send_msg_to_smc_with_parameter(rdev,
3925 PPSMC_MSG_UVDDPM_SetEnabledMask,
3926 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3927
3928 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3929 pi->uvd_enabled = true;
3930 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3931 ci_send_msg_to_smc_with_parameter(rdev,
3932 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3933 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3934 }
3935 } else {
3936 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3937 pi->uvd_enabled = false;
3938 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3939 ci_send_msg_to_smc_with_parameter(rdev,
3940 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3941 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3942 }
3943 }
3944
3945 return (ci_send_msg_to_smc(rdev, enable ?
3946 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3947 0 : -EINVAL;
3948 }
3949
3950 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3951 {
3952 struct ci_power_info *pi = ci_get_pi(rdev);
3953 const struct radeon_clock_and_voltage_limits *max_limits;
3954 int i;
3955
3956 if (rdev->pm.dpm.ac_power)
3957 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3958 else
3959 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3960
3961 if (enable) {
3962 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3963 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3964 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3965 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3966
3967 if (!pi->caps_vce_dpm)
3968 break;
3969 }
3970 }
3971
3972 ci_send_msg_to_smc_with_parameter(rdev,
3973 PPSMC_MSG_VCEDPM_SetEnabledMask,
3974 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3975 }
3976
3977 return (ci_send_msg_to_smc(rdev, enable ?
3978 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3979 0 : -EINVAL;
3980 }
3981
3982 #if 0
3983 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3984 {
3985 struct ci_power_info *pi = ci_get_pi(rdev);
3986 const struct radeon_clock_and_voltage_limits *max_limits;
3987 int i;
3988
3989 if (rdev->pm.dpm.ac_power)
3990 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3991 else
3992 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3993
3994 if (enable) {
3995 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3996 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3997 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3998 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3999
4000 if (!pi->caps_samu_dpm)
4001 break;
4002 }
4003 }
4004
4005 ci_send_msg_to_smc_with_parameter(rdev,
4006 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4007 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4008 }
4009 return (ci_send_msg_to_smc(rdev, enable ?
4010 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4011 0 : -EINVAL;
4012 }
4013
4014 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
4015 {
4016 struct ci_power_info *pi = ci_get_pi(rdev);
4017 const struct radeon_clock_and_voltage_limits *max_limits;
4018 int i;
4019
4020 if (rdev->pm.dpm.ac_power)
4021 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4022 else
4023 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4024
4025 if (enable) {
4026 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4027 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4028 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4029 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4030
4031 if (!pi->caps_acp_dpm)
4032 break;
4033 }
4034 }
4035
4036 ci_send_msg_to_smc_with_parameter(rdev,
4037 PPSMC_MSG_ACPDPM_SetEnabledMask,
4038 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4039 }
4040
4041 return (ci_send_msg_to_smc(rdev, enable ?
4042 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4043 0 : -EINVAL;
4044 }
4045 #endif
4046
4047 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4048 {
4049 struct ci_power_info *pi = ci_get_pi(rdev);
4050 u32 tmp;
4051
4052 if (!gate) {
4053 if (pi->caps_uvd_dpm ||
4054 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4055 pi->smc_state_table.UvdBootLevel = 0;
4056 else
4057 pi->smc_state_table.UvdBootLevel =
4058 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4059
4060 tmp = RREG32_SMC(DPM_TABLE_475);
4061 tmp &= ~UvdBootLevel_MASK;
4062 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4063 WREG32_SMC(DPM_TABLE_475, tmp);
4064 }
4065
4066 return ci_enable_uvd_dpm(rdev, !gate);
4067 }
4068
4069 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4070 {
4071 u8 i;
4072 u32 min_evclk = 30000; /* ??? */
4073 struct radeon_vce_clock_voltage_dependency_table *table =
4074 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4075
4076 for (i = 0; i < table->count; i++) {
4077 if (table->entries[i].evclk >= min_evclk)
4078 return i;
4079 }
4080
4081 return table->count - 1;
4082 }
4083
4084 static int ci_update_vce_dpm(struct radeon_device *rdev,
4085 struct radeon_ps *radeon_new_state,
4086 struct radeon_ps *radeon_current_state)
4087 {
4088 struct ci_power_info *pi = ci_get_pi(rdev);
4089 int ret = 0;
4090 u32 tmp;
4091
4092 if (radeon_current_state->evclk != radeon_new_state->evclk) {
4093 if (radeon_new_state->evclk) {
4094 /* turn the clocks on when encoding */
4095 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4096
4097 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4098 tmp = RREG32_SMC(DPM_TABLE_475);
4099 tmp &= ~VceBootLevel_MASK;
4100 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4101 WREG32_SMC(DPM_TABLE_475, tmp);
4102
4103 ret = ci_enable_vce_dpm(rdev, true);
4104 } else {
4105 /* turn the clocks off when not encoding */
4106 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4107
4108 ret = ci_enable_vce_dpm(rdev, false);
4109 }
4110 }
4111 return ret;
4112 }
4113
4114 #if 0
4115 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4116 {
4117 return ci_enable_samu_dpm(rdev, gate);
4118 }
4119
4120 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4121 {
4122 struct ci_power_info *pi = ci_get_pi(rdev);
4123 u32 tmp;
4124
4125 if (!gate) {
4126 pi->smc_state_table.AcpBootLevel = 0;
4127
4128 tmp = RREG32_SMC(DPM_TABLE_475);
4129 tmp &= ~AcpBootLevel_MASK;
4130 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4131 WREG32_SMC(DPM_TABLE_475, tmp);
4132 }
4133
4134 return ci_enable_acp_dpm(rdev, !gate);
4135 }
4136 #endif
4137
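/* Trim the dpm tables to the requested state and recompute the sclk,
 * mclk and pcie level enable masks.
 */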
4138 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4139 struct radeon_ps *radeon_state)
4140 {
4141 struct ci_power_info *pi = ci_get_pi(rdev);
4142 int ret;
4143
4144 ret = ci_trim_dpm_states(rdev, radeon_state);
4145 if (ret)
4146 return ret;
4147
4148 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4149 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4150 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4151 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4152 pi->last_mclk_dpm_enable_mask =
4153 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4154 if (pi->uvd_enabled) {
4155 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4156 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4157 }
4158 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4159 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4160
4161 return 0;
4162 }
4163
4164 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4165 u32 level_mask)
4166 {
4167 u32 level = 0;
4168
4169 while ((level_mask & (1 << level)) == 0)
4170 level++;
4171
4172 return level;
4173 }
4174
4175
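/* Force the sclk, mclk and pcie dpm levels to the highest or lowest
 * enabled level, or hand level selection back to the SMC for auto mode.
 */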
4176 int ci_dpm_force_performance_level(struct radeon_device *rdev,
4177 enum radeon_dpm_forced_level level)
4178 {
4179 struct ci_power_info *pi = ci_get_pi(rdev);
4180 u32 tmp, levels, i;
4181 int ret;
4182
4183 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4184 if ((!pi->pcie_dpm_key_disabled) &&
4185 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4186 levels = 0;
4187 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4188 while (tmp >>= 1)
4189 levels++;
4190 if (levels) {
4191 ret = ci_dpm_force_state_pcie(rdev, levels);
4192 if (ret)
4193 return ret;
4194 for (i = 0; i < rdev->usec_timeout; i++) {
4195 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4196 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4197 if (tmp == levels)
4198 break;
4199 udelay(1);
4200 }
4201 }
4202 }
4203 if ((!pi->sclk_dpm_key_disabled) &&
4204 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4205 levels = 0;
4206 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4207 while (tmp >>= 1)
4208 levels++;
4209 if (levels) {
4210 ret = ci_dpm_force_state_sclk(rdev, levels);
4211 if (ret)
4212 return ret;
4213 for (i = 0; i < rdev->usec_timeout; i++) {
4214 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4215 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4216 if (tmp == levels)
4217 break;
4218 udelay(1);
4219 }
4220 }
4221 }
4222 if ((!pi->mclk_dpm_key_disabled) &&
4223 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4224 levels = 0;
4225 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4226 while (tmp >>= 1)
4227 levels++;
4228 if (levels) {
4229 ret = ci_dpm_force_state_mclk(rdev, levels);
4230 if (ret)
4231 return ret;
4232 for (i = 0; i < rdev->usec_timeout; i++) {
4233 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4234 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4235 if (tmp == levels)
4236 break;
4237 udelay(1);
4238 }
4239 }
4240 }
4241 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4242 if ((!pi->sclk_dpm_key_disabled) &&
4243 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4244 levels = ci_get_lowest_enabled_level(rdev,
4245 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4246 ret = ci_dpm_force_state_sclk(rdev, levels);
4247 if (ret)
4248 return ret;
4249 for (i = 0; i < rdev->usec_timeout; i++) {
4250 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4251 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4252 if (tmp == levels)
4253 break;
4254 udelay(1);
4255 }
4256 }
4257 if ((!pi->mclk_dpm_key_disabled) &&
4258 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4259 levels = ci_get_lowest_enabled_level(rdev,
4260 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4261 ret = ci_dpm_force_state_mclk(rdev, levels);
4262 if (ret)
4263 return ret;
4264 for (i = 0; i < rdev->usec_timeout; i++) {
4265 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4266 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4267 if (tmp == levels)
4268 break;
4269 udelay(1);
4270 }
4271 }
4272 if ((!pi->pcie_dpm_key_disabled) &&
4273 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4274 levels = ci_get_lowest_enabled_level(rdev,
4275 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4276 ret = ci_dpm_force_state_pcie(rdev, levels);
4277 if (ret)
4278 return ret;
4279 for (i = 0; i < rdev->usec_timeout; i++) {
4280 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4281 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4282 if (tmp == levels)
4283 break;
4284 udelay(1);
4285 }
4286 }
4287 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4288 if (!pi->pcie_dpm_key_disabled) {
4289 PPSMC_Result smc_result;
4290
4291 smc_result = ci_send_msg_to_smc(rdev,
4292 PPSMC_MSG_PCIeDPM_UnForceLevel);
4293 if (smc_result != PPSMC_Result_OK)
4294 return -EINVAL;
4295 }
4296 ret = ci_upload_dpm_level_enable_mask(rdev);
4297 if (ret)
4298 return ret;
4299 }
4300
4301 rdev->pm.dpm.forced_level = level;
4302
4303 return 0;
4304 }
4305
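/* Append the derived EMRS/MRS/MRS1 (and MC_PMG_AUTO_CMD for non-GDDR5)
 * entries to the MC register table from the MISC1 and RESERVE_M values.
 */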
4306 static int ci_set_mc_special_registers(struct radeon_device *rdev,
4307 struct ci_mc_reg_table *table)
4308 {
4309 struct ci_power_info *pi = ci_get_pi(rdev);
4310 u8 i, j, k;
4311 u32 temp_reg;
4312
4313 for (i = 0, j = table->last; i < table->last; i++) {
4314 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4315 return -EINVAL;
4316 switch (table->mc_reg_address[i].s1 << 2) {
4317 case MC_SEQ_MISC1:
4318 temp_reg = RREG32(MC_PMG_CMD_EMRS);
4319 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4320 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4321 for (k = 0; k < table->num_entries; k++) {
4322 table->mc_reg_table_entry[k].mc_data[j] =
4323 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4324 }
4325 j++;
4326 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4327 return -EINVAL;
4328
4329 temp_reg = RREG32(MC_PMG_CMD_MRS);
4330 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4331 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4332 for (k = 0; k < table->num_entries; k++) {
4333 table->mc_reg_table_entry[k].mc_data[j] =
4334 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4335 if (!pi->mem_gddr5)
4336 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4337 }
4338 j++;
4339 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4340 return -EINVAL;
4341
4342 if (!pi->mem_gddr5) {
4343 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4344 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4345 for (k = 0; k < table->num_entries; k++) {
4346 table->mc_reg_table_entry[k].mc_data[j] =
4347 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4348 }
4349 j++;
4350 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4351 return -EINVAL;
4352 }
4353 break;
4354 case MC_SEQ_RESERVE_M:
4355 temp_reg = RREG32(MC_PMG_CMD_MRS1);
4356 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4357 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4358 for (k = 0; k < table->num_entries; k++) {
4359 table->mc_reg_table_entry[k].mc_data[j] =
4360 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4361 }
4362 j++;
4363 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4364 return -EINVAL;
4365 break;
4366 default:
4367 break;
4368 }
4369
4370 }
4371
4372 table->last = j;
4373
4374 return 0;
4375 }
4376
4377 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4378 {
4379 bool result = true;
4380
4381 switch (in_reg) {
4382 case MC_SEQ_RAS_TIMING >> 2:
4383 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4384 break;
4385 case MC_SEQ_DLL_STBY >> 2:
4386 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4387 break;
4388 case MC_SEQ_G5PDX_CMD0 >> 2:
4389 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4390 break;
4391 case MC_SEQ_G5PDX_CMD1 >> 2:
4392 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4393 break;
4394 case MC_SEQ_G5PDX_CTRL >> 2:
4395 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4396 break;
4397 case MC_SEQ_CAS_TIMING >> 2:
4398 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4399 break;
4400 case MC_SEQ_MISC_TIMING >> 2:
4401 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4402 break;
4403 case MC_SEQ_MISC_TIMING2 >> 2:
4404 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4405 break;
4406 case MC_SEQ_PMG_DVS_CMD >> 2:
4407 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4408 break;
4409 case MC_SEQ_PMG_DVS_CTL >> 2:
4410 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4411 break;
4412 case MC_SEQ_RD_CTL_D0 >> 2:
4413 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4414 break;
4415 case MC_SEQ_RD_CTL_D1 >> 2:
4416 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4417 break;
4418 case MC_SEQ_WR_CTL_D0 >> 2:
4419 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4420 break;
4421 case MC_SEQ_WR_CTL_D1 >> 2:
4422 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4423 break;
4424 case MC_PMG_CMD_EMRS >> 2:
4425 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4426 break;
4427 case MC_PMG_CMD_MRS >> 2:
4428 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4429 break;
4430 case MC_PMG_CMD_MRS1 >> 2:
4431 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4432 break;
4433 case MC_SEQ_PMG_TIMING >> 2:
4434 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4435 break;
4436 case MC_PMG_CMD_MRS2 >> 2:
4437 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4438 break;
4439 case MC_SEQ_WR_CTL_2 >> 2:
4440 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4441 break;
4442 default:
4443 result = false;
4444 break;
4445 }
4446
4447 return result;
4448 }
4449
4450 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4451 {
4452 u8 i, j;
4453
4454 for (i = 0; i < table->last; i++) {
4455 for (j = 1; j < table->num_entries; j++) {
4456 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4457 table->mc_reg_table_entry[j].mc_data[i]) {
4458 table->valid_flag |= 1 << i;
4459 break;
4460 }
4461 }
4462 }
4463 }
4464
4465 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4466 {
4467 u32 i;
4468 u16 address;
4469
4470 for (i = 0; i < table->last; i++) {
4471 table->mc_reg_address[i].s0 =
4472 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4473 address : table->mc_reg_address[i].s1;
4474 }
4475 }
4476
4477 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4478 struct ci_mc_reg_table *ci_table)
4479 {
4480 u8 i, j;
4481
4482 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4483 return -EINVAL;
4484 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4485 return -EINVAL;
4486
4487 for (i = 0; i < table->last; i++)
4488 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4489
4490 ci_table->last = table->last;
4491
4492 for (i = 0; i < table->num_entries; i++) {
4493 ci_table->mc_reg_table_entry[i].mclk_max =
4494 table->mc_reg_table_entry[i].mclk_max;
4495 for (j = 0; j < table->last; j++)
4496 ci_table->mc_reg_table_entry[i].mc_data[j] =
4497 table->mc_reg_table_entry[i].mc_data[j];
4498 }
4499 ci_table->num_entries = table->num_entries;
4500
4501 return 0;
4502 }
4503
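/* Patch the MC sequence values for specific Hawaii boards
 * (device ids 0x67B0/0x67B1) at certain memory clock entries.
 */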
4504 static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4505 struct ci_mc_reg_table *table)
4506 {
4507 u8 i, k;
4508 u32 tmp;
4509 bool patch;
4510
4511 tmp = RREG32(MC_SEQ_MISC0);
4512 patch = ((tmp & 0x0000f00) == 0x300);
4513
4514 if (patch &&
4515 ((rdev->pdev->device == 0x67B0) ||
4516 (rdev->pdev->device == 0x67B1))) {
4517 for (i = 0; i < table->last; i++) {
4518 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4519 return -EINVAL;
4520 switch (table->mc_reg_address[i].s1 >> 2) {
4521 case MC_SEQ_MISC1:
4522 for (k = 0; k < table->num_entries; k++) {
4523 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4524 (table->mc_reg_table_entry[k].mclk_max == 137500))
4525 table->mc_reg_table_entry[k].mc_data[i] =
4526 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4527 0x00000007;
4528 }
4529 break;
4530 case MC_SEQ_WR_CTL_D0:
4531 for (k = 0; k < table->num_entries; k++) {
4532 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4533 (table->mc_reg_table_entry[k].mclk_max == 137500))
4534 table->mc_reg_table_entry[k].mc_data[i] =
4535 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4536 0x0000D0DD;
4537 }
4538 break;
4539 case MC_SEQ_WR_CTL_D1:
4540 for (k = 0; k < table->num_entries; k++) {
4541 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4542 (table->mc_reg_table_entry[k].mclk_max == 137500))
4543 table->mc_reg_table_entry[k].mc_data[i] =
4544 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4545 0x0000D0DD;
4546 }
4547 break;
4548 case MC_SEQ_WR_CTL_2:
4549 for (k = 0; k < table->num_entries; k++) {
4550 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4551 (table->mc_reg_table_entry[k].mclk_max == 137500))
4552 table->mc_reg_table_entry[k].mc_data[i] = 0;
4553 }
4554 break;
4555 case MC_SEQ_CAS_TIMING:
4556 for (k = 0; k < table->num_entries; k++) {
4557 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4558 table->mc_reg_table_entry[k].mc_data[i] =
4559 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4560 0x000C0140;
4561 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4562 table->mc_reg_table_entry[k].mc_data[i] =
4563 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4564 0x000C0150;
4565 }
4566 break;
4567 case MC_SEQ_MISC_TIMING:
4568 for (k = 0; k < table->num_entries; k++) {
4569 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4570 table->mc_reg_table_entry[k].mc_data[i] =
4571 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4572 0x00000030;
4573 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4574 table->mc_reg_table_entry[k].mc_data[i] =
4575 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4576 0x00000035;
4577 }
4578 break;
4579 default:
4580 break;
4581 }
4582 }
4583
4584 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4585 tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4586 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4587 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4588 WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4589 }
4590
4591 return 0;
4592 }
4593
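/* Copy the active MC sequence registers into their LP shadows, pull the
 * MC register table out of the vbios and convert it into the driver's
 * ci_mc_reg_table format with board patches and the derived special
 * registers applied.
 */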
4594 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4595 {
4596 struct ci_power_info *pi = ci_get_pi(rdev);
4597 struct atom_mc_reg_table *table;
4598 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4599 u8 module_index = rv770_get_memory_module_index(rdev);
4600 int ret;
4601
4602 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4603 if (!table)
4604 return -ENOMEM;
4605
4606 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4607 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4608 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4609 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4610 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4611 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4612 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4613 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4614 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4615 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4616 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4617 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4618 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4619 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4620 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4621 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4622 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4623 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4624 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4625 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4626
4627 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4628 if (ret)
4629 goto init_mc_done;
4630
4631 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4632 if (ret)
4633 goto init_mc_done;
4634
4635 ci_set_s0_mc_reg_index(ci_table);
4636
4637 ret = ci_register_patching_mc_seq(rdev, ci_table);
4638 if (ret)
4639 goto init_mc_done;
4640
4641 ret = ci_set_mc_special_registers(rdev, ci_table);
4642 if (ret)
4643 goto init_mc_done;
4644
4645 ci_set_valid_flag(ci_table);
4646
4647 init_mc_done:
4648 kfree(table);
4649
4650 return ret;
4651 }
4652
4653 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4654 SMU7_Discrete_MCRegisters *mc_reg_table)
4655 {
4656 struct ci_power_info *pi = ci_get_pi(rdev);
4657 u32 i, j;
4658
4659 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4660 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4661 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4662 return -EINVAL;
4663 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4664 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4665 i++;
4666 }
4667 }
4668
4669 mc_reg_table->last = (u8)i;
4670
4671 return 0;
4672 }
4673
4674 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4675 SMU7_Discrete_MCRegisterSet *data,
4676 u32 num_entries, u32 valid_flag)
4677 {
4678 u32 i, j;
4679
4680 for (i = 0, j = 0; j < num_entries; j++) {
4681 if (valid_flag & (1 << j)) {
4682 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4683 i++;
4684 }
4685 }
4686 }
4687
4688 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4689 const u32 memory_clock,
4690 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4691 {
4692 struct ci_power_info *pi = ci_get_pi(rdev);
4693 u32 i = 0;
4694
4695 for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4696 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4697 break;
4698 }
4699
4700 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4701 --i;
4702
4703 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4704 mc_reg_table_data, pi->mc_reg_table.last,
4705 pi->mc_reg_table.valid_flag);
4706 }
4707
4708 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4709 SMU7_Discrete_MCRegisters *mc_reg_table)
4710 {
4711 struct ci_power_info *pi = ci_get_pi(rdev);
4712 u32 i;
4713
4714 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4715 ci_convert_mc_reg_table_entry_to_smc(rdev,
4716 pi->dpm_table.mclk_table.dpm_levels[i].value,
4717 &mc_reg_table->data[i]);
4718 }
4719
4720 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4721 {
4722 struct ci_power_info *pi = ci_get_pi(rdev);
4723 int ret;
4724
4725 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4726
4727 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4728 if (ret)
4729 return ret;
4730 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4731
4732 return ci_copy_bytes_to_smc(rdev,
4733 pi->mc_reg_table_start,
4734 (u8 *)&pi->smc_mc_reg_table,
4735 sizeof(SMU7_Discrete_MCRegisters),
4736 pi->sram_end);
4737 }
4738
4739 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4740 {
4741 struct ci_power_info *pi = ci_get_pi(rdev);
4742
4743 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4744 return 0;
4745
4746 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4747
4748 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4749
4750 return ci_copy_bytes_to_smc(rdev,
4751 pi->mc_reg_table_start +
4752 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4753 (u8 *)&pi->smc_mc_reg_table.data[0],
4754 sizeof(SMU7_Discrete_MCRegisterSet) *
4755 pi->dpm_table.mclk_table.count,
4756 pi->sram_end);
4757 }
4758
4759 static void ci_enable_voltage_control(struct radeon_device *rdev)
4760 {
4761 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4762
4763 tmp |= VOLT_PWRMGT_EN;
4764 WREG32_SMC(GENERAL_PWRMGT, tmp);
4765 }
4766
4767 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4768 struct radeon_ps *radeon_state)
4769 {
4770 struct ci_ps *state = ci_get_ps(radeon_state);
4771 int i;
4772 u16 pcie_speed, max_speed = 0;
4773
4774 for (i = 0; i < state->performance_level_count; i++) {
4775 pcie_speed = state->performance_levels[i].pcie_gen;
4776 if (max_speed < pcie_speed)
4777 max_speed = pcie_speed;
4778 }
4779
4780 return max_speed;
4781 }
4782
4783 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4784 {
4785 u32 speed_cntl = 0;
4786
4787 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4788 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4789
4790 return (u16)speed_cntl;
4791 }
4792
4793 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4794 {
4795 u32 link_width = 0;
4796
4797 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4798 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4799
4800 switch (link_width) {
4801 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4802 return 1;
4803 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4804 return 2;
4805 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4806 return 4;
4807 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4808 return 8;
4809 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4810 /* not actually supported */
4811 return 12;
4812 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4813 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4814 default:
4815 return 16;
4816 }
4817 }
4818
4819 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4820 struct radeon_ps *radeon_new_state,
4821 struct radeon_ps *radeon_current_state)
4822 {
4823 struct ci_power_info *pi = ci_get_pi(rdev);
4824 enum radeon_pcie_gen target_link_speed =
4825 ci_get_maximum_link_speed(rdev, radeon_new_state);
4826 enum radeon_pcie_gen current_link_speed;
4827
4828 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4829 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4830 else
4831 current_link_speed = pi->force_pcie_gen;
4832
4833 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4834 pi->pspp_notify_required = false;
4835 if (target_link_speed > current_link_speed) {
4836 switch (target_link_speed) {
4837 #ifdef CONFIG_ACPI
4838 case RADEON_PCIE_GEN3:
4839 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4840 break;
4841 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4842 if (current_link_speed == RADEON_PCIE_GEN2)
4843 break;
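/* fall through */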
4844 case RADEON_PCIE_GEN2:
4845 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4846 break;
4847 #endif
4848 default:
4849 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4850 break;
4851 }
4852 } else {
4853 if (target_link_speed < current_link_speed)
4854 pi->pspp_notify_required = true;
4855 }
4856 }
4857
4858 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4859 struct radeon_ps *radeon_new_state,
4860 struct radeon_ps *radeon_current_state)
4861 {
4862 struct ci_power_info *pi = ci_get_pi(rdev);
4863 enum radeon_pcie_gen target_link_speed =
4864 ci_get_maximum_link_speed(rdev, radeon_new_state);
4865 u8 request;
4866
4867 if (pi->pspp_notify_required) {
4868 if (target_link_speed == RADEON_PCIE_GEN3)
4869 request = PCIE_PERF_REQ_PECI_GEN3;
4870 else if (target_link_speed == RADEON_PCIE_GEN2)
4871 request = PCIE_PERF_REQ_PECI_GEN2;
4872 else
4873 request = PCIE_PERF_REQ_PECI_GEN1;
4874
4875 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4876 (ci_get_current_pcie_speed(rdev) > 0))
4877 return;
4878
4879 #ifdef CONFIG_ACPI
4880 radeon_acpi_pcie_performance_request(rdev, request, false);
4881 #endif
4882 }
4883 }
4884
4885 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4886 {
4887 struct ci_power_info *pi = ci_get_pi(rdev);
4888 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4889 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4890 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4891 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4892 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4893 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4894
4895 if (allowed_sclk_vddc_table == NULL)
4896 return -EINVAL;
4897 if (allowed_sclk_vddc_table->count < 1)
4898 return -EINVAL;
4899 if (allowed_mclk_vddc_table == NULL)
4900 return -EINVAL;
4901 if (allowed_mclk_vddc_table->count < 1)
4902 return -EINVAL;
4903 if (allowed_mclk_vddci_table == NULL)
4904 return -EINVAL;
4905 if (allowed_mclk_vddci_table->count < 1)
4906 return -EINVAL;
4907
4908 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4909 pi->max_vddc_in_pp_table =
4910 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4911
4912 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4913 pi->max_vddci_in_pp_table =
4914 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4915
4916 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4917 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4918 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4919 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4920 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4921 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4922 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4923 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4924
4925 return 0;
4926 }
4927
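/* Replace a vddc leakage id with the actual voltage recorded for it in
 * the leakage table.
 */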
4928 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4929 {
4930 struct ci_power_info *pi = ci_get_pi(rdev);
4931 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4932 u32 leakage_index;
4933
4934 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4935 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4936 *vddc = leakage_table->actual_voltage[leakage_index];
4937 break;
4938 }
4939 }
4940 }
4941
4942 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4943 {
4944 struct ci_power_info *pi = ci_get_pi(rdev);
4945 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4946 u32 leakage_index;
4947
4948 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4949 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4950 *vddci = leakage_table->actual_voltage[leakage_index];
4951 break;
4952 }
4953 }
4954 }
4955
4956 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4957 struct radeon_clock_voltage_dependency_table *table)
4958 {
4959 u32 i;
4960
4961 if (table) {
4962 for (i = 0; i < table->count; i++)
4963 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4964 }
4965 }
4966
4967 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4968 struct radeon_clock_voltage_dependency_table *table)
4969 {
4970 u32 i;
4971
4972 if (table) {
4973 for (i = 0; i < table->count; i++)
4974 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4975 }
4976 }
4977
4978 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4979 struct radeon_vce_clock_voltage_dependency_table *table)
4980 {
4981 u32 i;
4982
4983 if (table) {
4984 for (i = 0; i < table->count; i++)
4985 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4986 }
4987 }
4988
4989 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4990 struct radeon_uvd_clock_voltage_dependency_table *table)
4991 {
4992 u32 i;
4993
4994 if (table) {
4995 for (i = 0; i < table->count; i++)
4996 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4997 }
4998 }
4999
5000 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
5001 struct radeon_phase_shedding_limits_table *table)
5002 {
5003 u32 i;
5004
5005 if (table) {
5006 for (i = 0; i < table->count; i++)
5007 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
5008 }
5009 }
5010
5011 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
5012 struct radeon_clock_and_voltage_limits *table)
5013 {
5014 if (table) {
5015 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
5016 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
5017 }
5018 }
5019
5020 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
5021 struct radeon_cac_leakage_table *table)
5022 {
5023 u32 i;
5024
5025 if (table) {
5026 for (i = 0; i < table->count; i++)
5027 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
5028 }
5029 }
5030
5031 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
5032 {
5033
5034 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5035 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5036 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5037 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5038 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5039 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5040 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
5041 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5042 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5043 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5044 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5045 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5046 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5047 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5048 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5049 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5050 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
5051 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
5052 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5053 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5054 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5055 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5056 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
5057 &rdev->pm.dpm.dyn_state.cac_leakage_table);
5058
5059 }
5060
5061 static void ci_get_memory_type(struct radeon_device *rdev)
5062 {
5063 struct ci_power_info *pi = ci_get_pi(rdev);
5064 u32 tmp;
5065
5066 tmp = RREG32(MC_SEQ_MISC0);
5067
5068 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5069 MC_SEQ_MISC0_GDDR5_VALUE)
5070 pi->mem_gddr5 = true;
5071 else
5072 pi->mem_gddr5 = false;
5073
5074 }
5075
5076 static void ci_update_current_ps(struct radeon_device *rdev,
5077 struct radeon_ps *rps)
5078 {
5079 struct ci_ps *new_ps = ci_get_ps(rps);
5080 struct ci_power_info *pi = ci_get_pi(rdev);
5081
5082 pi->current_rps = *rps;
5083 pi->current_ps = *new_ps;
5084 pi->current_rps.ps_priv = &pi->current_ps;
5085 }
5086
5087 static void ci_update_requested_ps(struct radeon_device *rdev,
5088 struct radeon_ps *rps)
5089 {
5090 struct ci_ps *new_ps = ci_get_ps(rps);
5091 struct ci_power_info *pi = ci_get_pi(rdev);
5092
5093 pi->requested_rps = *rps;
5094 pi->requested_ps = *new_ps;
5095 pi->requested_rps.ps_priv = &pi->requested_ps;
5096 }
5097
5098 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5099 {
5100 struct ci_power_info *pi = ci_get_pi(rdev);
5101 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5102 struct radeon_ps *new_ps = &requested_ps;
5103
5104 ci_update_requested_ps(rdev, new_ps);
5105
5106 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5107
5108 return 0;
5109 }
5110
5111 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5112 {
5113 struct ci_power_info *pi = ci_get_pi(rdev);
5114 struct radeon_ps *new_ps = &pi->requested_rps;
5115
5116 ci_update_current_ps(rdev, new_ps);
5117 }
5118
5119
5120 void ci_dpm_setup_asic(struct radeon_device *rdev)
5121 {
5122 int r;
5123
5124 r = ci_mc_load_microcode(rdev);
5125 if (r)
5126 DRM_ERROR("Failed to load MC firmware!\n");
5127 ci_read_clock_registers(rdev);
5128 ci_get_memory_type(rdev);
5129 ci_enable_acpi_power_management(rdev);
5130 ci_init_sclk_t(rdev);
5131 }
5132
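/* Bring up dpm: construct the voltage and MC register tables, upload the
 * firmware and SMC dpm tables, start the SMC, enable the dpm features
 * (ULV, deep sleep, DIDT, CAC, power containment, thermal control) and
 * record the boot state as the current state.
 */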
5133 int ci_dpm_enable(struct radeon_device *rdev)
5134 {
5135 struct ci_power_info *pi = ci_get_pi(rdev);
5136 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5137 int ret;
5138
5139 if (ci_is_smc_running(rdev))
5140 return -EINVAL;
5141 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5142 ci_enable_voltage_control(rdev);
5143 ret = ci_construct_voltage_tables(rdev);
5144 if (ret) {
5145 DRM_ERROR("ci_construct_voltage_tables failed\n");
5146 return ret;
5147 }
5148 }
5149 if (pi->caps_dynamic_ac_timing) {
5150 ret = ci_initialize_mc_reg_table(rdev);
5151 if (ret)
5152 pi->caps_dynamic_ac_timing = false;
5153 }
5154 if (pi->dynamic_ss)
5155 ci_enable_spread_spectrum(rdev, true);
5156 if (pi->thermal_protection)
5157 ci_enable_thermal_protection(rdev, true);
5158 ci_program_sstp(rdev);
5159 ci_enable_display_gap(rdev);
5160 ci_program_vc(rdev);
5161 ret = ci_upload_firmware(rdev);
5162 if (ret) {
5163 DRM_ERROR("ci_upload_firmware failed\n");
5164 return ret;
5165 }
5166 ret = ci_process_firmware_header(rdev);
5167 if (ret) {
5168 DRM_ERROR("ci_process_firmware_header failed\n");
5169 return ret;
5170 }
5171 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
5172 if (ret) {
5173 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5174 return ret;
5175 }
5176 ret = ci_init_smc_table(rdev);
5177 if (ret) {
5178 DRM_ERROR("ci_init_smc_table failed\n");
5179 return ret;
5180 }
5181 ret = ci_init_arb_table_index(rdev);
5182 if (ret) {
5183 DRM_ERROR("ci_init_arb_table_index failed\n");
5184 return ret;
5185 }
5186 if (pi->caps_dynamic_ac_timing) {
5187 ret = ci_populate_initial_mc_reg_table(rdev);
5188 if (ret) {
5189 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5190 return ret;
5191 }
5192 }
5193 ret = ci_populate_pm_base(rdev);
5194 if (ret) {
5195 DRM_ERROR("ci_populate_pm_base failed\n");
5196 return ret;
5197 }
5198 ci_dpm_start_smc(rdev);
5199 ci_enable_vr_hot_gpio_interrupt(rdev);
5200 ret = ci_notify_smc_display_change(rdev, false);
5201 if (ret) {
5202 DRM_ERROR("ci_notify_smc_display_change failed\n");
5203 return ret;
5204 }
5205 ci_enable_sclk_control(rdev, true);
5206 ret = ci_enable_ulv(rdev, true);
5207 if (ret) {
5208 DRM_ERROR("ci_enable_ulv failed\n");
5209 return ret;
5210 }
5211 ret = ci_enable_ds_master_switch(rdev, true);
5212 if (ret) {
5213 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5214 return ret;
5215 }
5216 ret = ci_start_dpm(rdev);
5217 if (ret) {
5218 DRM_ERROR("ci_start_dpm failed\n");
5219 return ret;
5220 }
5221 ret = ci_enable_didt(rdev, true);
5222 if (ret) {
5223 DRM_ERROR("ci_enable_didt failed\n");
5224 return ret;
5225 }
5226 ret = ci_enable_smc_cac(rdev, true);
5227 if (ret) {
5228 DRM_ERROR("ci_enable_smc_cac failed\n");
5229 return ret;
5230 }
5231 ret = ci_enable_power_containment(rdev, true);
5232 if (ret) {
5233 DRM_ERROR("ci_enable_power_containment failed\n");
5234 return ret;
5235 }
5236
5237 ret = ci_power_control_set_level(rdev);
5238 if (ret) {
5239 DRM_ERROR("ci_power_control_set_level failed\n");
5240 return ret;
5241 }
5242
5243 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5244
5245 ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
5246 if (ret) {
5247 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5248 return ret;
5249 }
5250
5251 ci_thermal_start_thermal_controller(rdev);
5252
5253 ci_update_current_ps(rdev, boot_ps);
5254
5255 return 0;
5256 }
5257
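/*
 * Program the default thermal interrupt range (R600_TEMP_RANGE_MIN to
 * R600_TEMP_RANGE_MAX), keeping the thermal alert disabled while the
 * range is being updated.
 */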
5258 static int ci_set_temperature_range(struct radeon_device *rdev)
5259 {
5260 int ret;
5261
5262 ret = ci_thermal_enable_alert(rdev, false);
5263 if (ret)
5264 return ret;
5265 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5266 if (ret)
5267 return ret;
5268 ret = ci_thermal_enable_alert(rdev, true);
5269 if (ret)
5270 return ret;
5271
5272 return ret;
5273 }
5274
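/*
 * Late enable step: set the thermal interrupt range and power-gate UVD
 * by default.
 */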
5275 int ci_dpm_late_enable(struct radeon_device *rdev)
5276 {
5277 int ret;
5278
5279 ret = ci_set_temperature_range(rdev);
5280 if (ret)
5281 return ret;
5282
5283 ci_dpm_powergate_uvd(rdev, true);
5284
5285 return 0;
5286 }
5287
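/*
 * Tear DPM down in roughly the reverse order of ci_dpm_enable(): stop the
 * thermal controller, disable the individual DPM features, stop the SMC,
 * switch the memory arbiter back to F0 and fall back to the boot state.
 * UVD is un-power-gated first; the rest is skipped if the SMC is not
 * running.
 */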
5288 void ci_dpm_disable(struct radeon_device *rdev)
5289 {
5290 struct ci_power_info *pi = ci_get_pi(rdev);
5291 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5292
5293 ci_dpm_powergate_uvd(rdev, false);
5294
5295 if (!ci_is_smc_running(rdev))
5296 return;
5297
5298 ci_thermal_stop_thermal_controller(rdev);
5299
5300 if (pi->thermal_protection)
5301 ci_enable_thermal_protection(rdev, false);
5302 ci_enable_power_containment(rdev, false);
5303 ci_enable_smc_cac(rdev, false);
5304 ci_enable_didt(rdev, false);
5305 ci_enable_spread_spectrum(rdev, false);
5306 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5307 ci_stop_dpm(rdev);
5308 ci_enable_ds_master_switch(rdev, false);
5309 ci_enable_ulv(rdev, false);
5310 ci_clear_vc(rdev);
5311 ci_reset_to_default(rdev);
5312 ci_dpm_stop_smc(rdev);
5313 ci_force_switch_to_arb_f0(rdev);
5314 ci_enable_thermal_based_sclk_dpm(rdev, false);
5315
5316 ci_update_current_ps(rdev, boot_ps);
5317 }
5318
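/*
 * Switch to the requested power state: freeze sclk/mclk DPM, upload the
 * new DPM levels and enable mask, update VCE DPM and the memory timing
 * parameters, then unfreeze DPM.  PCIe link speed changes are requested
 * before and notified after the switch when pcie_performance_request is
 * supported.
 */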
5319 int ci_dpm_set_power_state(struct radeon_device *rdev)
5320 {
5321 struct ci_power_info *pi = ci_get_pi(rdev);
5322 struct radeon_ps *new_ps = &pi->requested_rps;
5323 struct radeon_ps *old_ps = &pi->current_rps;
5324 int ret;
5325
5326 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
5327 if (pi->pcie_performance_request)
5328 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
5329 ret = ci_freeze_sclk_mclk_dpm(rdev);
5330 if (ret) {
5331 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5332 return ret;
5333 }
5334 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
5335 if (ret) {
5336 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5337 return ret;
5338 }
5339 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
5340 if (ret) {
5341 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5342 return ret;
5343 }
5344
5345 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
5346 if (ret) {
5347 DRM_ERROR("ci_update_vce_dpm failed\n");
5348 return ret;
5349 }
5350
5351 ret = ci_update_sclk_t(rdev);
5352 if (ret) {
5353 DRM_ERROR("ci_update_sclk_t failed\n");
5354 return ret;
5355 }
5356 if (pi->caps_dynamic_ac_timing) {
5357 ret = ci_update_and_upload_mc_reg_table(rdev);
5358 if (ret) {
5359 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5360 return ret;
5361 }
5362 }
5363 ret = ci_program_memory_timing_parameters(rdev);
5364 if (ret) {
5365 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5366 return ret;
5367 }
5368 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
5369 if (ret) {
5370 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5371 return ret;
5372 }
5373 ret = ci_upload_dpm_level_enable_mask(rdev);
5374 if (ret) {
5375 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5376 return ret;
5377 }
5378 if (pi->pcie_performance_request)
5379 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5380
5381 return 0;
5382 }
5383
5384 #if 0
5385 void ci_dpm_reset_asic(struct radeon_device *rdev)
5386 {
5387 ci_set_boot_state(rdev);
5388 }
5389 #endif
5390
5391 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
5392 {
5393 ci_program_display_gap(rdev);
5394 }
5395
5396 union power_info {
5397 struct _ATOM_POWERPLAY_INFO info;
5398 struct _ATOM_POWERPLAY_INFO_V2 info_2;
5399 struct _ATOM_POWERPLAY_INFO_V3 info_3;
5400 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5401 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5402 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5403 };
5404
5405 union pplib_clock_info {
5406 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5407 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5408 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5409 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5410 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5411 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5412 };
5413
5414 union pplib_power_state {
5415 struct _ATOM_PPLIB_STATE v1;
5416 struct _ATOM_PPLIB_STATE_V2 v2;
5417 };
5418
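/*
 * Copy the non-clock fields of an ATOM PowerPlay state (caps,
 * classification, UVD vclk/dclk) into the radeon_ps and remember which
 * states are the boot and UVD states.
 */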
5419 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
5420 struct radeon_ps *rps,
5421 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5422 u8 table_rev)
5423 {
5424 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5425 rps->class = le16_to_cpu(non_clock_info->usClassification);
5426 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5427
5428 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5429 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5430 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5431 } else {
5432 rps->vclk = 0;
5433 rps->dclk = 0;
5434 }
5435
5436 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5437 rdev->pm.dpm.boot_ps = rps;
5438 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5439 rdev->pm.dpm.uvd_ps = rps;
5440 }
5441
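/*
 * Fill one performance level from an ATOM clock-info entry: engine and
 * memory clocks plus the supported PCIe gen and lane count.  Also records
 * the ACPI PCIe gen, the ULV level, the per-class (battery/performance)
 * PCIe limits, and patches the boot state with the VBIOS boot-up values.
 */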
5442 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5443 struct radeon_ps *rps, int index,
5444 union pplib_clock_info *clock_info)
5445 {
5446 struct ci_power_info *pi = ci_get_pi(rdev);
5447 struct ci_ps *ps = ci_get_ps(rps);
5448 struct ci_pl *pl = &ps->performance_levels[index];
5449
5450 ps->performance_level_count = index + 1;
5451
5452 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5453 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5454 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5455 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5456
5457 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5458 pi->sys_pcie_mask,
5459 pi->vbios_boot_state.pcie_gen_bootup_value,
5460 clock_info->ci.ucPCIEGen);
5461 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5462 pi->vbios_boot_state.pcie_lane_bootup_value,
5463 le16_to_cpu(clock_info->ci.usPCIELane));
5464
5465 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5466 pi->acpi_pcie_gen = pl->pcie_gen;
5467 }
5468
5469 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5470 pi->ulv.supported = true;
5471 pi->ulv.pl = *pl;
5472 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5473 }
5474
5475 /* patch up boot state */
5476 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5477 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5478 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5479 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5480 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5481 }
5482
5483 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5484 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5485 pi->use_pcie_powersaving_levels = true;
5486 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5487 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5488 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5489 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5490 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5491 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5492 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5493 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5494 break;
5495 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5496 pi->use_pcie_performance_levels = true;
5497 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5498 pi->pcie_gen_performance.max = pl->pcie_gen;
5499 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5500 pi->pcie_gen_performance.min = pl->pcie_gen;
5501 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5502 pi->pcie_lane_performance.max = pl->pcie_lane;
5503 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5504 pi->pcie_lane_performance.min = pl->pcie_lane;
5505 break;
5506 default:
5507 break;
5508 }
5509 }
5510
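/*
 * Walk the ATOM PowerPlay state, clock-info and non-clock-info arrays and
 * build rdev->pm.dpm.ps: one radeon_ps per state, each backed by a ci_ps
 * holding up to CISLANDS_MAX_HARDWARE_POWERLEVELS performance levels.
 * The VCE state clocks are filled in from the referenced clock-info
 * entries at the end.
 */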
5511 static int ci_parse_power_table(struct radeon_device *rdev)
5512 {
5513 struct radeon_mode_info *mode_info = &rdev->mode_info;
5514 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5515 union pplib_power_state *power_state;
5516 int i, j, k, non_clock_array_index, clock_array_index;
5517 union pplib_clock_info *clock_info;
5518 struct _StateArray *state_array;
5519 struct _ClockInfoArray *clock_info_array;
5520 struct _NonClockInfoArray *non_clock_info_array;
5521 union power_info *power_info;
5522 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5523 u16 data_offset;
5524 u8 frev, crev;
5525 u8 *power_state_offset;
5526 struct ci_ps *ps;
5527
5528 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5529 &frev, &crev, &data_offset))
5530 return -EINVAL;
5531 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5532
5533 state_array = (struct _StateArray *)
5534 (mode_info->atom_context->bios + data_offset +
5535 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5536 clock_info_array = (struct _ClockInfoArray *)
5537 (mode_info->atom_context->bios + data_offset +
5538 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5539 non_clock_info_array = (struct _NonClockInfoArray *)
5540 (mode_info->atom_context->bios + data_offset +
5541 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5542
5543 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
5544 state_array->ucNumEntries, GFP_KERNEL);
5545 if (!rdev->pm.dpm.ps)
5546 return -ENOMEM;
5547 power_state_offset = (u8 *)state_array->states;
5548 for (i = 0; i < state_array->ucNumEntries; i++) {
5549 u8 *idx;
5550 power_state = (union pplib_power_state *)power_state_offset;
5551 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5552 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5553 &non_clock_info_array->nonClockInfo[non_clock_array_index];
/*
 * Error paths: record how many states were fully set up so that the
 * caller's ci_dpm_fini() can free them; freeing rdev->pm.dpm.ps here
 * as well could lead to a double free when ci_dpm_fini() runs.
 */
if (!rdev->pm.power_state[i].clock_info) {
rdev->pm.dpm.num_ps = i;
return -EINVAL;
}
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
if (ps == NULL) {
rdev->pm.dpm.num_ps = i;
return -ENOMEM;
}
5561 rdev->pm.dpm.ps[i].ps_priv = ps;
5562 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5563 non_clock_info,
5564 non_clock_info_array->ucEntrySize);
5565 k = 0;
5566 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5567 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5568 clock_array_index = idx[j];
5569 if (clock_array_index >= clock_info_array->ucNumEntries)
5570 continue;
5571 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5572 break;
5573 clock_info = (union pplib_clock_info *)
5574 ((u8 *)&clock_info_array->clockInfo[0] +
5575 (clock_array_index * clock_info_array->ucEntrySize));
5576 ci_parse_pplib_clock_info(rdev,
5577 &rdev->pm.dpm.ps[i], k,
5578 clock_info);
5579 k++;
5580 }
5581 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5582 }
5583 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5584
5585 /* fill in the vce power states */
5586 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5587 u32 sclk, mclk;
5588 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5589 clock_info = (union pplib_clock_info *)
5590 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5591 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5592 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5593 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5594 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5595 rdev->pm.dpm.vce_states[i].sclk = sclk;
5596 rdev->pm.dpm.vce_states[i].mclk = mclk;
5597 }
5598
5599 return 0;
5600 }
5601
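/*
 * Read the boot-up voltages and engine/memory clocks from the ATOM
 * FirmwareInfo table, along with the current PCIe speed and lane count,
 * so the boot power state can be patched up later.
 */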
5602 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5603 struct ci_vbios_boot_state *boot_state)
5604 {
5605 struct radeon_mode_info *mode_info = &rdev->mode_info;
5606 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5607 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5608 u8 frev, crev;
5609 u16 data_offset;
5610
5611 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5612 &frev, &crev, &data_offset)) {
5613 firmware_info =
5614 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5615 data_offset);
5616 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5617 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5618 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5619 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5620 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5621 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5622 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5623
5624 return 0;
5625 }
5626 return -EINVAL;
5627 }
5628
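/*
 * Free everything allocated by ci_dpm_init(): the per-state ci_ps data,
 * the power state array, the private ci_power_info, the display clock
 * dependency table and the extended power tables.
 */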
5629 void ci_dpm_fini(struct radeon_device *rdev)
5630 {
5631 int i;
5632
5633 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5634 kfree(rdev->pm.dpm.ps[i].ps_priv);
5635 }
5636 kfree(rdev->pm.dpm.ps);
5637 kfree(rdev->pm.dpm.priv);
5638 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5639 r600_free_extended_power_table(rdev);
5640 }
5641
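/*
 * Allocate the ci_power_info and populate it from the VBIOS and platform
 * capabilities: PCIe limits, boot values, power and dependency tables,
 * powertune defaults, thermal limits, the VRHot/AC-DC/PCC GPIOs and the
 * voltage control method (GPIO LUT or SVID2) for VDDC, VDDCI and MVDD.
 */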
5642 int ci_dpm_init(struct radeon_device *rdev)
5643 {
5644 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5645 SMU7_Discrete_DpmTable *dpm_table;
5646 struct radeon_gpio_rec gpio;
5647 u16 data_offset, size;
5648 u8 frev, crev;
5649 struct ci_power_info *pi;
5650 int ret;
5651 u32 mask;
5652
5653 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5654 if (pi == NULL)
5655 return -ENOMEM;
5656 rdev->pm.dpm.priv = pi;
5657
5658 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5659 if (ret)
5660 pi->sys_pcie_mask = 0;
5661 else
5662 pi->sys_pcie_mask = mask;
5663 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5664
5665 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5666 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5667 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5668 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5669
5670 pi->pcie_lane_performance.max = 0;
5671 pi->pcie_lane_performance.min = 16;
5672 pi->pcie_lane_powersaving.max = 0;
5673 pi->pcie_lane_powersaving.min = 16;
5674
5675 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5676 if (ret) {
5677 ci_dpm_fini(rdev);
5678 return ret;
5679 }
5680
5681 ret = r600_get_platform_caps(rdev);
5682 if (ret) {
5683 ci_dpm_fini(rdev);
5684 return ret;
5685 }
5686
5687 ret = r600_parse_extended_power_table(rdev);
5688 if (ret) {
5689 ci_dpm_fini(rdev);
5690 return ret;
5691 }
5692
5693 ret = ci_parse_power_table(rdev);
5694 if (ret) {
5695 ci_dpm_fini(rdev);
5696 return ret;
5697 }
5698
5699 pi->dll_default_on = false;
5700 pi->sram_end = SMC_RAM_END;
5701
5702 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5703 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5704 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5705 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5706 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5707 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5708 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5709 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5710
5711 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5712
5713 pi->sclk_dpm_key_disabled = 0;
5714 pi->mclk_dpm_key_disabled = 0;
5715 pi->pcie_dpm_key_disabled = 0;
5716 pi->thermal_sclk_dpm_enabled = 0;
5717
5718 /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5719 if ((rdev->pdev->device == 0x6658) &&
5720 (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5721 pi->mclk_dpm_key_disabled = 1;
5722 }
5723
5724 pi->caps_sclk_ds = true;
5725
5726 pi->mclk_strobe_mode_threshold = 40000;
5727 pi->mclk_stutter_mode_threshold = 40000;
5728 pi->mclk_edc_enable_threshold = 40000;
5729 pi->mclk_edc_wr_enable_threshold = 40000;
5730
5731 ci_initialize_powertune_defaults(rdev);
5732
5733 pi->caps_fps = false;
5734
5735 pi->caps_sclk_throttle_low_notification = false;
5736
5737 pi->caps_uvd_dpm = true;
5738 pi->caps_vce_dpm = true;
5739
5740 ci_get_leakage_voltages(rdev);
5741 ci_patch_dependency_tables_with_leakage(rdev);
5742 ci_set_private_data_variables_based_on_pptable(rdev);
5743
5744 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5745 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5746 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5747 ci_dpm_fini(rdev);
5748 return -ENOMEM;
5749 }
5750 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5751 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5752 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5753 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5754 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5755 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5756 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5757 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5758 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5759
5760 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5761 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5762 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5763
5764 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5765 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5766 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5767 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5768
5769 if (rdev->family == CHIP_HAWAII) {
5770 pi->thermal_temp_setting.temperature_low = 94500;
5771 pi->thermal_temp_setting.temperature_high = 95000;
5772 pi->thermal_temp_setting.temperature_shutdown = 104000;
5773 } else {
5774 pi->thermal_temp_setting.temperature_low = 99500;
5775 pi->thermal_temp_setting.temperature_high = 100000;
5776 pi->thermal_temp_setting.temperature_shutdown = 104000;
5777 }
5778
5779 pi->uvd_enabled = false;
5780
5781 dpm_table = &pi->smc_state_table;
5782
5783 gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5784 if (gpio.valid) {
5785 dpm_table->VRHotGpio = gpio.shift;
5786 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5787 } else {
5788 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5789 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5790 }
5791
5792 gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5793 if (gpio.valid) {
5794 dpm_table->AcDcGpio = gpio.shift;
5795 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5796 } else {
5797 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5798 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5799 }
5800
5801 gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5802 if (gpio.valid) {
5803 u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5804
5805 switch (gpio.shift) {
5806 case 0:
5807 tmp &= ~GNB_SLOW_MODE_MASK;
5808 tmp |= GNB_SLOW_MODE(1);
5809 break;
5810 case 1:
5811 tmp &= ~GNB_SLOW_MODE_MASK;
5812 tmp |= GNB_SLOW_MODE(2);
5813 break;
5814 case 2:
5815 tmp |= GNB_SLOW;
5816 break;
5817 case 3:
5818 tmp |= FORCE_NB_PS1;
5819 break;
5820 case 4:
5821 tmp |= DPM_ENABLED;
5822 break;
5823 default:
5824 DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5825 break;
5826 }
5827 WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5828 }
5829
5830 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5831 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5832 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5833 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5834 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5835 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5836 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5837
5838 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5839 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5840 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5841 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5842 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5843 else
5844 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5845 }
5846
5847 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5848 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5849 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5850 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5851 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5852 else
5853 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5854 }
5855
5856 pi->vddc_phase_shed_control = true;
5857
5858 #if defined(CONFIG_ACPI)
5859 pi->pcie_performance_request =
5860 radeon_acpi_is_pcie_performance_request_supported(rdev);
5861 #else
5862 pi->pcie_performance_request = false;
5863 #endif
5864
5865 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5866 &frev, &crev, &data_offset)) {
5867 pi->caps_sclk_ss_support = true;
5868 pi->caps_mclk_ss_support = true;
5869 pi->dynamic_ss = true;
5870 } else {
5871 pi->caps_sclk_ss_support = false;
5872 pi->caps_mclk_ss_support = false;
5873 pi->dynamic_ss = true;
5874 }
5875
5876 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5877 pi->thermal_protection = true;
5878 else
5879 pi->thermal_protection = false;
5880
5881 pi->caps_dynamic_ac_timing = true;
5882
5883 pi->uvd_power_gated = false;
5884
5885 /* make sure dc limits are valid */
5886 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5887 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5888 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5889 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5890
5891 pi->fan_ctrl_is_in_default_mode = true;
5892
5893 return 0;
5894 }
5895
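/*
 * debugfs helper: report UVD/VCE activity and the average sclk/mclk
 * frequencies for the current power state.
 */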
5896 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5897 struct seq_file *m)
5898 {
5899 struct ci_power_info *pi = ci_get_pi(rdev);
5900 struct radeon_ps *rps = &pi->current_rps;
5901 u32 sclk = ci_get_average_sclk_freq(rdev);
5902 u32 mclk = ci_get_average_mclk_freq(rdev);
5903
5904 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
5905 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
5906 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5907 sclk, mclk);
5908 }
5909
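/*
 * Dump a power state to the kernel log: class/caps info, UVD clocks and
 * the sclk/mclk/PCIe settings of each performance level.
 */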
5910 void ci_dpm_print_power_state(struct radeon_device *rdev,
5911 struct radeon_ps *rps)
5912 {
5913 struct ci_ps *ps = ci_get_ps(rps);
5914 struct ci_pl *pl;
5915 int i;
5916
5917 r600_dpm_print_class_info(rps->class, rps->class2);
5918 r600_dpm_print_cap_info(rps->caps);
5919 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5920 for (i = 0; i < ps->performance_level_count; i++) {
5921 pl = &ps->performance_levels[i];
5922 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5923 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5924 }
5925 r600_dpm_print_ps_status(rdev, rps);
5926 }
5927
5928 u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
5929 {
5930 u32 sclk = ci_get_average_sclk_freq(rdev);
5931
5932 return sclk;
5933 }
5934
5935 u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
5936 {
5937 u32 mclk = ci_get_average_mclk_freq(rdev);
5938
5939 return mclk;
5940 }
5941
5942 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5943 {
5944 struct ci_power_info *pi = ci_get_pi(rdev);
5945 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5946
5947 if (low)
5948 return requested_state->performance_levels[0].sclk;
5949 else
5950 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5951 }
5952
5953 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5954 {
5955 struct ci_power_info *pi = ci_get_pi(rdev);
5956 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5957
5958 if (low)
5959 return requested_state->performance_levels[0].mclk;
5960 else
5961 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5962 }