/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

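/*
 * Copy the DRAM timing and burst-time fields of one MC arbitration
 * register set (F0/F1) into another, then request a switch to the
 * destination set.  Only F0 and F1 are supported as sources here;
 * anything else is rejected with -EINVAL.  The 0x0000000F OR'ed into
 * MC_CG_CONFIG appears to enable all four arbitration sets before the
 * switch request is issued through MC_ARB_CG.
 */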
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		 ~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

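/*
 * The two helpers below map a memory clock (in 10 kHz units, judging by
 * the thresholds used) onto a 4-bit MC parameter index.  For example, in
 * the DDR3 case a 400 MHz clock (memory_clock = 40000) yields
 * (40000 - 10000) / 5000 + 1 = 7.
 */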
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

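/*
 * If the voltage table holds more entries than the SMC state table can
 * take, drop the leading entries and keep the last max_voltage_steps
 * ones (the highest voltages, assuming the table is sorted ascending).
 */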
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

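/*
 * Convert a VDDC value (in mV, given VOLTAGE_SCALE == 4) to an
 * SVI2-style VID code: VID = (1550 mV - vddc) / 6.25 mV, computed in
 * integer math as (6200 - vddc * 4) / 25.  For example,
 * 1100 mV -> (6200 - 4400) / 25 = 72.
 */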
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, PmFuseTable) +
					    offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
					    (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
					    pi->sram_end);
	if (ret)
		return -EINVAL;

	/*
	 * The fuse value read above is immediately overwritten: the read
	 * only verifies that the PmFuses table is reachable, and the
	 * powertune default always wins.
	 */
	pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

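/*
 * Scan the BapmVddC hi/lo SIDD VID arrays populated earlier and record
 * the smallest and largest non-zero VIDs as the GNB low-power-mode
 * limits.  A zero min or max means the arrays were never filled in, so
 * fail with -EINVAL.
 */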
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd, lo_sidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
						    SMU7_FIRMWARE_HEADER_LOCATION +
						    offsetof(SMU7_Firmware_Header, PmFuseTable),
						    &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
						  (u8 *)&pi->smc_powertune_table,
						  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

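/*
 * Walk a ci_pt_config_reg array (terminated by an offset of 0xFFFFFFFF)
 * and apply each masked field to its register, using the SMC or DIDT
 * indirect space as the entry type dictates.  CACHE-type entries only
 * accumulate their shifted values into 'cache', which is then folded
 * into the next non-cache register write.
 */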
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(adev, gate);
}

static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	return vblank_time < switch_limit;
}

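/*
 * Clamp the requested power state to the current operating conditions:
 * pick up the VCE clocks when VCE is active, disable mclk switching when
 * more than one CRTC is lit or the vblank period is too short to hide
 * the switch, and cap the per-level clocks to the AC or DC maximums.
 */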
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (!adev->pm.dpm.ac_power) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

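/*
 * Program the thermal interrupt thresholds.  The hardware fields take
 * whole degrees C while the dpm layer tracks millidegrees, hence the
 * divisions by 1000; the range is clamped to 0..255 C.
 */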
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

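/*
 * Build the SMU7 fan table from the fan profile in adev->pm.dpm.fan.
 * The temperatures there appear to be in units of 0.01 C and the PWM
 * points in units of 0.01 %, which is why the slopes are computed with
 * a x16 scale and a +50 rounding term before the final divide by 100.
 */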
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

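/*
 * Force a static fan duty cycle.  The requested percentage is rescaled
 * against FMAX_DUTY100 (the duty value that corresponds to 100 %) before
 * being written to CG_FDO_CTRL0; manual control is refused while the SMC
 * owns the fan.
 */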
static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, mode);
	} else {
		/* restart auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		else
			ci_fan_ctrl_set_default_mode(adev);
	}
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(ixCG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(adev);
}

static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
					     pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
					      pi->soft_regs_start + reg_offset,
					      value, pi->sram_end);
}

static void ci_init_fps_limits(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}

	return ret;
}

static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		else
			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
				 ~MC_SEQ_CNTL_3__CAC_EN_MASK);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

1650static int ci_start_dpm(struct amdgpu_device *adev)
1651{
1652 struct ci_power_info *pi = ci_get_pi(adev);
1653 PPSMC_Result smc_result;
1654 int ret;
1655 u32 tmp;
1656
1657 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1658 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1659 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1660
1661 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1662 tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1663 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1664
1665 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1666
1667 WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1668
1669 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1670 if (smc_result != PPSMC_Result_OK)
1671 return -EINVAL;
1672
1673 ret = ci_enable_sclk_mclk_dpm(adev, true);
1674 if (ret)
1675 return ret;
1676
1677 if (!pi->pcie_dpm_key_disabled) {
1678 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1679 if (smc_result != PPSMC_Result_OK)
1680 return -EINVAL;
1681 }
1682
1683 return 0;
1684}
1685
1686static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1687{
1688 struct ci_power_info *pi = ci_get_pi(adev);
1689 PPSMC_Result smc_result;
1690
1691 if (!pi->need_update_smu7_dpm_table)
1692 return 0;
1693
1694 if ((!pi->sclk_dpm_key_disabled) &&
1695 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1696 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1697 if (smc_result != PPSMC_Result_OK)
1698 return -EINVAL;
1699 }
1700
1701 if ((!pi->mclk_dpm_key_disabled) &&
1702 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1703 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1704 if (smc_result != PPSMC_Result_OK)
1705 return -EINVAL;
1706 }
1707
1708 return 0;
1709}
1710
1711static int ci_stop_dpm(struct amdgpu_device *adev)
1712{
1713 struct ci_power_info *pi = ci_get_pi(adev);
1714 PPSMC_Result smc_result;
1715 int ret;
1716 u32 tmp;
1717
1718 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1719 tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1720 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1721
1722 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1723 tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1724 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1725
1726 if (!pi->pcie_dpm_key_disabled) {
1727 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1728 if (smc_result != PPSMC_Result_OK)
1729 return -EINVAL;
1730 }
1731
1732 ret = ci_enable_sclk_mclk_dpm(adev, false);
1733 if (ret)
1734 return ret;
1735
1736 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1737 if (smc_result != PPSMC_Result_OK)
1738 return -EINVAL;
1739
1740 return 0;
1741}
1742
1743static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1744{
1745 u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1746
1747 if (enable)
1748 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1749 else
1750 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1751 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1752}
1753
1754#if 0
1755static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1756 bool ac_power)
1757{
1758 struct ci_power_info *pi = ci_get_pi(adev);
1759 struct amdgpu_cac_tdp_table *cac_tdp_table =
1760 adev->pm.dpm.dyn_state.cac_tdp_table;
1761 u32 power_limit;
1762
1763 if (ac_power)
1764 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1765 else
1766 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1767
1768 ci_set_power_limit(adev, power_limit);
1769
1770 if (pi->caps_automatic_dc_transition) {
1771 if (ac_power)
1772 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1773 else
1774 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1775 }
1776
1777 return 0;
1778}
1779#endif
1780
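/* Parameterized SMC mailbox helpers: the argument travels through the
 * SMC_MSG_ARG_0 register, and the same register carries the reply for
 * messages that return a value (see ci_get_average_sclk_freq() below).
 */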
1781static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1782 PPSMC_Msg msg, u32 parameter)
1783{
1784 WREG32(mmSMC_MSG_ARG_0, parameter);
1785 return amdgpu_ci_send_msg_to_smc(adev, msg);
1786}
1787
1788static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1789 PPSMC_Msg msg, u32 *parameter)
1790{
1791 PPSMC_Result smc_result;
1792
1793 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1794
1795 if ((smc_result == PPSMC_Result_OK) && parameter)
1796 *parameter = RREG32(mmSMC_MSG_ARG_0);
1797
1798 return smc_result;
1799}
1800
1801static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1802{
1803 struct ci_power_info *pi = ci_get_pi(adev);
1804
1805 if (!pi->sclk_dpm_key_disabled) {
1806 PPSMC_Result smc_result =
1807 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1808 if (smc_result != PPSMC_Result_OK)
1809 return -EINVAL;
1810 }
1811
1812 return 0;
1813}
1814
1815static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1816{
1817 struct ci_power_info *pi = ci_get_pi(adev);
1818
1819 if (!pi->mclk_dpm_key_disabled) {
1820 PPSMC_Result smc_result =
1821 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1822 if (smc_result != PPSMC_Result_OK)
1823 return -EINVAL;
1824 }
1825
1826 return 0;
1827}
1828
1829static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1830{
1831 struct ci_power_info *pi = ci_get_pi(adev);
1832
1833 if (!pi->pcie_dpm_key_disabled) {
1834 PPSMC_Result smc_result =
1835 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1836 if (smc_result != PPSMC_Result_OK)
1837 return -EINVAL;
1838 }
1839
1840 return 0;
1841}
1842
1843static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1844{
1845 struct ci_power_info *pi = ci_get_pi(adev);
1846
1847 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1848 PPSMC_Result smc_result =
1849 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1850 if (smc_result != PPSMC_Result_OK)
1851 return -EINVAL;
1852 }
1853
1854 return 0;
1855}
1856
1857static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1858 u32 target_tdp)
1859{
1860 PPSMC_Result smc_result =
1861 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1862 if (smc_result != PPSMC_Result_OK)
1863 return -EINVAL;
1864 return 0;
1865}
1866
1867#if 0
1868static int ci_set_boot_state(struct amdgpu_device *adev)
1869{
1870 return ci_enable_sclk_mclk_dpm(adev, false);
1871}
1872#endif
1873
1874static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1875{
1876 u32 sclk_freq;
1877 PPSMC_Result smc_result =
1878 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1879 PPSMC_MSG_API_GetSclkFrequency,
1880 &sclk_freq);
1881 if (smc_result != PPSMC_Result_OK)
1882 sclk_freq = 0;
1883
1884 return sclk_freq;
1885}
1886
1887static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1888{
1889 u32 mclk_freq;
1890 PPSMC_Result smc_result =
1891 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1892 PPSMC_MSG_API_GetMclkFrequency,
1893 &mclk_freq);
1894 if (smc_result != PPSMC_Result_OK)
1895 mclk_freq = 0;
1896
1897 return mclk_freq;
1898}
1899
1900static void ci_dpm_start_smc(struct amdgpu_device *adev)
1901{
1902 int i;
1903
1904 amdgpu_ci_program_jump_on_start(adev);
1905 amdgpu_ci_start_smc_clock(adev);
1906 amdgpu_ci_start_smc(adev);
1907 for (i = 0; i < adev->usec_timeout; i++) {
1908 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1909 break;
1910 }
1911}
1912
1913static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1914{
1915 amdgpu_ci_reset_smc(adev);
1916 amdgpu_ci_stop_smc_clock(adev);
1917}
1918
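/* The SMU7 firmware header stores the SRAM offsets of the tables the
 * driver uploads or patches later (DPM table, soft registers, MC register
 * table, fan table, MC arbiter timing table); cache them in ci_power_info.
 */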
1919static int ci_process_firmware_header(struct amdgpu_device *adev)
1920{
1921 struct ci_power_info *pi = ci_get_pi(adev);
1922 u32 tmp;
1923 int ret;
1924
1925 ret = amdgpu_ci_read_smc_sram_dword(adev,
1926 SMU7_FIRMWARE_HEADER_LOCATION +
1927 offsetof(SMU7_Firmware_Header, DpmTable),
1928 &tmp, pi->sram_end);
1929 if (ret)
1930 return ret;
1931
1932 pi->dpm_table_start = tmp;
1933
1934 ret = amdgpu_ci_read_smc_sram_dword(adev,
1935 SMU7_FIRMWARE_HEADER_LOCATION +
1936 offsetof(SMU7_Firmware_Header, SoftRegisters),
1937 &tmp, pi->sram_end);
1938 if (ret)
1939 return ret;
1940
1941 pi->soft_regs_start = tmp;
1942
1943 ret = amdgpu_ci_read_smc_sram_dword(adev,
1944 SMU7_FIRMWARE_HEADER_LOCATION +
1945 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1946 &tmp, pi->sram_end);
1947 if (ret)
1948 return ret;
1949
1950 pi->mc_reg_table_start = tmp;
1951
1952 ret = amdgpu_ci_read_smc_sram_dword(adev,
1953 SMU7_FIRMWARE_HEADER_LOCATION +
1954 offsetof(SMU7_Firmware_Header, FanTable),
1955 &tmp, pi->sram_end);
1956 if (ret)
1957 return ret;
1958
1959 pi->fan_table_start = tmp;
1960
1961 ret = amdgpu_ci_read_smc_sram_dword(adev,
1962 SMU7_FIRMWARE_HEADER_LOCATION +
1963 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1964 &tmp, pi->sram_end);
1965 if (ret)
1966 return ret;
1967
1968 pi->arb_table_start = tmp;
1969
1970 return 0;
1971}
1972
1973static void ci_read_clock_registers(struct amdgpu_device *adev)
1974{
1975 struct ci_power_info *pi = ci_get_pi(adev);
1976
1977 pi->clock_registers.cg_spll_func_cntl =
1978 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1979 pi->clock_registers.cg_spll_func_cntl_2 =
1980 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1981 pi->clock_registers.cg_spll_func_cntl_3 =
1982 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1983 pi->clock_registers.cg_spll_func_cntl_4 =
1984 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1985 pi->clock_registers.cg_spll_spread_spectrum =
1986 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1987 pi->clock_registers.cg_spll_spread_spectrum_2 =
1988 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1989 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1990 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1991 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1992 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1993 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1994 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1995 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1996 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
1997 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
1998}
1999
2000static void ci_init_sclk_t(struct amdgpu_device *adev)
2001{
2002 struct ci_power_info *pi = ci_get_pi(adev);
2003
2004 pi->low_sclk_interrupt_t = 0;
2005}
2006
2007static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2008 bool enable)
2009{
2010 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2011
2012 if (enable)
2013 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2014 else
2015 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2016 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2017}
2018
2019static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2020{
2021 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2022
2023 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2024
2025 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2026}
2027
2028#if 0
2029static int ci_enter_ulp_state(struct amdgpu_device *adev)
2030{
2031
2032 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2033
2034 udelay(25000);
2035
2036 return 0;
2037}
2038
2039static int ci_exit_ulp_state(struct amdgpu_device *adev)
2040{
2041 int i;
2042
2043 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2044
2045 udelay(7000);
2046
2047 for (i = 0; i < adev->usec_timeout; i++) {
2048 if (RREG32(mmSMC_RESP_0) == 1)
2049 break;
2050 udelay(1000);
2051 }
2052
2053 return 0;
2054}
2055#endif
2056
2057static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2058 bool has_display)
2059{
2060 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2061
2062 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2063}
2064
2065static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2066 bool enable)
2067{
2068 struct ci_power_info *pi = ci_get_pi(adev);
2069
2070 if (enable) {
2071 if (pi->caps_sclk_ds) {
2072 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2073 return -EINVAL;
2074 } else {
2075 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2076 return -EINVAL;
2077 }
2078 } else {
2079 if (pi->caps_sclk_ds) {
2080 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2081 return -EINVAL;
2082 }
2083 }
2084
2085 return 0;
2086}
2087
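/* Derive the display-gap counters from the refresh rate and vblank length:
 * frame_time_in_us = 1000000 / refresh_rate, and the pre-vblank window is
 * the frame minus a 200 us guard band and the vblank itself.  The write to
 * CG_DISPLAY_GAP_CNTL2 converts microseconds to reference-clock ticks;
 * assuming reference_freq is in the driver's usual 10 kHz units (e.g. 2700
 * for 27 MHz), ref_clock / 100 is ticks per microsecond.  For example,
 * 60 Hz with a 500 us vblank gives a 16666 us frame and a 15966 us
 * pre-vblank window, and VBlankTimeout receives the remainder (guard band
 * plus vblank).
 */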
2088static void ci_program_display_gap(struct amdgpu_device *adev)
2089{
2090 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2091 u32 pre_vbi_time_in_us;
2092 u32 frame_time_in_us;
2093 u32 ref_clock = adev->clock.spll.reference_freq;
2094 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2095 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2096
2097 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2098 if (adev->pm.dpm.new_active_crtc_count > 0)
2099 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2100 else
2101 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2102 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2103
2104 if (refresh_rate == 0)
2105 refresh_rate = 60;
2106 if (vblank_time == 0xffffffff)
2107 vblank_time = 500;
2108 frame_time_in_us = 1000000 / refresh_rate;
2109 pre_vbi_time_in_us =
2110 frame_time_in_us - 200 - vblank_time;
2111 tmp = pre_vbi_time_in_us * (ref_clock / 100);
2112
2113 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2114 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2115 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2116
2117
2118 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2119
2120}
2121
2122static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2123{
2124 struct ci_power_info *pi = ci_get_pi(adev);
2125 u32 tmp;
2126
2127 if (enable) {
2128 if (pi->caps_sclk_ss_support) {
2129 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2130 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2131 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2132 }
2133 } else {
2134 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2135 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2136 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2137
2138 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2139 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2140 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2141 }
2142}
2143
2144static void ci_program_sstp(struct amdgpu_device *adev)
2145{
2146 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2147 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2148 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2149}
2150
2151static void ci_enable_display_gap(struct amdgpu_device *adev)
2152{
2153 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2154
2155 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2156 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2157 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2158 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2159
2160 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2161}
2162
2163static void ci_program_vc(struct amdgpu_device *adev)
2164{
2165 u32 tmp;
2166
2167 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2168 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2169 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2170
2171 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2172 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2173 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2174 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2175 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2176 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2177 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2178 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2179}
2180
2181static void ci_clear_vc(struct amdgpu_device *adev)
2182{
2183 u32 tmp;
2184
2185 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2186 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2187 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2188
2189 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2190 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2191 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2192 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2193 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2194 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2195 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2196 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2197}
2198
2199static int ci_upload_firmware(struct amdgpu_device *adev)
2200{
2201 struct ci_power_info *pi = ci_get_pi(adev);
2202 int i, ret;
2203
2204 for (i = 0; i < adev->usec_timeout; i++) {
2205 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2206 break;
2207 }
2208 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2209
2210 amdgpu_ci_stop_smc_clock(adev);
2211 amdgpu_ci_reset_smc(adev);
2212
2213 ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2214
2215 return ret;
2216
2217}
2218
2219static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2220 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2221 struct atom_voltage_table *voltage_table)
2222{
2223 u32 i;
2224
2225 if (voltage_dependency_table == NULL)
2226 return -EINVAL;
2227
2228 voltage_table->mask_low = 0;
2229 voltage_table->phase_delay = 0;
2230
2231 voltage_table->count = voltage_dependency_table->count;
2232 for (i = 0; i < voltage_table->count; i++) {
2233 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2234 voltage_table->entries[i].smio_low = 0;
2235 }
2236
2237 return 0;
2238}
2239
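/* Build one voltage table per rail (VDDC, VDDCI, MVDD).  GPIO-controlled
 * rails read an ATOM GPIO lookup table; SVI2-controlled rails synthesize
 * the table from the matching clock-voltage dependency table.  Each result
 * is trimmed to fit the SMU7 per-rail level limit.
 */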
2240static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2241{
2242 struct ci_power_info *pi = ci_get_pi(adev);
2243 int ret;
2244
2245 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2246 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2247 VOLTAGE_OBJ_GPIO_LUT,
2248 &pi->vddc_voltage_table);
2249 if (ret)
2250 return ret;
2251 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2252 ret = ci_get_svi2_voltage_table(adev,
2253 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2254 &pi->vddc_voltage_table);
2255 if (ret)
2256 return ret;
2257 }
2258
2259 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2260 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2261 &pi->vddc_voltage_table);
2262
2263 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2264 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2265 VOLTAGE_OBJ_GPIO_LUT,
2266 &pi->vddci_voltage_table);
2267 if (ret)
2268 return ret;
2269 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2270 ret = ci_get_svi2_voltage_table(adev,
2271 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2272 &pi->vddci_voltage_table);
2273 if (ret)
2274 return ret;
2275 }
2276
2277 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2278 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2279 &pi->vddci_voltage_table);
2280
2281 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2282 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2283 VOLTAGE_OBJ_GPIO_LUT,
2284 &pi->mvdd_voltage_table);
2285 if (ret)
2286 return ret;
2287 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2288 ret = ci_get_svi2_voltage_table(adev,
2289 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2290 &pi->mvdd_voltage_table);
2291 if (ret)
2292 return ret;
2293 }
2294
2295 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2296 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2297 &pi->mvdd_voltage_table);
2298
2299 return 0;
2300}
2301
2302static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2303 struct atom_voltage_table_entry *voltage_table,
2304 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2305{
2306 int ret;
2307
2308 ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2309 &smc_voltage_table->StdVoltageHiSidd,
2310 &smc_voltage_table->StdVoltageLoSidd);
2311
2312 if (ret) {
2313 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2314 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2315 }
2316
2317 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2318 smc_voltage_table->StdVoltageHiSidd =
2319 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2320 smc_voltage_table->StdVoltageLoSidd =
2321 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2322}
2323
2324static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2325 SMU7_Discrete_DpmTable *table)
2326{
2327 struct ci_power_info *pi = ci_get_pi(adev);
2328 unsigned int count;
2329
2330 table->VddcLevelCount = pi->vddc_voltage_table.count;
2331 for (count = 0; count < table->VddcLevelCount; count++) {
2332 ci_populate_smc_voltage_table(adev,
2333 &pi->vddc_voltage_table.entries[count],
2334 &table->VddcLevel[count]);
2335
2336 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2337 table->VddcLevel[count].Smio |=
2338 pi->vddc_voltage_table.entries[count].smio_low;
2339 else
2340 table->VddcLevel[count].Smio = 0;
2341 }
2342 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2343
2344 return 0;
2345}
2346
2347static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2348 SMU7_Discrete_DpmTable *table)
2349{
2350 unsigned int count;
2351 struct ci_power_info *pi = ci_get_pi(adev);
2352
2353 table->VddciLevelCount = pi->vddci_voltage_table.count;
2354 for (count = 0; count < table->VddciLevelCount; count++) {
2355 ci_populate_smc_voltage_table(adev,
2356 &pi->vddci_voltage_table.entries[count],
2357 &table->VddciLevel[count]);
2358
2359 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2360 table->VddciLevel[count].Smio |=
2361 pi->vddci_voltage_table.entries[count].smio_low;
2362 else
2363 table->VddciLevel[count].Smio = 0;
2364 }
2365 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2366
2367 return 0;
2368}
2369
2370static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2371 SMU7_Discrete_DpmTable *table)
2372{
2373 struct ci_power_info *pi = ci_get_pi(adev);
2374 unsigned int count;
2375
2376 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2377 for (count = 0; count < table->MvddLevelCount; count++) {
2378 ci_populate_smc_voltage_table(adev,
2379 &pi->mvdd_voltage_table.entries[count],
2380 &table->MvddLevel[count]);
2381
2382 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2383 table->MvddLevel[count].Smio |=
2384 pi->mvdd_voltage_table.entries[count].smio_low;
2385 else
2386 table->MvddLevel[count].Smio = 0;
2387 }
2388 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2389
2390 return 0;
2391}
2392
2393static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2394 SMU7_Discrete_DpmTable *table)
2395{
2396 int ret;
2397
2398 ret = ci_populate_smc_vddc_table(adev, table);
2399 if (ret)
2400 return ret;
2401
2402 ret = ci_populate_smc_vddci_table(adev, table);
2403 if (ret)
2404 return ret;
2405
2406 ret = ci_populate_smc_mvdd_table(adev, table);
2407 if (ret)
2408 return ret;
2409
2410 return 0;
2411}
2412
2413static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2414 SMU7_Discrete_VoltageLevel *voltage)
2415{
2416 struct ci_power_info *pi = ci_get_pi(adev);
2417 u32 i = 0;
2418
2419 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2420 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2421 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2422 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2423 break;
2424 }
2425 }
2426
2427 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2428 return -EINVAL;
2429 }
2430
2431 return -EINVAL; /* reached when MVDD control is absent and also after a successful lookup; callers treat nonzero as "no MVDD value" */
2432}
2433
2434static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2435 struct atom_voltage_table_entry *voltage_table,
2436 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2437{
2438 u16 v_index, idx;
2439 bool voltage_found = false;
2440 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2441 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2442
2443 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2444 return -EINVAL;
2445
2446 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2447 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2448 if (voltage_table->value ==
2449 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2450 voltage_found = true;
2451 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2452 idx = v_index;
2453 else
2454 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2455 *std_voltage_lo_sidd =
2456 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2457 *std_voltage_hi_sidd =
2458 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2459 break;
2460 }
2461 }
2462
2463 if (!voltage_found) {
2464 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2465 if (voltage_table->value <=
2466 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2467 voltage_found = true;
2468 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2469 idx = v_index;
2470 else
2471 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2472 *std_voltage_lo_sidd =
2473 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2474 *std_voltage_hi_sidd =
2475 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2476 break;
2477 }
2478 }
2479 }
2480 }
2481
2482 return 0;
2483}
2484
2485static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2486 const struct amdgpu_phase_shedding_limits_table *limits,
2487 u32 sclk,
2488 u32 *phase_shedding)
2489{
2490 unsigned int i;
2491
2492 *phase_shedding = 1;
2493
2494 for (i = 0; i < limits->count; i++) {
2495 if (sclk < limits->entries[i].sclk) {
2496 *phase_shedding = i;
2497 break;
2498 }
2499 }
2500}
2501
2502static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2503 const struct amdgpu_phase_shedding_limits_table *limits,
2504 u32 mclk,
2505 u32 *phase_shedding)
2506{
2507 unsigned int i;
2508
2509 *phase_shedding = 1;
2510
2511 for (i = 0; i < limits->count; i++) {
2512 if (mclk < limits->entries[i].mclk) {
2513 *phase_shedding = i;
2514 break;
2515 }
2516 }
2517}
2518
2519static int ci_init_arb_table_index(struct amdgpu_device *adev)
2520{
2521 struct ci_power_info *pi = ci_get_pi(adev);
2522 u32 tmp;
2523 int ret;
2524
2525 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2526 &tmp, pi->sram_end);
2527 if (ret)
2528 return ret;
2529
2530 tmp &= 0x00FFFFFF;
2531 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2532
2533 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2534 tmp, pi->sram_end);
2535}
2536
2537static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2538 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2539 u32 clock, u32 *voltage)
2540{
2541 u32 i = 0;
2542
2543 if (allowed_clock_voltage_table->count == 0)
2544 return -EINVAL;
2545
2546 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2547 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2548 *voltage = allowed_clock_voltage_table->entries[i].v;
2549 return 0;
2550 }
2551 }
2552
2553 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2554
2555 return 0;
2556}
2557
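/* Pick the deep-sleep divider: the largest divider ID i (counting down
 * from CISLAND_MAX_DEEPSLEEP_DIVIDER_ID) for which sclk >> i still meets
 * the minimum engine clock.  For example, with min = 5000 and sclk = 80000
 * (illustrative values, assuming the maximum divider ID is at least 5),
 * 80000 >> 5 = 2500 falls short but 80000 >> 4 = 5000 qualifies, so 4 is
 * returned.
 */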
2558static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2559{
2560 u32 i;
2561 u32 tmp;
2562 u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2563
2564 if (sclk < min)
2565 return 0;
2566
2567 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2568 tmp = sclk >> i;
2569 if (tmp >= min || i == 0)
2570 break;
2571 }
2572
2573 return (u8)i;
2574}
2575
2576static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2577{
2578 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2579}
2580
2581static int ci_reset_to_default(struct amdgpu_device *adev)
2582{
2583 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2584 0 : -EINVAL;
2585}
2586
2587static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2588{
2589 u32 tmp;
2590
2591 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2592
2593 if (tmp == MC_CG_ARB_FREQ_F0)
2594 return 0;
2595
2596 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2597}
2598
2599static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2600 const u32 engine_clock,
2601 const u32 memory_clock,
2602 u32 *dram_timing2)
2603{
2604 bool patch;
2605 u32 tmp, tmp2;
2606
2607 tmp = RREG32(mmMC_SEQ_MISC0);
2608 patch = ((tmp & 0x0000f00) == 0x300);
2609
2610 if (patch &&
2611 ((adev->pdev->device == 0x67B0) ||
2612 (adev->pdev->device == 0x67B1))) {
2613 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2614 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2615 *dram_timing2 &= ~0x00ff0000;
2616 *dram_timing2 |= tmp2 << 16;
2617 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2618 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2619 *dram_timing2 &= ~0x00ff0000;
2620 *dram_timing2 |= tmp2 << 16;
2621 }
2622 }
2623}
2624
2625static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2626 u32 sclk,
2627 u32 mclk,
2628 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2629{
2630 u32 dram_timing;
2631 u32 dram_timing2;
2632 u32 burst_time;
2633
2634 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2635
2636 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
2637 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2638 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2639
2640 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2641
2642 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2643 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2644 arb_regs->McArbBurstTime = (u8)burst_time;
2645
2646 return 0;
2647}
2648
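/* Program MC arbiter timings for every (sclk, mclk) pair in the DPM
 * tables and upload the whole matrix to the arb table in SMC SRAM.
 */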
2649static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2650{
2651 struct ci_power_info *pi = ci_get_pi(adev);
2652 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2653 u32 i, j;
2654 int ret = 0;
2655
2656 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2657
2658 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2659 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2660 ret = ci_populate_memory_timing_parameters(adev,
2661 pi->dpm_table.sclk_table.dpm_levels[i].value,
2662 pi->dpm_table.mclk_table.dpm_levels[j].value,
2663 &arb_regs.entries[i][j]);
2664 if (ret)
2665 break;
2666 }
2667 }
2668
2669 if (ret == 0)
2670 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2671 pi->arb_table_start,
2672 (u8 *)&arb_regs,
2673 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2674 pi->sram_end);
2675
2676 return ret;
2677}
2678
2679static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2680{
2681 struct ci_power_info *pi = ci_get_pi(adev);
2682
2683 if (pi->need_update_smu7_dpm_table == 0)
2684 return 0;
2685
2686 return ci_do_program_memory_timing_parameters(adev);
2687}
2688
2689static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2690 struct amdgpu_ps *amdgpu_boot_state)
2691{
2692 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2693 struct ci_power_info *pi = ci_get_pi(adev);
2694 u32 level = 0;
2695
2696 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2697 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2698 boot_state->performance_levels[0].sclk) {
2699 pi->smc_state_table.GraphicsBootLevel = level;
2700 break;
2701 }
2702 }
2703
2704 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2705 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2706 boot_state->performance_levels[0].mclk) {
2707 pi->smc_state_table.MemoryBootLevel = level;
2708 break;
2709 }
2710 }
2711}
2712
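/* Collapse a DPM table into an enable bitmask: bit i is set iff level i
 * is enabled.  E.g. levels {on, on, off, on} yield 0b1011.
 */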
2713static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2714{
2715 u32 i;
2716 u32 mask_value = 0;
2717
2718 for (i = dpm_table->count; i > 0; i--) {
2719 mask_value = mask_value << 1;
2720 if (dpm_table->dpm_levels[i-1].enabled)
2721 mask_value |= 0x1;
2722 else
2723 mask_value &= 0xFFFFFFFE;
2724 }
2725
2726 return mask_value;
2727}
2728
2729static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2730 SMU7_Discrete_DpmTable *table)
2731{
2732 struct ci_power_info *pi = ci_get_pi(adev);
2733 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2734 u32 i;
2735
2736 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2737 table->LinkLevel[i].PcieGenSpeed =
2738 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2739 table->LinkLevel[i].PcieLaneCount =
2740 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2741 table->LinkLevel[i].EnabledForActivity = 1;
2742 table->LinkLevel[i].DownT = cpu_to_be32(5);
2743 table->LinkLevel[i].UpT = cpu_to_be32(30);
2744 }
2745
2746 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2747 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2748 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2749}
2750
2751static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2752 SMU7_Discrete_DpmTable *table)
2753{
2754 u32 count;
2755 struct atom_clock_dividers dividers;
2756 int ret = -EINVAL;
2757
2758 table->UvdLevelCount =
2759 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2760
2761 for (count = 0; count < table->UvdLevelCount; count++) {
2762 table->UvdLevel[count].VclkFrequency =
2763 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2764 table->UvdLevel[count].DclkFrequency =
2765 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2766 table->UvdLevel[count].MinVddc =
2767 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2768 table->UvdLevel[count].MinVddcPhases = 1;
2769
2770 ret = amdgpu_atombios_get_clock_dividers(adev,
2771 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2772 table->UvdLevel[count].VclkFrequency, false, &dividers);
2773 if (ret)
2774 return ret;
2775
2776 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2777
2778 ret = amdgpu_atombios_get_clock_dividers(adev,
2779 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2780 table->UvdLevel[count].DclkFrequency, false, &dividers);
2781 if (ret)
2782 return ret;
2783
2784 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2785
2786 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2787 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2788 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2789 }
2790
2791 return ret;
2792}
2793
2794static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2795 SMU7_Discrete_DpmTable *table)
2796{
2797 u32 count;
2798 struct atom_clock_dividers dividers;
2799 int ret = -EINVAL;
2800
2801 table->VceLevelCount =
2802 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2803
2804 for (count = 0; count < table->VceLevelCount; count++) {
2805 table->VceLevel[count].Frequency =
2806 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2807 table->VceLevel[count].MinVoltage =
2808 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2809 table->VceLevel[count].MinPhases = 1;
2810
2811 ret = amdgpu_atombios_get_clock_dividers(adev,
2812 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2813 table->VceLevel[count].Frequency, false, &dividers);
2814 if (ret)
2815 return ret;
2816
2817 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2818
2819 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2820 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2821 }
2822
2823 return ret;
2824
2825}
2826
2827static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2828 SMU7_Discrete_DpmTable *table)
2829{
2830 u32 count;
2831 struct atom_clock_dividers dividers;
2832 int ret = -EINVAL;
2833
2834 table->AcpLevelCount = (u8)
2835 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2836
2837 for (count = 0; count < table->AcpLevelCount; count++) {
2838 table->AcpLevel[count].Frequency =
2839 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2840 table->AcpLevel[count].MinVoltage =
2841 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2842 table->AcpLevel[count].MinPhases = 1;
2843
2844 ret = amdgpu_atombios_get_clock_dividers(adev,
2845 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2846 table->AcpLevel[count].Frequency, false, &dividers);
2847 if (ret)
2848 return ret;
2849
2850 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2851
2852 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2853 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2854 }
2855
2856 return ret;
2857}
2858
2859static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2860 SMU7_Discrete_DpmTable *table)
2861{
2862 u32 count;
2863 struct atom_clock_dividers dividers;
2864 int ret = -EINVAL;
2865
2866 table->SamuLevelCount =
2867 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2868
2869 for (count = 0; count < table->SamuLevelCount; count++) {
2870 table->SamuLevel[count].Frequency =
2871 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2872 table->SamuLevel[count].MinVoltage =
2873 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2874 table->SamuLevel[count].MinPhases = 1;
2875
2876 ret = amdgpu_atombios_get_clock_dividers(adev,
2877 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2878 table->SamuLevel[count].Frequency, false, &dividers);
2879 if (ret)
2880 return ret;
2881
2882 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2883
2884 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2885 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2886 }
2887
2888 return ret;
2889}
2890
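/* Translate a target memory clock into MPLL register contents using the
 * ATOM-provided dividers.  When memory spread spectrum is supported, the
 * modulation rate (CLKS) and depth (CLKV) are derived from the nominal
 * VCO frequency, memory_clock * 2 (or * 4 in QDR mode) * 2^post_div, per
 * the formulas below; dll_state_on controls the MRDCK0/1 power-down bits.
 */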
2891static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2892 u32 memory_clock,
2893 SMU7_Discrete_MemoryLevel *mclk,
2894 bool strobe_mode,
2895 bool dll_state_on)
2896{
2897 struct ci_power_info *pi = ci_get_pi(adev);
2898 u32 dll_cntl = pi->clock_registers.dll_cntl;
2899 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2900 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2901 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2902 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2903 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2904 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2905 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2906 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2907 struct atom_mpll_param mpll_param;
2908 int ret;
2909
2910 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2911 if (ret)
2912 return ret;
2913
2914 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2915 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2916
2917 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2918 MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2919 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2920 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2921 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2922
2923 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2924 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2925
2926 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2927 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2928 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2929 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2930 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2931 }
2932
2933 if (pi->caps_mclk_ss_support) {
2934 struct amdgpu_atom_ss ss;
2935 u32 freq_nom;
2936 u32 tmp;
2937 u32 reference_clock = adev->clock.mpll.reference_freq;
2938
2939 if (mpll_param.qdr == 1)
2940 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2941 else
2942 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2943
2944 tmp = (freq_nom / reference_clock);
2945 tmp = tmp * tmp;
2946 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2947 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2948 u32 clks = reference_clock * 5 / ss.rate;
2949 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2950
2951 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2952 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2953
2954 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2955 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2956 }
2957 }
2958
2959 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2960 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2961
2962 if (dll_state_on)
2963 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2964 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2965 else
2966 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2967 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2968
2969 mclk->MclkFrequency = memory_clock;
2970 mclk->MpllFuncCntl = mpll_func_cntl;
2971 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2972 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2973 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2974 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2975 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2976 mclk->DllCntl = dll_cntl;
2977 mclk->MpllSs1 = mpll_ss1;
2978 mclk->MpllSs2 = mpll_ss2;
2979
2980 return 0;
2981}
2982
2983static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2984 u32 memory_clock,
2985 SMU7_Discrete_MemoryLevel *memory_level)
2986{
2987 struct ci_power_info *pi = ci_get_pi(adev);
2988 int ret;
2989 bool dll_state_on;
2990
2991 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2992 ret = ci_get_dependency_volt_by_clk(adev,
2993 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2994 memory_clock, &memory_level->MinVddc);
2995 if (ret)
2996 return ret;
2997 }
2998
2999 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3000 ret = ci_get_dependency_volt_by_clk(adev,
3001 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3002 memory_clock, &memory_level->MinVddci);
3003 if (ret)
3004 return ret;
3005 }
3006
3007 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3008 ret = ci_get_dependency_volt_by_clk(adev,
3009 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3010 memory_clock, &memory_level->MinMvdd);
3011 if (ret)
3012 return ret;
3013 }
3014
3015 memory_level->MinVddcPhases = 1;
3016
3017 if (pi->vddc_phase_shed_control)
3018 ci_populate_phase_value_based_on_mclk(adev,
3019 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3020 memory_clock,
3021 &memory_level->MinVddcPhases);
3022
3023 memory_level->EnabledForThrottle = 1;
3024 memory_level->UpH = 0;
3025 memory_level->DownH = 100;
3026 memory_level->VoltageDownH = 0;
3027 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3028
3029 memory_level->StutterEnable = false;
3030 memory_level->StrobeEnable = false;
3031 memory_level->EdcReadEnable = false;
3032 memory_level->EdcWriteEnable = false;
3033 memory_level->RttEnable = false;
3034
3035 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3036
3037 if (pi->mclk_stutter_mode_threshold &&
3038 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3039 (!pi->uvd_enabled) &&
3040 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3041 (adev->pm.dpm.new_active_crtc_count <= 2))
3042 memory_level->StutterEnable = true;
3043
3044 if (pi->mclk_strobe_mode_threshold &&
3045 (memory_clock <= pi->mclk_strobe_mode_threshold))
3046 memory_level->StrobeEnable = true;
3047
3048 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3049 memory_level->StrobeRatio =
3050 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3051 if (pi->mclk_edc_enable_threshold &&
3052 (memory_clock > pi->mclk_edc_enable_threshold))
3053 memory_level->EdcReadEnable = true;
3054
3055 if (pi->mclk_edc_wr_enable_threshold &&
3056 (memory_clock > pi->mclk_edc_wr_enable_threshold))
3057 memory_level->EdcWriteEnable = true;
3058
3059 if (memory_level->StrobeEnable) {
3060 if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3061 ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3062 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1);
3063 else
3064 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1);
3065 } else {
3066 dll_state_on = pi->dll_default_on;
3067 }
3068 } else {
3069 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3070 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1);
3071 }
3072
3073 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3074 if (ret)
3075 return ret;
3076
3077 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3078 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3079 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3080 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3081
3082 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3083 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3084 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3085 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3086 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3087 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3088 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3089 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3090 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3091 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3092 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3093
3094 return 0;
3095}
3096
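/* The ACPI (lowest-power) level runs the engine from the reference clock
 * with the SPLL held in reset and powered off, and the memory DLLs held
 * in reset and powered down.
 */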
3097static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3098 SMU7_Discrete_DpmTable *table)
3099{
3100 struct ci_power_info *pi = ci_get_pi(adev);
3101 struct atom_clock_dividers dividers;
3102 SMU7_Discrete_VoltageLevel voltage_level;
3103 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3104 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3105 u32 dll_cntl = pi->clock_registers.dll_cntl;
3106 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3107 int ret;
3108
3109 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3110
3111 if (pi->acpi_vddc)
3112 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3113 else
3114 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3115
3116 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3117
3118 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3119
3120 ret = amdgpu_atombios_get_clock_dividers(adev,
3121 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3122 table->ACPILevel.SclkFrequency, false, &dividers);
3123 if (ret)
3124 return ret;
3125
3126 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3127 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3128 table->ACPILevel.DeepSleepDivId = 0;
3129
3130 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3131 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3132
3133 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3134 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3135
3136 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3137 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3138 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3139 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3140 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3141 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3142 table->ACPILevel.CcPwrDynRm = 0;
3143 table->ACPILevel.CcPwrDynRm1 = 0;
3144
3145 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3146 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3147 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3148 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3149 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3150 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3151 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3152 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3153 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3154 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3155 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3156
3157 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3158 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3159
3160 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3161 if (pi->acpi_vddci)
3162 table->MemoryACPILevel.MinVddci =
3163 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3164 else
3165 table->MemoryACPILevel.MinVddci =
3166 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3167 }
3168
3169 if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3170 table->MemoryACPILevel.MinMvdd = 0;
3171 else
3172 table->MemoryACPILevel.MinMvdd =
3173 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3174
3175 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3176 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3177 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3178 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3179
3180 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3181
3182 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3183 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3184 table->MemoryACPILevel.MpllAdFuncCntl =
3185 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3186 table->MemoryACPILevel.MpllDqFuncCntl =
3187 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3188 table->MemoryACPILevel.MpllFuncCntl =
3189 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3190 table->MemoryACPILevel.MpllFuncCntl_1 =
3191 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3192 table->MemoryACPILevel.MpllFuncCntl_2 =
3193 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3194 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3195 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3196
3197 table->MemoryACPILevel.EnabledForThrottle = 0;
3198 table->MemoryACPILevel.EnabledForActivity = 0;
3199 table->MemoryACPILevel.UpH = 0;
3200 table->MemoryACPILevel.DownH = 100;
3201 table->MemoryACPILevel.VoltageDownH = 0;
3202 table->MemoryACPILevel.ActivityLevel =
3203 cpu_to_be16((u16)pi->mclk_activity_target);
3204
3205 table->MemoryACPILevel.StutterEnable = false;
3206 table->MemoryACPILevel.StrobeEnable = false;
3207 table->MemoryACPILevel.EdcReadEnable = false;
3208 table->MemoryACPILevel.EdcWriteEnable = false;
3209 table->MemoryACPILevel.RttEnable = false;
3210
3211 return 0;
3212}
3213
3214
3215static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3216{
3217 struct ci_power_info *pi = ci_get_pi(adev);
3218 struct ci_ulv_parm *ulv = &pi->ulv;
3219
3220 if (ulv->supported) {
3221 if (enable)
3222 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3223 0 : -EINVAL;
3224 else
3225 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3226 0 : -EINVAL;
3227 }
3228
3229 return 0;
3230}
3231
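/* The ULV offset is how far below the first sclk-dependency voltage the
 * rail may drop.  GPIO-style control stores it directly in millivolts;
 * SVI2 converts it to VID steps via SCALE2/SCALE1 = 100/625, i.e. one
 * step per 6.25 mV.
 */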
3232static int ci_populate_ulv_level(struct amdgpu_device *adev,
3233 SMU7_Discrete_Ulv *state)
3234{
3235 struct ci_power_info *pi = ci_get_pi(adev);
3236 u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3237
3238 state->CcPwrDynRm = 0;
3239 state->CcPwrDynRm1 = 0;
3240
3241 if (ulv_voltage == 0) {
3242 pi->ulv.supported = false;
3243 return 0;
3244 }
3245
3246 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3247 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3248 state->VddcOffset = 0;
3249 else
3250 state->VddcOffset =
3251 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3252 } else {
3253 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3254 state->VddcOffsetVid = 0;
3255 else
3256 state->VddcOffsetVid = (u8)
3257 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3258 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3259 }
3260 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3261
3262 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3263 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3264 state->VddcOffset = cpu_to_be16(state->VddcOffset);
3265
3266 return 0;
3267}
3268
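/* Compute SPLL settings for an engine clock.  The feedback divider comes
 * from the ATOM divider query; engine spread spectrum, when present, uses
 * clk_s = ref * 5 / (ref_div * ss.rate) and
 * clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000).
 */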
3269static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3270 u32 engine_clock,
3271 SMU7_Discrete_GraphicsLevel *sclk)
3272{
3273 struct ci_power_info *pi = ci_get_pi(adev);
3274 struct atom_clock_dividers dividers;
3275 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3276 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3277 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3278 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3279 u32 reference_clock = adev->clock.spll.reference_freq;
3280 u32 reference_divider;
3281 u32 fbdiv;
3282 int ret;
3283
3284 ret = amdgpu_atombios_get_clock_dividers(adev,
3285 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3286 engine_clock, false, &dividers);
3287 if (ret)
3288 return ret;
3289
3290 reference_divider = 1 + dividers.ref_div;
3291 fbdiv = dividers.fb_div & 0x3FFFFFF;
3292
3293 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3294 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3295 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3296
3297 if (pi->caps_sclk_ss_support) {
3298 struct amdgpu_atom_ss ss;
3299 u32 vco_freq = engine_clock * dividers.post_div;
3300
3301 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3302 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3303 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3304 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3305
3306 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3307 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3308 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3309
3310 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3311 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3312 }
3313 }
3314
3315 sclk->SclkFrequency = engine_clock;
3316 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3317 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3318 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3319 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3320 sclk->SclkDid = (u8)dividers.post_divider;
3321
3322 return 0;
3323}
3324
3325static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3326 u32 engine_clock,
3327 u16 sclk_activity_level_t,
3328 SMU7_Discrete_GraphicsLevel *graphic_level)
3329{
3330 struct ci_power_info *pi = ci_get_pi(adev);
3331 int ret;
3332
3333 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3334 if (ret)
3335 return ret;
3336
3337 ret = ci_get_dependency_volt_by_clk(adev,
3338 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3339 engine_clock, &graphic_level->MinVddc);
3340 if (ret)
3341 return ret;
3342
3343 graphic_level->SclkFrequency = engine_clock;
3344
3345 graphic_level->Flags = 0;
3346 graphic_level->MinVddcPhases = 1;
3347
3348 if (pi->vddc_phase_shed_control)
3349 ci_populate_phase_value_based_on_sclk(adev,
3350 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3351 engine_clock,
3352 &graphic_level->MinVddcPhases);
3353
3354 graphic_level->ActivityLevel = sclk_activity_level_t;
3355
3356 graphic_level->CcPwrDynRm = 0;
3357 graphic_level->CcPwrDynRm1 = 0;
3358 graphic_level->EnabledForThrottle = 1;
3359 graphic_level->UpH = 0;
3360 graphic_level->DownH = 0;
3361 graphic_level->VoltageDownH = 0;
3362 graphic_level->PowerThrottle = 0;
3363
3364 if (pi->caps_sclk_ds)
3365 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3366 CISLAND_MINIMUM_ENGINE_CLOCK);
3367
3368 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3369
3370 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3371 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3372 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3373 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3374 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3375 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3376 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3377 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3378 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3379 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3380 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3381
3382 return 0;
3383}
3384
3385static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3386{
3387 struct ci_power_info *pi = ci_get_pi(adev);
3388 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3389 u32 level_array_address = pi->dpm_table_start +
3390 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3391 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3392 SMU7_MAX_LEVELS_GRAPHICS;
3393 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3394 u32 i, ret;
3395
3396 memset(levels, 0, level_array_size);
3397
3398 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3399 ret = ci_populate_single_graphic_level(adev,
3400 dpm_table->sclk_table.dpm_levels[i].value,
3401 (u16)pi->activity_target[i],
3402 &pi->smc_state_table.GraphicsLevel[i]);
3403 if (ret)
3404 return ret;
3405 if (i > 1)
3406 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3407 if (i == (dpm_table->sclk_table.count - 1))
3408 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3409 PPSMC_DISPLAY_WATERMARK_HIGH;
3410 }
3411 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3412
3413 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3414 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3415 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3416
3417 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3418 (u8 *)levels, level_array_size,
3419 pi->sram_end);
3420 if (ret)
3421 return ret;
3422
3423 return 0;
3424}
3425
3426static int ci_populate_ulv_state(struct amdgpu_device *adev,
3427 SMU7_Discrete_Ulv *ulv_level)
3428{
3429 return ci_populate_ulv_level(adev, ulv_level);
3430}
3431
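/*
 * Build every MCLK (memory) DPM level and upload the array to SMC RAM.
 * A zero memory clock is rejected as invalid; the 0x67B0/0x67B1 (Hawaii)
 * quirk copies level 0's VDDC settings into level 1.
 */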
3432static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3433{
3434 struct ci_power_info *pi = ci_get_pi(adev);
3435 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3436 u32 level_array_address = pi->dpm_table_start +
3437 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3438 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3439 SMU7_MAX_LEVELS_MEMORY;
3440 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3441 u32 i, ret;
3442
3443 memset(levels, 0, level_array_size);
3444
3445 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3446 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3447 return -EINVAL;
3448 ret = ci_populate_single_memory_level(adev,
3449 dpm_table->mclk_table.dpm_levels[i].value,
3450 &pi->smc_state_table.MemoryLevel[i]);
3451 if (ret)
3452 return ret;
3453 }
3454
3455 pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3456
3457 if ((dpm_table->mclk_table.count >= 2) &&
3458 ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3459 pi->smc_state_table.MemoryLevel[1].MinVddc =
3460 pi->smc_state_table.MemoryLevel[0].MinVddc;
3461 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3462 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3463 }
3464
3465 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3466
3467 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3468 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3469 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3470
3471 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3472 PPSMC_DISPLAY_WATERMARK_HIGH;
3473
3474 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3475 (u8 *)levels, level_array_size,
3476 pi->sram_end);
3477 if (ret)
3478 return ret;
3479
3480 return 0;
3481}
3482
3483static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3484 struct ci_single_dpm_table* dpm_table,
3485 u32 count)
3486{
3487 u32 i;
3488
3489 dpm_table->count = count;
3490 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3491 dpm_table->dpm_levels[i].enabled = false;
3492}
3493
3494static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3495 u32 index, u32 pcie_gen, u32 pcie_lanes)
3496{
3497 dpm_table->dpm_levels[index].value = pcie_gen;
3498 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3499 dpm_table->dpm_levels[index].enabled = true;
3500}
3501
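/*
 * Populate the fixed six-entry PCIe DPM table from the power-saving and
 * performance gen/lane limits; if only one profile is valid it is
 * mirrored into the other.  Bonaire appears to need the maximum lane
 * count even for the lowest entry.
 */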
3502static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3503{
3504 struct ci_power_info *pi = ci_get_pi(adev);
3505
3506 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3507 return -EINVAL;
3508
3509 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3510 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3511 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3512 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3513 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3514 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3515 }
3516
3517 ci_reset_single_dpm_table(adev,
3518 &pi->dpm_table.pcie_speed_table,
3519 SMU7_MAX_LEVELS_LINK);
3520
3521 if (adev->asic_type == CHIP_BONAIRE)
3522 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3523 pi->pcie_gen_powersaving.min,
3524 pi->pcie_lane_powersaving.max);
3525 else
3526 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3527 pi->pcie_gen_powersaving.min,
3528 pi->pcie_lane_powersaving.min);
3529 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3530 pi->pcie_gen_performance.min,
3531 pi->pcie_lane_performance.min);
3532 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3533 pi->pcie_gen_powersaving.min,
3534 pi->pcie_lane_powersaving.max);
3535 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3536 pi->pcie_gen_performance.min,
3537 pi->pcie_lane_performance.max);
3538 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3539 pi->pcie_gen_powersaving.max,
3540 pi->pcie_lane_powersaving.max);
3541 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3542 pi->pcie_gen_performance.max,
3543 pi->pcie_lane_performance.max);
3544
3545 pi->dpm_table.pcie_speed_table.count = 6;
3546
3547 return 0;
3548}
3549
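/*
 * Derive the default SCLK/MCLK/VDDC/VDDCI/MVDD DPM tables from the
 * clock-voltage dependency tables, skipping duplicate clock entries,
 * and keep a "golden" copy so later overdrive changes can be compared
 * against the defaults.
 */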
3550static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3551{
3552 struct ci_power_info *pi = ci_get_pi(adev);
3553 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3554 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3555 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3556 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3557 struct amdgpu_cac_leakage_table *std_voltage_table =
3558 &adev->pm.dpm.dyn_state.cac_leakage_table;
3559 u32 i;
3560
3561 if (allowed_sclk_vddc_table == NULL)
3562 return -EINVAL;
3563 if (allowed_sclk_vddc_table->count < 1)
3564 return -EINVAL;
3565 if (allowed_mclk_table == NULL)
3566 return -EINVAL;
3567 if (allowed_mclk_table->count < 1)
3568 return -EINVAL;
3569
3570 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3571
3572 ci_reset_single_dpm_table(adev,
3573 &pi->dpm_table.sclk_table,
3574 SMU7_MAX_LEVELS_GRAPHICS);
3575 ci_reset_single_dpm_table(adev,
3576 &pi->dpm_table.mclk_table,
3577 SMU7_MAX_LEVELS_MEMORY);
3578 ci_reset_single_dpm_table(adev,
3579 &pi->dpm_table.vddc_table,
3580 SMU7_MAX_LEVELS_VDDC);
3581 ci_reset_single_dpm_table(adev,
3582 &pi->dpm_table.vddci_table,
3583 SMU7_MAX_LEVELS_VDDCI);
3584 ci_reset_single_dpm_table(adev,
3585 &pi->dpm_table.mvdd_table,
3586 SMU7_MAX_LEVELS_MVDD);
3587
3588 pi->dpm_table.sclk_table.count = 0;
3589 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3590 if ((i == 0) ||
3591 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3592 allowed_sclk_vddc_table->entries[i].clk)) {
3593 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3594 allowed_sclk_vddc_table->entries[i].clk;
3595 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3596 (i == 0);
3597 pi->dpm_table.sclk_table.count++;
3598 }
3599 }
3600
3601 pi->dpm_table.mclk_table.count = 0;
3602 for (i = 0; i < allowed_mclk_table->count; i++) {
3603 if ((i == 0) ||
3604 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3605 allowed_mclk_table->entries[i].clk)) {
3606 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3607 allowed_mclk_table->entries[i].clk;
3608 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3609 (i == 0);
3610 pi->dpm_table.mclk_table.count++;
3611 }
3612 }
3613
3614 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3615 pi->dpm_table.vddc_table.dpm_levels[i].value =
3616 allowed_sclk_vddc_table->entries[i].v;
3617 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3618 std_voltage_table->entries[i].leakage;
3619 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3620 }
3621 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3622
3623 allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3624 if (allowed_mclk_table) {
3625 for (i = 0; i < allowed_mclk_table->count; i++) {
3626 pi->dpm_table.vddci_table.dpm_levels[i].value =
3627 allowed_mclk_table->entries[i].v;
3628 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3629 }
3630 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3631 }
3632
3633 allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3634 if (allowed_mclk_table) {
3635 for (i = 0; i < allowed_mclk_table->count; i++) {
3636 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3637 allowed_mclk_table->entries[i].v;
3638 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3639 }
3640 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3641 }
3642
3643 ci_setup_default_pcie_tables(adev);
3644
3645 /* save a copy of the default DPM table */
3646 memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3647 sizeof(struct ci_dpm_table));
3648
3649 return 0;
3650}
3651
3652static int ci_find_boot_level(struct ci_single_dpm_table *table,
3653 u32 value, u32 *boot_level)
3654{
3655 u32 i;
3656 int ret = -EINVAL;
3657
3658 for(i = 0; i < table->count; i++) {
3659 if (value == table->dpm_levels[i].value) {
3660 *boot_level = i;
3661 ret = 0;
3662 }
3663 }
3664
3665 return ret;
3666}
3667
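/*
 * Build the complete SMU7_Discrete_DpmTable: default DPM tables, voltage
 * tables, ULV state, graphics/memory/link levels, the ACPI/VCE/ACP/SAMU
 * levels, boot levels and control intervals.  Multi-byte fields are
 * byte-swapped to the SMC's big-endian layout before the table is
 * written to SMC RAM.
 */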
3668static int ci_init_smc_table(struct amdgpu_device *adev)
3669{
3670 struct ci_power_info *pi = ci_get_pi(adev);
3671 struct ci_ulv_parm *ulv = &pi->ulv;
3672 struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3673 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3674 int ret;
3675
3676 ret = ci_setup_default_dpm_tables(adev);
3677 if (ret)
3678 return ret;
3679
3680 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3681 ci_populate_smc_voltage_tables(adev, table);
3682
3683 ci_init_fps_limits(adev);
3684
3685 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3686 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3687
3688 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3689 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3690
3691 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3692 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3693
3694 if (ulv->supported) {
3695 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3696 if (ret)
3697 return ret;
3698 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3699 }
3700
3701 ret = ci_populate_all_graphic_levels(adev);
3702 if (ret)
3703 return ret;
3704
3705 ret = ci_populate_all_memory_levels(adev);
3706 if (ret)
3707 return ret;
3708
3709 ci_populate_smc_link_level(adev, table);
3710
3711 ret = ci_populate_smc_acpi_level(adev, table);
3712 if (ret)
3713 return ret;
3714
3715 ret = ci_populate_smc_vce_level(adev, table);
3716 if (ret)
3717 return ret;
3718
3719 ret = ci_populate_smc_acp_level(adev, table);
3720 if (ret)
3721 return ret;
3722
3723 ret = ci_populate_smc_samu_level(adev, table);
3724 if (ret)
3725 return ret;
3726
3727 ret = ci_do_program_memory_timing_parameters(adev);
3728 if (ret)
3729 return ret;
3730
3731 ret = ci_populate_smc_uvd_level(adev, table);
3732 if (ret)
3733 return ret;
3734
3735 table->UvdBootLevel = 0;
3736 table->VceBootLevel = 0;
3737 table->AcpBootLevel = 0;
3738 table->SamuBootLevel = 0;
3739 table->GraphicsBootLevel = 0;
3740 table->MemoryBootLevel = 0;
3741
3742 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3743 pi->vbios_boot_state.sclk_bootup_value,
3744 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3745
3746 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3747 pi->vbios_boot_state.mclk_bootup_value,
3748 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3749
3750 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3751 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3752 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3753
3754 ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3755
3756 ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3757 if (ret)
3758 return ret;
3759
3760 table->UVDInterval = 1;
3761 table->VCEInterval = 1;
3762 table->ACPInterval = 1;
3763 table->SAMUInterval = 1;
3764 table->GraphicsVoltageChangeEnable = 1;
3765 table->GraphicsThermThrottleEnable = 1;
3766 table->GraphicsInterval = 1;
3767 table->VoltageInterval = 1;
3768 table->ThermalInterval = 1;
3769 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3770 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3771 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3772 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3773 table->MemoryVoltageChangeEnable = 1;
3774 table->MemoryInterval = 1;
3775 table->VoltageResponseTime = 0;
3776 table->VddcVddciDelta = 4000;
3777 table->PhaseResponseTime = 0;
3778 table->MemoryThermThrottleEnable = 1;
3779 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3780 table->PCIeGenInterval = 1;
3781 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3782 table->SVI2Enable = 1;
3783 else
3784 table->SVI2Enable = 0;
3785
3786 table->ThermGpio = 17;
3787 table->SclkStepSize = 0x4000;
3788
3789 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3790 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3791 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3792 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3793 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3794 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3795 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3796 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3797 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3798 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3799 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3800 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3801 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3802 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3803
3804 ret = amdgpu_ci_copy_bytes_to_smc(adev,
3805 pi->dpm_table_start +
3806 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3807 (u8 *)&table->SystemFlags,
3808 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3809 pi->sram_end);
3810 if (ret)
3811 return ret;
3812
3813 return 0;
3814}
3815
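/*
 * Enable only the DPM levels whose clock lies inside the requested
 * state's [low_limit, high_limit] window.
 */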
3816static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3817 struct ci_single_dpm_table *dpm_table,
3818 u32 low_limit, u32 high_limit)
3819{
3820 u32 i;
3821
3822 for (i = 0; i < dpm_table->count; i++) {
3823 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3824 (dpm_table->dpm_levels[i].value > high_limit))
3825 dpm_table->dpm_levels[i].enabled = false;
3826 else
3827 dpm_table->dpm_levels[i].enabled = true;
3828 }
3829}
3830
3831static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3832 u32 speed_low, u32 lanes_low,
3833 u32 speed_high, u32 lanes_high)
3834{
3835 struct ci_power_info *pi = ci_get_pi(adev);
3836 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3837 u32 i, j;
3838
3839 for (i = 0; i < pcie_table->count; i++) {
3840 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3841 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3842 (pcie_table->dpm_levels[i].value > speed_high) ||
3843 (pcie_table->dpm_levels[i].param1 > lanes_high))
3844 pcie_table->dpm_levels[i].enabled = false;
3845 else
3846 pcie_table->dpm_levels[i].enabled = true;
3847 }
3848
3849 for (i = 0; i < pcie_table->count; i++) {
3850 if (pcie_table->dpm_levels[i].enabled) {
3851 for (j = i + 1; j < pcie_table->count; j++) {
3852 if (pcie_table->dpm_levels[j].enabled) {
3853 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3854 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3855 pcie_table->dpm_levels[j].enabled = false;
3856 }
3857 }
3858 }
3859 }
3860}
3861
3862static int ci_trim_dpm_states(struct amdgpu_device *adev,
3863 struct amdgpu_ps *amdgpu_state)
3864{
3865 struct ci_ps *state = ci_get_ps(amdgpu_state);
3866 struct ci_power_info *pi = ci_get_pi(adev);
3867 u32 high_limit_count;
3868
3869 if (state->performance_level_count < 1)
3870 return -EINVAL;
3871
3872 if (state->performance_level_count == 1)
3873 high_limit_count = 0;
3874 else
3875 high_limit_count = 1;
3876
3877 ci_trim_single_dpm_states(adev,
3878 &pi->dpm_table.sclk_table,
3879 state->performance_levels[0].sclk,
3880 state->performance_levels[high_limit_count].sclk);
3881
3882 ci_trim_single_dpm_states(adev,
3883 &pi->dpm_table.mclk_table,
3884 state->performance_levels[0].mclk,
3885 state->performance_levels[high_limit_count].mclk);
3886
3887 ci_trim_pcie_dpm_states(adev,
3888 state->performance_levels[0].pcie_gen,
3889 state->performance_levels[0].pcie_lane,
3890 state->performance_levels[high_limit_count].pcie_gen,
3891 state->performance_levels[high_limit_count].pcie_lane);
3892
3893 return 0;
3894}
3895
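/*
 * Look up the VDDC required by the current display clock and request the
 * smallest satisfying voltage from the SMC via PPSMC_MSG_VddC_Request.
 */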
3896static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3897{
3898 struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3899 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3900 struct amdgpu_clock_voltage_dependency_table *vddc_table =
3901 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3902 u32 requested_voltage = 0;
3903 u32 i;
3904
3905 if (disp_voltage_table == NULL)
3906 return -EINVAL;
3907 if (!disp_voltage_table->count)
3908 return -EINVAL;
3909
3910 for (i = 0; i < disp_voltage_table->count; i++) {
3911 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3912 requested_voltage = disp_voltage_table->entries[i].v;
3913 }
3914
3915 for (i = 0; i < vddc_table->count; i++) {
3916 if (requested_voltage <= vddc_table->entries[i].v) {
3917 requested_voltage = vddc_table->entries[i].v;
3918 return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3919 PPSMC_MSG_VddC_Request,
3920 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3921 0 : -EINVAL;
3922 }
3923 }
3924
3925 return -EINVAL;
3926}
3927
3928static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3929{
3930 struct ci_power_info *pi = ci_get_pi(adev);
3931 PPSMC_Result result;
3932
3933 ci_apply_disp_minimum_voltage_request(adev);
3934
3935 if (!pi->sclk_dpm_key_disabled) {
3936 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3937 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3938 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3939 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3940 if (result != PPSMC_Result_OK)
3941 return -EINVAL;
3942 }
3943 }
3944
3945 if (!pi->mclk_dpm_key_disabled) {
3946 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3947 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3948 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3949 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3950 if (result != PPSMC_Result_OK)
3951 return -EINVAL;
3952 }
3953 }
3954
3955#if 0
3956 if (!pi->pcie_dpm_key_disabled) {
3957 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3958 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3959 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3960 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3961 if (result != PPSMC_Result_OK)
3962 return -EINVAL;
3963 }
3964 }
3965#endif
3966
3967 return 0;
3968}
3969
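/*
 * Compare the requested state's highest SCLK/MCLK with the current DPM
 * tables and flag what must be rebuilt: DPMTABLE_OD_UPDATE_* when a
 * clock is missing from the table, DPMTABLE_UPDATE_MCLK when the active
 * CRTC count changes.
 */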
3970static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3971 struct amdgpu_ps *amdgpu_state)
3972{
3973 struct ci_power_info *pi = ci_get_pi(adev);
3974 struct ci_ps *state = ci_get_ps(amdgpu_state);
3975 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3976 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3977 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3978 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3979 u32 i;
3980
3981 pi->need_update_smu7_dpm_table = 0;
3982
3983 for (i = 0; i < sclk_table->count; i++) {
3984 if (sclk == sclk_table->dpm_levels[i].value)
3985 break;
3986 }
3987
3988 if (i >= sclk_table->count) {
3989 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3990 } else {
3991 /* XXX check display min clock requirements; the self-comparison below is a placeholder and always false, so DPMTABLE_UPDATE_SCLK is never set here */
3992 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3993 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3994 }
3995
3996 for (i = 0; i < mclk_table->count; i++) {
3997 if (mclk == mclk_table->dpm_levels[i].value)
3998 break;
3999 }
4000
4001 if (i >= mclk_table->count)
4002 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4003
4004 if (adev->pm.dpm.current_active_crtc_count !=
4005 adev->pm.dpm.new_active_crtc_count)
4006 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4007}
4008
4009static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4010 struct amdgpu_ps *amdgpu_state)
4011{
4012 struct ci_power_info *pi = ci_get_pi(adev);
4013 struct ci_ps *state = ci_get_ps(amdgpu_state);
4014 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4015 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4016 struct ci_dpm_table *dpm_table = &pi->dpm_table;
4017 int ret;
4018
4019 if (!pi->need_update_smu7_dpm_table)
4020 return 0;
4021
4022 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4023 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4024
4025 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4026 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4027
4028 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4029 ret = ci_populate_all_graphic_levels(adev);
4030 if (ret)
4031 return ret;
4032 }
4033
4034 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4035 ret = ci_populate_all_memory_levels(adev);
4036 if (ret)
4037 return ret;
4038 }
4039
4040 return 0;
4041}
4042
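/*
 * Build the UVD DPM enable mask from the highest dependency-table
 * entries that fit the current VDDC limit and toggle UVD DPM in the
 * SMC.  MCLK DPM level 0 is masked off while UVD is active, presumably
 * to keep the lowest memory state out of reach during decode.
 */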
4043static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4044{
4045 struct ci_power_info *pi = ci_get_pi(adev);
4046 const struct amdgpu_clock_and_voltage_limits *max_limits;
4047 int i;
4048
4049 if (adev->pm.dpm.ac_power)
4050 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4051 else
4052 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4053
4054 if (enable) {
4055 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4056
4057 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4058 if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4059 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4060
4061 if (!pi->caps_uvd_dpm)
4062 break;
4063 }
4064 }
4065
4066 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4067 PPSMC_MSG_UVDDPM_SetEnabledMask,
4068 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4069
4070 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4071 pi->uvd_enabled = true;
4072 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4073 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4074 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4075 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4076 }
4077 } else {
4078 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4079 pi->uvd_enabled = false;
4080 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4081 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4082 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4083 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4084 }
4085 }
4086
4087 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4088 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4089 0 : -EINVAL;
4090}
4091
4092static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4093{
4094 struct ci_power_info *pi = ci_get_pi(adev);
4095 const struct amdgpu_clock_and_voltage_limits *max_limits;
4096 int i;
4097
4098 if (adev->pm.dpm.ac_power)
4099 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4100 else
4101 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4102
4103 if (enable) {
4104 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4105 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4106 if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4107 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4108
4109 if (!pi->caps_vce_dpm)
4110 break;
4111 }
4112 }
4113
4114 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4115 PPSMC_MSG_VCEDPM_SetEnabledMask,
4116 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4117 }
4118
4119 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4120 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4121 0 : -EINVAL;
4122}
4123
4124#if 0
4125static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4126{
4127 struct ci_power_info *pi = ci_get_pi(adev);
4128 const struct amdgpu_clock_and_voltage_limits *max_limits;
4129 int i;
4130
4131 if (adev->pm.dpm.ac_power)
4132 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4133 else
4134 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4135
4136 if (enable) {
4137 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4138 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4139 if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4140 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4141
4142 if (!pi->caps_samu_dpm)
4143 break;
4144 }
4145 }
4146
4147 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4148 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4149 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4150 }
4151 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4152 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4153 0 : -EINVAL;
4154}
4155
4156static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4157{
4158 struct ci_power_info *pi = ci_get_pi(adev);
4159 const struct amdgpu_clock_and_voltage_limits *max_limits;
4160 int i;
4161
4162 if (adev->pm.dpm.ac_power)
4163 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4164 else
4165 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4166
4167 if (enable) {
4168 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4169 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4170 if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4171 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4172
4173 if (!pi->caps_acp_dpm)
4174 break;
4175 }
4176 }
4177
4178 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4179 PPSMC_MSG_ACPDPM_SetEnabledMask,
4180 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4181 }
4182
4183 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4184 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4185 0 : -EINVAL;
4186}
4187#endif
4188
4189static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4190{
4191 struct ci_power_info *pi = ci_get_pi(adev);
4192 u32 tmp;
4193
4194 if (!gate) {
4195 if (pi->caps_uvd_dpm ||
4196 (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4197 pi->smc_state_table.UvdBootLevel = 0;
4198 else
4199 pi->smc_state_table.UvdBootLevel =
4200 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4201
4202 tmp = RREG32_SMC(ixDPM_TABLE_475);
4203 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4204 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4205 WREG32_SMC(ixDPM_TABLE_475, tmp);
4206 }
4207
4208 return ci_enable_uvd_dpm(adev, !gate);
4209}
4210
4211static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4212{
4213 u8 i;
4214 u32 min_evclk = 30000; /* ??? */
4215 struct amdgpu_vce_clock_voltage_dependency_table *table =
4216 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4217
4218 for (i = 0; i < table->count; i++) {
4219 if (table->entries[i].evclk >= min_evclk)
4220 return i;
4221 }
4222
4223 return table->count - 1;
4224}
4225
4226static int ci_update_vce_dpm(struct amdgpu_device *adev,
4227 struct amdgpu_ps *amdgpu_new_state,
4228 struct amdgpu_ps *amdgpu_current_state)
4229{
4230 struct ci_power_info *pi = ci_get_pi(adev);
4231 int ret = 0;
4232 u32 tmp;
4233
4234 if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4235 if (amdgpu_new_state->evclk) {
4236 /* turn the clocks on when encoding */
4237 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4238 AMD_CG_STATE_UNGATE);
4239 if (ret)
4240 return ret;
4241
4242 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4243 tmp = RREG32_SMC(ixDPM_TABLE_475);
4244 tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4245 tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4246 WREG32_SMC(ixDPM_TABLE_475, tmp);
4247
4248 ret = ci_enable_vce_dpm(adev, true);
4249 } else {
4250 /* turn the clocks off when not encoding */
4251 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4252 AMD_CG_STATE_GATE);
4253 if (ret)
4254 return ret;
4255
4256 ret = ci_enable_vce_dpm(adev, false);
4257 }
4258 }
4259 return ret;
4260}
4261
4262#if 0
4263static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4264{
4265 return ci_enable_samu_dpm(adev, gate);
4266}
4267
4268static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4269{
4270 struct ci_power_info *pi = ci_get_pi(adev);
4271 u32 tmp;
4272
4273 if (!gate) {
4274 pi->smc_state_table.AcpBootLevel = 0;
4275
4276 tmp = RREG32_SMC(ixDPM_TABLE_475);
4277 tmp &= ~AcpBootLevel_MASK;
4278 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4279 WREG32_SMC(ixDPM_TABLE_475, tmp);
4280 }
4281
4282 return ci_enable_acp_dpm(adev, !gate);
4283}
4284#endif
4285
4286static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4287 struct amdgpu_ps *amdgpu_state)
4288{
4289 struct ci_power_info *pi = ci_get_pi(adev);
4290 int ret;
4291
4292 ret = ci_trim_dpm_states(adev, amdgpu_state);
4293 if (ret)
4294 return ret;
4295
4296 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4297 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4298 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4299 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4300 pi->last_mclk_dpm_enable_mask =
4301 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4302 if (pi->uvd_enabled) {
4303 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4304 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4305 }
4306 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4307 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4308
4309 return 0;
4310}
4311
4312static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4313 u32 level_mask)
4314{
4315 u32 level = 0;
4316
4317 while ((level_mask & (1 << level)) == 0)
4318 level++;
4319
4320 return level;
4321}
4322
4323
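/*
 * Force the highest or lowest enabled PCIe/SCLK/MCLK level, or restore
 * automatic selection, then poll TARGET_AND_CURRENT_PROFILE_INDEX until
 * the hardware reports the requested index (bounded by usec_timeout).
 */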
4324static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4325 enum amdgpu_dpm_forced_level level)
4326{
4327 struct ci_power_info *pi = ci_get_pi(adev);
4328 u32 tmp, levels, i;
4329 int ret;
4330
4331 if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4332 if ((!pi->pcie_dpm_key_disabled) &&
4333 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4334 levels = 0;
4335 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4336 while (tmp >>= 1)
4337 levels++;
4338 if (levels) {
4339 ret = ci_dpm_force_state_pcie(adev, levels);
4340 if (ret)
4341 return ret;
4342 for (i = 0; i < adev->usec_timeout; i++) {
4343 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4344 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4345 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4346 if (tmp == levels)
4347 break;
4348 udelay(1);
4349 }
4350 }
4351 }
4352 if ((!pi->sclk_dpm_key_disabled) &&
4353 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4354 levels = 0;
4355 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4356 while (tmp >>= 1)
4357 levels++;
4358 if (levels) {
4359 ret = ci_dpm_force_state_sclk(adev, levels);
4360 if (ret)
4361 return ret;
4362 for (i = 0; i < adev->usec_timeout; i++) {
4363 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4364 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4365 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4366 if (tmp == levels)
4367 break;
4368 udelay(1);
4369 }
4370 }
4371 }
4372 if ((!pi->mclk_dpm_key_disabled) &&
4373 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4374 levels = 0;
4375 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4376 while (tmp >>= 1)
4377 levels++;
4378 if (levels) {
4379 ret = ci_dpm_force_state_mclk(adev, levels);
4380 if (ret)
4381 return ret;
4382 for (i = 0; i < adev->usec_timeout; i++) {
4383 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4384 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4385 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4386 if (tmp == levels)
4387 break;
4388 udelay(1);
4389 }
4390 }
4391 }
4392 } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4393 if ((!pi->sclk_dpm_key_disabled) &&
4394 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4395 levels = ci_get_lowest_enabled_level(adev,
4396 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4397 ret = ci_dpm_force_state_sclk(adev, levels);
4398 if (ret)
4399 return ret;
4400 for (i = 0; i < adev->usec_timeout; i++) {
4401 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4402 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4403 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4404 if (tmp == levels)
4405 break;
4406 udelay(1);
4407 }
4408 }
4409 if ((!pi->mclk_dpm_key_disabled) &&
4410 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4411 levels = ci_get_lowest_enabled_level(adev,
4412 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4413 ret = ci_dpm_force_state_mclk(adev, levels);
4414 if (ret)
4415 return ret;
4416 for (i = 0; i < adev->usec_timeout; i++) {
4417 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4418 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4419 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4420 if (tmp == levels)
4421 break;
4422 udelay(1);
4423 }
4424 }
4425 if ((!pi->pcie_dpm_key_disabled) &&
4426 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4427 levels = ci_get_lowest_enabled_level(adev,
4428 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4429 ret = ci_dpm_force_state_pcie(adev, levels);
4430 if (ret)
4431 return ret;
4432 for (i = 0; i < adev->usec_timeout; i++) {
4433 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4434 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4435 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4436 if (tmp == levels)
4437 break;
4438 udelay(1);
4439 }
4440 }
4441 } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4442 if (!pi->pcie_dpm_key_disabled) {
4443 PPSMC_Result smc_result;
4444
4445 smc_result = amdgpu_ci_send_msg_to_smc(adev,
4446 PPSMC_MSG_PCIeDPM_UnForceLevel);
4447 if (smc_result != PPSMC_Result_OK)
4448 return -EINVAL;
4449 }
4450 ret = ci_upload_dpm_level_enable_mask(adev);
4451 if (ret)
4452 return ret;
4453 }
4454
4455 adev->pm.dpm.forced_level = level;
4456
4457 return 0;
4458}
4459
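/*
 * Expand the MC register table: for the "special" sequencer registers
 * (MC_SEQ_MISC1, MC_SEQ_RESERVE_M) append derived EMRS/MRS/auto-command
 * entries that combine the current register contents with the per-level
 * data.  j indexes the appended slots and must stay below
 * SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE.
 */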
4460static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4461 struct ci_mc_reg_table *table)
4462{
4463 u8 i, j, k;
4464 u32 temp_reg;
4465
4466 for (i = 0, j = table->last; i < table->last; i++) {
4467 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4468 return -EINVAL;
4469 switch(table->mc_reg_address[i].s1) {
4470 case mmMC_SEQ_MISC1:
4471 temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4472 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4473 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4474 for (k = 0; k < table->num_entries; k++) {
4475 table->mc_reg_table_entry[k].mc_data[j] =
4476 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4477 }
4478 j++;
4479 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4480 return -EINVAL;
4481
4482 temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4483 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4484 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4485 for (k = 0; k < table->num_entries; k++) {
4486 table->mc_reg_table_entry[k].mc_data[j] =
4487 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4488 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4489 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4490 }
4491 j++;
4492 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4493 return -EINVAL;
4494
4495 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4496 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4497 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4498 for (k = 0; k < table->num_entries; k++) {
4499 table->mc_reg_table_entry[k].mc_data[j] =
4500 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4501 }
4502 j++;
4503 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4504 return -EINVAL;
4505 }
4506 break;
4507 case mmMC_SEQ_RESERVE_M:
4508 temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4509 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4510 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4511 for (k = 0; k < table->num_entries; k++) {
4512 table->mc_reg_table_entry[k].mc_data[j] =
4513 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4514 }
4515 j++;
4516 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4517 return -EINVAL;
4518 break;
4519 default:
4520 break;
4521 }
4522
4523 }
4524
4525 table->last = j;
4526
4527 return 0;
4528}
4529
4530static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4531{
4532 bool result = true;
4533
4534 switch(in_reg) {
4535 case mmMC_SEQ_RAS_TIMING:
4536 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4537 break;
4538 case mmMC_SEQ_DLL_STBY:
4539 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4540 break;
4541 case mmMC_SEQ_G5PDX_CMD0:
4542 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4543 break;
4544 case mmMC_SEQ_G5PDX_CMD1:
4545 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4546 break;
4547 case mmMC_SEQ_G5PDX_CTRL:
4548 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4549 break;
4550 case mmMC_SEQ_CAS_TIMING:
4551 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4552 break;
4553 case mmMC_SEQ_MISC_TIMING:
4554 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4555 break;
4556 case mmMC_SEQ_MISC_TIMING2:
4557 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4558 break;
4559 case mmMC_SEQ_PMG_DVS_CMD:
4560 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4561 break;
4562 case mmMC_SEQ_PMG_DVS_CTL:
4563 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4564 break;
4565 case mmMC_SEQ_RD_CTL_D0:
4566 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4567 break;
4568 case mmMC_SEQ_RD_CTL_D1:
4569 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4570 break;
4571 case mmMC_SEQ_WR_CTL_D0:
4572 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4573 break;
4574 case mmMC_SEQ_WR_CTL_D1:
4575 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4576 break;
4577 case mmMC_PMG_CMD_EMRS:
4578 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4579 break;
4580 case mmMC_PMG_CMD_MRS:
4581 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4582 break;
4583 case mmMC_PMG_CMD_MRS1:
4584 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4585 break;
4586 case mmMC_SEQ_PMG_TIMING:
4587 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4588 break;
4589 case mmMC_PMG_CMD_MRS2:
4590 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4591 break;
4592 case mmMC_SEQ_WR_CTL_2:
4593 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4594 break;
4595 default:
4596 result = false;
4597 break;
4598 }
4599
4600 return result;
4601}
4602
4603static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4604{
4605 u8 i, j;
4606
4607 for (i = 0; i < table->last; i++) {
4608 for (j = 1; j < table->num_entries; j++) {
4609 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4610 table->mc_reg_table_entry[j].mc_data[i]) {
4611 table->valid_flag |= 1 << i;
4612 break;
4613 }
4614 }
4615 }
4616}
4617
4618static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4619{
4620 u32 i;
4621 u16 address;
4622
4623 for (i = 0; i < table->last; i++) {
4624 table->mc_reg_address[i].s0 =
4625 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4626 address : table->mc_reg_address[i].s1;
4627 }
4628}
4629
4630static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4631 struct ci_mc_reg_table *ci_table)
4632{
4633 u8 i, j;
4634
4635 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4636 return -EINVAL;
4637 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4638 return -EINVAL;
4639
4640 for (i = 0; i < table->last; i++)
4641 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4642
4643 ci_table->last = table->last;
4644
4645 for (i = 0; i < table->num_entries; i++) {
4646 ci_table->mc_reg_table_entry[i].mclk_max =
4647 table->mc_reg_table_entry[i].mclk_max;
4648 for (j = 0; j < table->last; j++)
4649 ci_table->mc_reg_table_entry[i].mc_data[j] =
4650 table->mc_reg_table_entry[i].mc_data[j];
4651 }
4652 ci_table->num_entries = table->num_entries;
4653
4654 return 0;
4655}
4656
4657static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4658 struct ci_mc_reg_table *table)
4659{
4660 u8 i, k;
4661 u32 tmp;
4662 bool patch;
4663
4664 tmp = RREG32(mmMC_SEQ_MISC0);
4665 patch = ((tmp & 0x0000f00) == 0x300);
4666
4667 if (patch &&
4668 ((adev->pdev->device == 0x67B0) ||
4669 (adev->pdev->device == 0x67B1))) {
4670 for (i = 0; i < table->last; i++) {
4671 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4672 return -EINVAL;
4673 switch (table->mc_reg_address[i].s1) {
4674 case mmMC_SEQ_MISC1:
4675 for (k = 0; k < table->num_entries; k++) {
4676 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4677 (table->mc_reg_table_entry[k].mclk_max == 137500))
4678 table->mc_reg_table_entry[k].mc_data[i] =
4679 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4680 0x00000007;
4681 }
4682 break;
4683 case mmMC_SEQ_WR_CTL_D0:
4684 for (k = 0; k < table->num_entries; k++) {
4685 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4686 (table->mc_reg_table_entry[k].mclk_max == 137500))
4687 table->mc_reg_table_entry[k].mc_data[i] =
4688 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4689 0x0000D0DD;
4690 }
4691 break;
4692 case mmMC_SEQ_WR_CTL_D1:
4693 for (k = 0; k < table->num_entries; k++) {
4694 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4695 (table->mc_reg_table_entry[k].mclk_max == 137500))
4696 table->mc_reg_table_entry[k].mc_data[i] =
4697 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4698 0x0000D0DD;
4699 }
4700 break;
4701 case mmMC_SEQ_WR_CTL_2:
4702 for (k = 0; k < table->num_entries; k++) {
4703 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4704 (table->mc_reg_table_entry[k].mclk_max == 137500))
4705 table->mc_reg_table_entry[k].mc_data[i] = 0;
4706 }
4707 break;
4708 case mmMC_SEQ_CAS_TIMING:
4709 for (k = 0; k < table->num_entries; k++) {
4710 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4711 table->mc_reg_table_entry[k].mc_data[i] =
4712 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4713 0x000C0140;
4714 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4715 table->mc_reg_table_entry[k].mc_data[i] =
4716 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4717 0x000C0150;
4718 }
4719 break;
4720 case mmMC_SEQ_MISC_TIMING:
4721 for (k = 0; k < table->num_entries; k++) {
4722 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4723 table->mc_reg_table_entry[k].mc_data[i] =
4724 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4725 0x00000030;
4726 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4727 table->mc_reg_table_entry[k].mc_data[i] =
4728 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4729 0x00000035;
4730 }
4731 break;
4732 default:
4733 break;
4734 }
4735 }
4736
4737 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4738 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4739 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4740 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4741 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4742 }
4743
4744 return 0;
4745}
4746
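/*
 * Build the driver's MC register table: mirror the live MC_SEQ registers
 * into their _LP shadows, read the table from the VBIOS, apply the
 * Hawaii register patches and special-register expansion, then mark
 * which register columns actually vary between entries.
 */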
4747static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4748{
4749 struct ci_power_info *pi = ci_get_pi(adev);
4750 struct atom_mc_reg_table *table;
4751 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4752 u8 module_index = ci_get_memory_module_index(adev);
4753 int ret;
4754
4755 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4756 if (!table)
4757 return -ENOMEM;
4758
4759 WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4760 WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4761 WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4762 WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4763 WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4764 WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4765 WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4766 WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4767 WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4768 WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4769 WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4770 WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4771 WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4772 WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4773 WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4774 WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4775 WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4776 WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4777 WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4778 WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4779
4780 ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4781 if (ret)
4782 goto init_mc_done;
4783
4784 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4785 if (ret)
4786 goto init_mc_done;
4787
4788 ci_set_s0_mc_reg_index(ci_table);
4789
4790 ret = ci_register_patching_mc_seq(adev, ci_table);
4791 if (ret)
4792 goto init_mc_done;
4793
4794 ret = ci_set_mc_special_registers(adev, ci_table);
4795 if (ret)
4796 goto init_mc_done;
4797
4798 ci_set_valid_flag(ci_table);
4799
4800init_mc_done:
4801 kfree(table);
4802
4803 return ret;
4804}
4805
4806static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4807 SMU7_Discrete_MCRegisters *mc_reg_table)
4808{
4809 struct ci_power_info *pi = ci_get_pi(adev);
4810 u32 i, j;
4811
4812 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4813 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4814 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4815 return -EINVAL;
4816 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4817 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4818 i++;
4819 }
4820 }
4821
4822 mc_reg_table->last = (u8)i;
4823
4824 return 0;
4825}
4826
4827static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4828 SMU7_Discrete_MCRegisterSet *data,
4829 u32 num_entries, u32 valid_flag)
4830{
4831 u32 i, j;
4832
4833 for (i = 0, j = 0; j < num_entries; j++) {
4834 if (valid_flag & (1 << j)) {
4835 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4836 i++;
4837 }
4838 }
4839}
4840
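/*
 * Select the first MC register entry whose mclk_max covers the given
 * memory clock (falling back to the last entry) and convert it to the
 * big-endian SMC layout.
 */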
4841static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4842 const u32 memory_clock,
4843 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4844{
4845 struct ci_power_info *pi = ci_get_pi(adev);
4846 u32 i = 0;
4847
4848 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4849 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4850 break;
4851 }
4852
4853 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4854 --i;
4855
4856 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4857 mc_reg_table_data, pi->mc_reg_table.last,
4858 pi->mc_reg_table.valid_flag);
4859}
4860
4861static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4862 SMU7_Discrete_MCRegisters *mc_reg_table)
4863{
4864 struct ci_power_info *pi = ci_get_pi(adev);
4865 u32 i;
4866
4867 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4868 ci_convert_mc_reg_table_entry_to_smc(adev,
4869 pi->dpm_table.mclk_table.dpm_levels[i].value,
4870 &mc_reg_table->data[i]);
4871}
4872
4873static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4874{
4875 struct ci_power_info *pi = ci_get_pi(adev);
4876 int ret;
4877
4878 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4879
4880 ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4881 if (ret)
4882 return ret;
4883 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4884
4885 return amdgpu_ci_copy_bytes_to_smc(adev,
4886 pi->mc_reg_table_start,
4887 (u8 *)&pi->smc_mc_reg_table,
4888 sizeof(SMU7_Discrete_MCRegisters),
4889 pi->sram_end);
4890}
4891
4892static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4893{
4894 struct ci_power_info *pi = ci_get_pi(adev);
4895
4896 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4897 return 0;
4898
4899 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4900
4901 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4902
4903 return amdgpu_ci_copy_bytes_to_smc(adev,
4904 pi->mc_reg_table_start +
4905 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4906 (u8 *)&pi->smc_mc_reg_table.data[0],
4907 sizeof(SMU7_Discrete_MCRegisterSet) *
4908 pi->dpm_table.mclk_table.count,
4909 pi->sram_end);
4910}
4911
4912static void ci_enable_voltage_control(struct amdgpu_device *adev)
4913{
4914 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4915
4916 tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4917 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4918}
4919
4920static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4921 struct amdgpu_ps *amdgpu_state)
4922{
4923 struct ci_ps *state = ci_get_ps(amdgpu_state);
4924 int i;
4925 u16 pcie_speed, max_speed = 0;
4926
4927 for (i = 0; i < state->performance_level_count; i++) {
4928 pcie_speed = state->performance_levels[i].pcie_gen;
4929 if (max_speed < pcie_speed)
4930 max_speed = pcie_speed;
4931 }
4932
4933 return max_speed;
4934}
4935
4936static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4937{
4938 u32 speed_cntl = 0;
4939
4940 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4941 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4942 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4943
4944 return (u16)speed_cntl;
4945}
4946
4947static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4948{
4949 u32 link_width = 0;
4950
4951 link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4952 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4953 link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4954
4955 switch (link_width) {
4956 case 1:
4957 return 1;
4958 case 2:
4959 return 2;
4960 case 3:
4961 return 4;
4962 case 4:
4963 return 8;
4964 case 0:
4965 case 6:
4966 default:
4967 return 16;
4968 }
4969}
4970
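/*
 * If the new state needs a faster PCIe link, try to raise it now through
 * the ACPI PSPP interface, stepping down from gen3 to gen2 when a
 * request fails; downgrades are deferred until after the state change
 * via pspp_notify_required.
 */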
4971static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4972 struct amdgpu_ps *amdgpu_new_state,
4973 struct amdgpu_ps *amdgpu_current_state)
4974{
4975 struct ci_power_info *pi = ci_get_pi(adev);
4976 enum amdgpu_pcie_gen target_link_speed =
4977 ci_get_maximum_link_speed(adev, amdgpu_new_state);
4978 enum amdgpu_pcie_gen current_link_speed;
4979
4980 if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4981 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4982 else
4983 current_link_speed = pi->force_pcie_gen;
4984
4985 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
4986 pi->pspp_notify_required = false;
4987 if (target_link_speed > current_link_speed) {
4988 switch (target_link_speed) {
4989#ifdef CONFIG_ACPI
4990 case AMDGPU_PCIE_GEN3:
4991 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4992 break;
4993 pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
4994 if (current_link_speed == AMDGPU_PCIE_GEN2)
4995 break;
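/* fall through */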
4996 case AMDGPU_PCIE_GEN2:
4997 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4998 break;
4999#endif
5000 default:
5001 pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5002 break;
5003 }
5004 } else {
5005 if (target_link_speed < current_link_speed)
5006 pi->pspp_notify_required = true;
5007 }
5008}
5009
5010static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5011 struct amdgpu_ps *amdgpu_new_state,
5012 struct amdgpu_ps *amdgpu_current_state)
5013{
5014 struct ci_power_info *pi = ci_get_pi(adev);
5015 enum amdgpu_pcie_gen target_link_speed =
5016 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5017 u8 request;
5018
5019 if (pi->pspp_notify_required) {
5020 if (target_link_speed == AMDGPU_PCIE_GEN3)
5021 request = PCIE_PERF_REQ_PECI_GEN3;
5022 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5023 request = PCIE_PERF_REQ_PECI_GEN2;
5024 else
5025 request = PCIE_PERF_REQ_PECI_GEN1;
5026
5027 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5028 (ci_get_current_pcie_speed(adev) > 0))
5029 return;
5030
5031#ifdef CONFIG_ACPI
5032 amdgpu_acpi_pcie_performance_request(adev, request, false);
5033#endif
5034 }
5035}
5036
5037static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5038{
5039 struct ci_power_info *pi = ci_get_pi(adev);
5040 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5041 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5042 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5043 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5044 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5045 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5046
5047 if (allowed_sclk_vddc_table == NULL)
5048 return -EINVAL;
5049 if (allowed_sclk_vddc_table->count < 1)
5050 return -EINVAL;
5051 if (allowed_mclk_vddc_table == NULL)
5052 return -EINVAL;
5053 if (allowed_mclk_vddc_table->count < 1)
5054 return -EINVAL;
5055 if (allowed_mclk_vddci_table == NULL)
5056 return -EINVAL;
5057 if (allowed_mclk_vddci_table->count < 1)
5058 return -EINVAL;
5059
5060 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5061 pi->max_vddc_in_pp_table =
5062 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5063
5064 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5065 pi->max_vddci_in_pp_table =
5066 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5067
5068 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5069 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5070 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5071 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5072 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5073 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5074 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5075 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5076
5077 return 0;
5078}
5079
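/*
 * Replace a leakage-record voltage ID with the actual voltage from the
 * leakage table; the *_patch_* helpers below apply the same substitution
 * to every dependency and limit table the driver consumes.
 */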
5080static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5081{
5082 struct ci_power_info *pi = ci_get_pi(adev);
5083 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5084 u32 leakage_index;
5085
5086 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5087 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5088 *vddc = leakage_table->actual_voltage[leakage_index];
5089 break;
5090 }
5091 }
5092}
5093
5094static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5095{
5096 struct ci_power_info *pi = ci_get_pi(adev);
5097 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5098 u32 leakage_index;
5099
5100 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5101 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5102 *vddci = leakage_table->actual_voltage[leakage_index];
5103 break;
5104 }
5105 }
5106}
5107
5108static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5109 struct amdgpu_clock_voltage_dependency_table *table)
5110{
5111 u32 i;
5112
5113 if (table) {
5114 for (i = 0; i < table->count; i++)
5115 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5116 }
5117}
5118
5119static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5120 struct amdgpu_clock_voltage_dependency_table *table)
5121{
5122 u32 i;
5123
5124 if (table) {
5125 for (i = 0; i < table->count; i++)
5126 ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5127 }
5128}
5129
5130static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5131 struct amdgpu_vce_clock_voltage_dependency_table *table)
5132{
5133 u32 i;
5134
5135 if (table) {
5136 for (i = 0; i < table->count; i++)
5137 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5138 }
5139}
5140
5141static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5142 struct amdgpu_uvd_clock_voltage_dependency_table *table)
5143{
5144 u32 i;
5145
5146 if (table) {
5147 for (i = 0; i < table->count; i++)
5148 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5149 }
5150}
5151
5152static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5153 struct amdgpu_phase_shedding_limits_table *table)
5154{
5155 u32 i;
5156
5157 if (table) {
5158 for (i = 0; i < table->count; i++)
5159 ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5160 }
5161}
5162
5163static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5164 struct amdgpu_clock_and_voltage_limits *table)
5165{
5166 if (table) {
5167 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5168 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5169 }
5170}
5171
5172static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5173 struct amdgpu_cac_leakage_table *table)
5174{
5175 u32 i;
5176
5177 if (table) {
5178 for (i = 0; i < table->count; i++)
5179 ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5180 }
5181}
5182
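/* Run the leakage fix-up over every clock/voltage table parsed from the
 * pptable so no virtual leakage IDs reach the SMC.
 */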
5183static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5184{
5185
5186 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5187 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5188 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5189 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5190 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5191 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5192 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5193 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5194 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5195 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5196 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5197 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5198 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5199 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5200 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5201 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5202 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5203 &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5204 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5205 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5206 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5207 &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5208 ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5209 &adev->pm.dpm.dyn_state.cac_leakage_table);
5210
5211}
5212
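/* Keep private copies of the current and requested states and re-point
 * ps_priv at the copy, so the cached state stays valid independently of
 * the caller's amdgpu_ps.
 */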
5213static void ci_update_current_ps(struct amdgpu_device *adev,
5214 struct amdgpu_ps *rps)
5215{
5216 struct ci_ps *new_ps = ci_get_ps(rps);
5217 struct ci_power_info *pi = ci_get_pi(adev);
5218
5219 pi->current_rps = *rps;
5220 pi->current_ps = *new_ps;
5221 pi->current_rps.ps_priv = &pi->current_ps;
5222}
5223
5224static void ci_update_requested_ps(struct amdgpu_device *adev,
5225 struct amdgpu_ps *rps)
5226{
5227 struct ci_ps *new_ps = ci_get_ps(rps);
5228 struct ci_power_info *pi = ci_get_pi(adev);
5229
5230 pi->requested_rps = *rps;
5231 pi->requested_ps = *new_ps;
5232 pi->requested_rps.ps_priv = &pi->requested_ps;
5233}
5234
5235static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5236{
5237 struct ci_power_info *pi = ci_get_pi(adev);
5238 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5239 struct amdgpu_ps *new_ps = &requested_ps;
5240
5241 ci_update_requested_ps(adev, new_ps);
5242
5243 ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5244
5245 return 0;
5246}
5247
5248static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5249{
5250 struct ci_power_info *pi = ci_get_pi(adev);
5251 struct amdgpu_ps *new_ps = &pi->requested_rps;
5252
5253 ci_update_current_ps(adev, new_ps);
5254}
5255
5256
5257static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5258{
5259 ci_read_clock_registers(adev);
5260 ci_enable_acpi_power_management(adev);
5261 ci_init_sclk_t(adev);
5262}
5263
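/* DPM bring-up sequence: build the voltage and MC register tables,
 * program spread spectrum, thermal protection and the display gap,
 * upload and start the SMC firmware, then switch on the individual
 * features (ULV, deep sleep, DIDT, CAC, power containment) and the
 * thermal controller.
 */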
5264static int ci_dpm_enable(struct amdgpu_device *adev)
5265{
5266 struct ci_power_info *pi = ci_get_pi(adev);
5267 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5268 int ret;
5269
5270 if (amdgpu_ci_is_smc_running(adev))
5271 return -EINVAL;
5272 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5273 ci_enable_voltage_control(adev);
5274 ret = ci_construct_voltage_tables(adev);
5275 if (ret) {
5276 DRM_ERROR("ci_construct_voltage_tables failed\n");
5277 return ret;
5278 }
5279 }
5280 if (pi->caps_dynamic_ac_timing) {
5281 ret = ci_initialize_mc_reg_table(adev);
5282 if (ret)
5283 pi->caps_dynamic_ac_timing = false;
5284 }
5285 if (pi->dynamic_ss)
5286 ci_enable_spread_spectrum(adev, true);
5287 if (pi->thermal_protection)
5288 ci_enable_thermal_protection(adev, true);
5289 ci_program_sstp(adev);
5290 ci_enable_display_gap(adev);
5291 ci_program_vc(adev);
5292 ret = ci_upload_firmware(adev);
5293 if (ret) {
5294 DRM_ERROR("ci_upload_firmware failed\n");
5295 return ret;
5296 }
5297 ret = ci_process_firmware_header(adev);
5298 if (ret) {
5299 DRM_ERROR("ci_process_firmware_header failed\n");
5300 return ret;
5301 }
5302 ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5303 if (ret) {
5304 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5305 return ret;
5306 }
5307 ret = ci_init_smc_table(adev);
5308 if (ret) {
5309 DRM_ERROR("ci_init_smc_table failed\n");
5310 return ret;
5311 }
5312 ret = ci_init_arb_table_index(adev);
5313 if (ret) {
5314 DRM_ERROR("ci_init_arb_table_index failed\n");
5315 return ret;
5316 }
5317 if (pi->caps_dynamic_ac_timing) {
5318 ret = ci_populate_initial_mc_reg_table(adev);
5319 if (ret) {
5320 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5321 return ret;
5322 }
5323 }
5324 ret = ci_populate_pm_base(adev);
5325 if (ret) {
5326 DRM_ERROR("ci_populate_pm_base failed\n");
5327 return ret;
5328 }
5329 ci_dpm_start_smc(adev);
5330 ci_enable_vr_hot_gpio_interrupt(adev);
5331 ret = ci_notify_smc_display_change(adev, false);
5332 if (ret) {
5333 DRM_ERROR("ci_notify_smc_display_change failed\n");
5334 return ret;
5335 }
5336 ci_enable_sclk_control(adev, true);
5337 ret = ci_enable_ulv(adev, true);
5338 if (ret) {
5339 DRM_ERROR("ci_enable_ulv failed\n");
5340 return ret;
5341 }
5342 ret = ci_enable_ds_master_switch(adev, true);
5343 if (ret) {
5344 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5345 return ret;
5346 }
5347 ret = ci_start_dpm(adev);
5348 if (ret) {
5349 DRM_ERROR("ci_start_dpm failed\n");
5350 return ret;
5351 }
5352 ret = ci_enable_didt(adev, true);
5353 if (ret) {
5354 DRM_ERROR("ci_enable_didt failed\n");
5355 return ret;
5356 }
5357 ret = ci_enable_smc_cac(adev, true);
5358 if (ret) {
5359 DRM_ERROR("ci_enable_smc_cac failed\n");
5360 return ret;
5361 }
5362 ret = ci_enable_power_containment(adev, true);
5363 if (ret) {
5364 DRM_ERROR("ci_enable_power_containment failed\n");
5365 return ret;
5366 }
5367
5368 ret = ci_power_control_set_level(adev);
5369 if (ret) {
5370 DRM_ERROR("ci_power_control_set_level failed\n");
5371 return ret;
5372 }
5373
5374 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5375
5376 ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5377 if (ret) {
5378 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5379 return ret;
5380 }
5381
5382 ci_thermal_start_thermal_controller(adev);
5383
5384 ci_update_current_ps(adev, boot_ps);
5385
5386 return 0;
5387}
5388
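/* Tear-down mirrors ci_dpm_enable in reverse; if the SMC is not running
 * there is nothing to undo beyond releasing the thermal interrupts and
 * UVD powergating above.
 */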
5389static void ci_dpm_disable(struct amdgpu_device *adev)
5390{
5391 struct ci_power_info *pi = ci_get_pi(adev);
5392 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5393
5394 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5395 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5396 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5397 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5398
5399 ci_dpm_powergate_uvd(adev, false);
5400
5401 if (!amdgpu_ci_is_smc_running(adev))
5402 return;
5403
5404 ci_thermal_stop_thermal_controller(adev);
5405
5406 if (pi->thermal_protection)
5407 ci_enable_thermal_protection(adev, false);
5408 ci_enable_power_containment(adev, false);
5409 ci_enable_smc_cac(adev, false);
5410 ci_enable_didt(adev, false);
5411 ci_enable_spread_spectrum(adev, false);
5412 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5413 ci_stop_dpm(adev);
5414 ci_enable_ds_master_switch(adev, false);
5415 ci_enable_ulv(adev, false);
5416 ci_clear_vc(adev);
5417 ci_reset_to_default(adev);
5418 ci_dpm_stop_smc(adev);
5419 ci_force_switch_to_arb_f0(adev);
5420 ci_enable_thermal_based_sclk_dpm(adev, false);
5421
5422 ci_update_current_ps(adev, boot_ps);
5423}
5424
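/* Freeze sclk/mclk DPM while the level tables and enable masks are
 * rewritten, then unfreeze so the SMC arbitrates between the new levels.
 */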
5425static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5426{
5427 struct ci_power_info *pi = ci_get_pi(adev);
5428 struct amdgpu_ps *new_ps = &pi->requested_rps;
5429 struct amdgpu_ps *old_ps = &pi->current_rps;
5430 int ret;
5431
5432 ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5433 if (pi->pcie_performance_request)
5434 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5435 ret = ci_freeze_sclk_mclk_dpm(adev);
5436 if (ret) {
5437 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5438 return ret;
5439 }
5440 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5441 if (ret) {
5442 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5443 return ret;
5444 }
5445 ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5446 if (ret) {
5447 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5448 return ret;
5449 }
5450
5451 ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5452 if (ret) {
5453 DRM_ERROR("ci_update_vce_dpm failed\n");
5454 return ret;
5455 }
5456
5457 ret = ci_update_sclk_t(adev);
5458 if (ret) {
5459 DRM_ERROR("ci_update_sclk_t failed\n");
5460 return ret;
5461 }
5462 if (pi->caps_dynamic_ac_timing) {
5463 ret = ci_update_and_upload_mc_reg_table(adev);
5464 if (ret) {
5465 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5466 return ret;
5467 }
5468 }
5469 ret = ci_program_memory_timing_parameters(adev);
5470 if (ret) {
5471 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5472 return ret;
5473 }
5474 ret = ci_unfreeze_sclk_mclk_dpm(adev);
5475 if (ret) {
5476 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5477 return ret;
5478 }
5479 ret = ci_upload_dpm_level_enable_mask(adev);
5480 if (ret) {
5481 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5482 return ret;
5483 }
5484 if (pi->pcie_performance_request)
5485 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5486
5487 return 0;
5488}
5489
5490#if 0
5491static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5492{
5493 ci_set_boot_state(adev);
5494}
5495#endif
5496
5497static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5498{
5499 ci_program_display_gap(adev);
5500}
5501
5502union power_info {
5503 struct _ATOM_POWERPLAY_INFO info;
5504 struct _ATOM_POWERPLAY_INFO_V2 info_2;
5505 struct _ATOM_POWERPLAY_INFO_V3 info_3;
5506 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5507 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5508 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5509};
5510
5511union pplib_clock_info {
5512 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5513 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5514 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5515 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5516 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5517 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5518};
5519
5520union pplib_power_state {
5521 struct _ATOM_PPLIB_STATE v1;
5522 struct _ATOM_PPLIB_STATE_V2 v2;
5523};
5524
5525static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5526 struct amdgpu_ps *rps,
5527 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5528 u8 table_rev)
5529{
5530 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5531 rps->class = le16_to_cpu(non_clock_info->usClassification);
5532 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5533
5534 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5535 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5536 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5537 } else {
5538 rps->vclk = 0;
5539 rps->dclk = 0;
5540 }
5541
5542 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5543 adev->pm.dpm.boot_ps = rps;
5544 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5545 adev->pm.dpm.uvd_ps = rps;
5546}
5547
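/* Each CI clock-info entry stores a 24-bit clock split across a 16-bit
 * low word and an 8-bit high byte; reassemble sclk and mclk from both.
 */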
5548static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5549 struct amdgpu_ps *rps, int index,
5550 union pplib_clock_info *clock_info)
5551{
5552 struct ci_power_info *pi = ci_get_pi(adev);
5553 struct ci_ps *ps = ci_get_ps(rps);
5554 struct ci_pl *pl = &ps->performance_levels[index];
5555
5556 ps->performance_level_count = index + 1;
5557
5558 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5559 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5560 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5561 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5562
5563 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5564 pi->sys_pcie_mask,
5565 pi->vbios_boot_state.pcie_gen_bootup_value,
5566 clock_info->ci.ucPCIEGen);
5567 pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5568 pi->vbios_boot_state.pcie_lane_bootup_value,
5569 le16_to_cpu(clock_info->ci.usPCIELane));
5570
5571 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5572 pi->acpi_pcie_gen = pl->pcie_gen;
5573 }
5574
5575 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5576 pi->ulv.supported = true;
5577 pi->ulv.pl = *pl;
5578 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5579 }
5580
5581 /* patch up boot state */
5582 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5583 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5584 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5585 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5586 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5587 }
5588
5589 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5590 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5591 pi->use_pcie_powersaving_levels = true;
5592 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5593 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5594 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5595 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5596 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5597 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5598 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5599 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5600 break;
5601 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5602 pi->use_pcie_performance_levels = true;
5603 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5604 pi->pcie_gen_performance.max = pl->pcie_gen;
5605 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5606 pi->pcie_gen_performance.min = pl->pcie_gen;
5607 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5608 pi->pcie_lane_performance.max = pl->pcie_lane;
5609 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5610 pi->pcie_lane_performance.min = pl->pcie_lane;
5611 break;
5612 default:
5613 break;
5614 }
5615}
5616
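/* Walk the ATOM PowerPlay state array. A v2 state is ucNumDPMLevels, a
 * non-clock-info index and then one clock-info index per level, which is
 * why the offset advances by 2 + ucNumDPMLevels per state.
 */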
5617static int ci_parse_power_table(struct amdgpu_device *adev)
5618{
5619 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5620 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5621 union pplib_power_state *power_state;
5622 int i, j, k, non_clock_array_index, clock_array_index;
5623 union pplib_clock_info *clock_info;
5624 struct _StateArray *state_array;
5625 struct _ClockInfoArray *clock_info_array;
5626 struct _NonClockInfoArray *non_clock_info_array;
5627 union power_info *power_info;
5628 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5629 u16 data_offset;
5630 u8 frev, crev;
5631 u8 *power_state_offset;
5632 struct ci_ps *ps;
5633
5634 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5635 &frev, &crev, &data_offset))
5636 return -EINVAL;
5637 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5638
5639 amdgpu_add_thermal_controller(adev);
5640
5641 state_array = (struct _StateArray *)
5642 (mode_info->atom_context->bios + data_offset +
5643 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5644 clock_info_array = (struct _ClockInfoArray *)
5645 (mode_info->atom_context->bios + data_offset +
5646 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5647 non_clock_info_array = (struct _NonClockInfoArray *)
5648 (mode_info->atom_context->bios + data_offset +
5649 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5650
5651 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5652 state_array->ucNumEntries, GFP_KERNEL);
5653 if (!adev->pm.dpm.ps)
5654 return -ENOMEM;
5655 power_state_offset = (u8 *)state_array->states;
5656 for (i = 0; i < state_array->ucNumEntries; i++) {
5657 u8 *idx;
5658 power_state = (union pplib_power_state *)power_state_offset;
5659 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5660 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5661 &non_clock_info_array->nonClockInfo[non_clock_array_index];
5662 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5663 if (ps == NULL) {
 while (i--)
 kfree(adev->pm.dpm.ps[i].ps_priv);
5664 kfree(adev->pm.dpm.ps);
5665 return -ENOMEM;
5666 }
5667 adev->pm.dpm.ps[i].ps_priv = ps;
5668 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5669 non_clock_info,
5670 non_clock_info_array->ucEntrySize);
5671 k = 0;
5672 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5673 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5674 clock_array_index = idx[j];
5675 if (clock_array_index >= clock_info_array->ucNumEntries)
5676 continue;
5677 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5678 break;
5679 clock_info = (union pplib_clock_info *)
5680 ((u8 *)&clock_info_array->clockInfo[0] +
5681 (clock_array_index * clock_info_array->ucEntrySize));
5682 ci_parse_pplib_clock_info(adev,
5683 &adev->pm.dpm.ps[i], k,
5684 clock_info);
5685 k++;
5686 }
5687 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5688 }
5689 adev->pm.dpm.num_ps = state_array->ucNumEntries;
5690
5691 /* fill in the vce power states */
5692 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
5693 u32 sclk, mclk;
5694 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5695 clock_info = (union pplib_clock_info *)
5696 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5697 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5698 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5699 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5700 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5701 adev->pm.dpm.vce_states[i].sclk = sclk;
5702 adev->pm.dpm.vce_states[i].mclk = mclk;
5703 }
5704
5705 return 0;
5706}
5707
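/* Capture the VBIOS boot-up values: voltages and default clocks come
 * from the ATOM FirmwareInfo table, PCIe gen/lanes from the current
 * link configuration.
 */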
5708static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5709 struct ci_vbios_boot_state *boot_state)
5710{
5711 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5712 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5713 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5714 u8 frev, crev;
5715 u16 data_offset;
5716
5717 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5718 &frev, &crev, &data_offset)) {
5719 firmware_info =
5720 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5721 data_offset);
5722 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5723 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5724 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5725 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5726 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5727 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5728 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5729
5730 return 0;
5731 }
5732 return -EINVAL;
5733}
5734
5735static void ci_dpm_fini(struct amdgpu_device *adev)
5736{
5737 int i;
5738
5739 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5740 kfree(adev->pm.dpm.ps[i].ps_priv);
5741 }
5742 kfree(adev->pm.dpm.ps);
5743 kfree(adev->pm.dpm.priv);
5744 kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5745 amdgpu_free_extended_power_table(adev);
5746}
5747
5748/**
5749 * ci_dpm_init_microcode - load ucode images from disk
5750 *
5751 * @adev: amdgpu_device pointer
5752 *
5753 * Use the firmware interface to load the ucode images into
5754 * the driver (not loaded into hw).
5755 * Returns 0 on success, error on failure.
5756 */
5757static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5758{
5759 const char *chip_name;
5760 char fw_name[30];
5761 int err;
5762
5763 DRM_DEBUG("\n");
5764
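	/* Certain later board revisions ship a different SMC and need the
	 * separate "_k" firmware image (presumably kicker parts).
	 */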
5765 switch (adev->asic_type) {
5766 case CHIP_BONAIRE:
5767 if ((adev->pdev->revision == 0x80) ||
5768 (adev->pdev->revision == 0x81) ||
5769 (adev->pdev->device == 0x665f))
5770 chip_name = "bonaire_k";
5771 else
5772 chip_name = "bonaire";
5773 break;
5774 case CHIP_HAWAII:
5775 if (adev->pdev->revision == 0x80)
5776 chip_name = "hawaii_k";
5777 else
5778 chip_name = "hawaii";
5779 break;
5780 case CHIP_KAVERI:
5781 case CHIP_KABINI:
5782 case CHIP_MULLINS:
5783 default: BUG();
5784 }
5785
5786 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5787 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5788 if (err)
5789 goto out;
5790 err = amdgpu_ucode_validate(adev->pm.fw);
5791
5792out:
5793 if (err) {
5794 printk(KERN_ERR
5795 "cik_smc: Failed to load firmware \"%s\"\n",
5796 fw_name);
5797 release_firmware(adev->pm.fw);
5798 adev->pm.fw = NULL;
5799 }
5800 return err;
5801}
5802
5803static int ci_dpm_init(struct amdgpu_device *adev)
5804{
5805 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5806 SMU7_Discrete_DpmTable *dpm_table;
5807 struct amdgpu_gpio_rec gpio;
5808 u16 data_offset, size;
5809 u8 frev, crev;
5810 struct ci_power_info *pi;
5811 int ret;
5812
5813 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5814 if (pi == NULL)
5815 return -ENOMEM;
5816 adev->pm.dpm.priv = pi;
5817
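	/* Derive the supported PCIe gen mask from the CAIL link-speed caps. */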
5818 pi->sys_pcie_mask =
5819 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5820 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5821
5822 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5823
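	/* Start the PCIe performance/powersaving ranges deliberately inverted
	 * (max below min) so the scan in ci_parse_pplib_clock_info can widen
	 * them from the parsed states.
	 */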
5824 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5825 pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5826 pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5827 pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5828
5829 pi->pcie_lane_performance.max = 0;
5830 pi->pcie_lane_performance.min = 16;
5831 pi->pcie_lane_powersaving.max = 0;
5832 pi->pcie_lane_powersaving.min = 16;
5833
5834 ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5835 if (ret) {
5836 ci_dpm_fini(adev);
5837 return ret;
5838 }
5839
5840 ret = amdgpu_get_platform_caps(adev);
5841 if (ret) {
5842 ci_dpm_fini(adev);
5843 return ret;
5844 }
5845
5846 ret = amdgpu_parse_extended_power_table(adev);
5847 if (ret) {
5848 ci_dpm_fini(adev);
5849 return ret;
5850 }
5851
5852 ret = ci_parse_power_table(adev);
5853 if (ret) {
5854 ci_dpm_fini(adev);
5855 return ret;
5856 }
5857
5858 pi->dll_default_on = false;
5859 pi->sram_end = SMC_RAM_END;
5860
5861 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5862 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5863 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5864 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5865 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5866 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5867 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5868 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5869
5870 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5871
5872 pi->sclk_dpm_key_disabled = 0;
5873 pi->mclk_dpm_key_disabled = 0;
5874 pi->pcie_dpm_key_disabled = 0;
5875 pi->thermal_sclk_dpm_enabled = 0;
5876
5877 pi->caps_sclk_ds = true;
5878
5879 pi->mclk_strobe_mode_threshold = 40000;
5880 pi->mclk_stutter_mode_threshold = 40000;
5881 pi->mclk_edc_enable_threshold = 40000;
5882 pi->mclk_edc_wr_enable_threshold = 40000;
5883
5884 ci_initialize_powertune_defaults(adev);
5885
5886 pi->caps_fps = false;
5887
5888 pi->caps_sclk_throttle_low_notification = false;
5889
5890 pi->caps_uvd_dpm = true;
5891 pi->caps_vce_dpm = true;
5892
5893 ci_get_leakage_voltages(adev);
5894 ci_patch_dependency_tables_with_leakage(adev);
5895 ci_set_private_data_variables_based_on_pptable(adev);
5896
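	/* Hardcoded four-point dispclk/VDDC curve; the clocks appear to be in
	 * 10 kHz units and the voltages in mV.
	 */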
5897 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5898 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5899 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5900 ci_dpm_fini(adev);
5901 return -ENOMEM;
5902 }
5903 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5904 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5905 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5906 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5907 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5908 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5909 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5910 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5911 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5912
5913 adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5914 adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5915 adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5916
5917 adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5918 adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5919 adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5920 adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5921
5922 if (adev->asic_type == CHIP_HAWAII) {
5923 pi->thermal_temp_setting.temperature_low = 94500;
5924 pi->thermal_temp_setting.temperature_high = 95000;
5925 pi->thermal_temp_setting.temperature_shutdown = 104000;
5926 } else {
5927 pi->thermal_temp_setting.temperature_low = 99500;
5928 pi->thermal_temp_setting.temperature_high = 100000;
5929 pi->thermal_temp_setting.temperature_shutdown = 104000;
5930 }
5931
5932 pi->uvd_enabled = false;
5933
5934 dpm_table = &pi->smc_state_table;
5935
5936 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5937 if (gpio.valid) {
5938 dpm_table->VRHotGpio = gpio.shift;
5939 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5940 } else {
5941 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5942 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5943 }
5944
5945 gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5946 if (gpio.valid) {
5947 dpm_table->AcDcGpio = gpio.shift;
5948 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5949 } else {
5950 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5951 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5952 }
5953
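	/* Route the VDDC PCC GPIO: pins 0 and 1 select a GNB slow-mode value,
	 * pins 2-4 set dedicated force bits, anything else is rejected as
	 * invalid.
	 */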
5954 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5955 if (gpio.valid) {
5956 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5957
5958 switch (gpio.shift) {
5959 case 0:
5960 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5961 tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5962 break;
5963 case 1:
5964 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5965 tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5966 break;
5967 case 2:
5968 tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5969 break;
5970 case 3:
5971 tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
5972 break;
5973 case 4:
5974 tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
5975 break;
5976 default:
5977 DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
5978 break;
5979 }
5980 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
5981 }
5982
5983 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5984 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5985 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5986 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5987 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5988 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5989 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5990
5991 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5992 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5993 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5994 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5995 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5996 else
5997 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5998 }
5999
6000 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6001 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6002 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6003 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6004 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6005 else
6006 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6007 }
6008
6009 pi->vddc_phase_shed_control = true;
6010
6011#if defined(CONFIG_ACPI)
6012 pi->pcie_performance_request =
6013 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6014#else
6015 pi->pcie_performance_request = false;
6016#endif
6017
6018 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6019 &frev, &crev, &data_offset)) {
6020 pi->caps_sclk_ss_support = true;
6021 pi->caps_mclk_ss_support = true;
6022 pi->dynamic_ss = true;
6023 } else {
6024 pi->caps_sclk_ss_support = false;
6025 pi->caps_mclk_ss_support = false;
6026 pi->dynamic_ss = true;
6027 }
6028
6029 if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6030 pi->thermal_protection = true;
6031 else
6032 pi->thermal_protection = false;
6033
6034 pi->caps_dynamic_ac_timing = true;
6035
6036 pi->uvd_power_gated = false;
6037
6038 /* make sure dc limits are valid */
6039 if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6040 (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6041 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6042 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6043
6044 pi->fan_ctrl_is_in_default_mode = true;
6045
6046 return 0;
6047}
6048
6049static void
6050ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6051 struct seq_file *m)
6052{
6053 struct ci_power_info *pi = ci_get_pi(adev);
6054 struct amdgpu_ps *rps = &pi->current_rps;
6055 u32 sclk = ci_get_average_sclk_freq(adev);
6056 u32 mclk = ci_get_average_mclk_freq(adev);
6057 u32 activity_percent = 50;
6058 int ret;
6059
6060 ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6061 &activity_percent);
6062
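	/* The SMC reports activity scaled by 256; add 0x80 to round, shift
	 * down to an integer percentage and clamp at 100.
	 */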
6063 if (ret == 0) {
6064 activity_percent += 0x80;
6065 activity_percent >>= 8;
6066 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6067 }
6068
6069 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6070 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6071 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
6072 sclk, mclk);
6073 seq_printf(m, "GPU load: %u %%\n", activity_percent);
6074}
6075
6076static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6077 struct amdgpu_ps *rps)
6078{
6079 struct ci_ps *ps = ci_get_ps(rps);
6080 struct ci_pl *pl;
6081 int i;
6082
6083 amdgpu_dpm_print_class_info(rps->class, rps->class2);
6084 amdgpu_dpm_print_cap_info(rps->caps);
6085 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6086 for (i = 0; i < ps->performance_level_count; i++) {
6087 pl = &ps->performance_levels[i];
6088 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6089 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6090 }
6091 amdgpu_dpm_print_ps_status(adev, rps);
6092}
6093
6094static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6095{
6096 struct ci_power_info *pi = ci_get_pi(adev);
6097 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6098
6099 if (low)
6100 return requested_state->performance_levels[0].sclk;
6101 else
6102 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6103}
6104
6105static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6106{
6107 struct ci_power_info *pi = ci_get_pi(adev);
6108 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6109
6110 if (low)
6111 return requested_state->performance_levels[0].mclk;
6112 else
6113 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6114}
6115
6116/* get temperature in millidegrees */
6117static int ci_dpm_get_temp(struct amdgpu_device *adev)
6118{
6119 u32 temp;
6120 int actual_temp = 0;
6121
6122 temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6123 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6124
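	/* CTF_TEMP is a 9-bit reading; bit 9 appears to flag an out-of-range
	 * value, which is clamped to 255 before scaling to millidegrees.
	 */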
6125 if (temp & 0x200)
6126 actual_temp = 255;
6127 else
6128 actual_temp = temp & 0x1ff;
6129
6130 actual_temp = actual_temp * 1000;
6131
6132 return actual_temp;
6133}
6134
6135static int ci_set_temperature_range(struct amdgpu_device *adev)
6136{
6137 int ret;
6138
6139 ret = ci_thermal_enable_alert(adev, false);
6140 if (ret)
6141 return ret;
6142 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6143 CISLANDS_TEMP_RANGE_MAX);
6144 if (ret)
6145 return ret;
6146 ret = ci_thermal_enable_alert(adev, true);
6147 if (ret)
6148 return ret;
6149 return ret;
6150}
6151
6152 static int ci_dpm_early_init(void *handle)
6153 {
6154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6155
6156 ci_dpm_set_dpm_funcs(adev);
6157 ci_dpm_set_irq_funcs(adev);
6158
6159 return 0;
6160}
6161
6162 static int ci_dpm_late_init(void *handle)
6163{
6164 int ret;
6165 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6166
6167 if (!amdgpu_dpm)
6168 return 0;
6169
6170 /* init the sysfs and debugfs files late */
6171 ret = amdgpu_pm_sysfs_init(adev);
6172 if (ret)
6173 return ret;
6174
6175 ret = ci_set_temperature_range(adev);
6176 if (ret)
6177 return ret;
6178
6179 ci_dpm_powergate_uvd(adev, true);
6180
6181 return 0;
6182}
6183
6184 static int ci_dpm_sw_init(void *handle)
6185{
6186 int ret;
6187 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6188
6189 ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
6190 if (ret)
6191 return ret;
6192
6193 ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
6194 if (ret)
6195 return ret;
6196
6197 /* default to balanced state */
6198 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6199 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6200 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
6201 adev->pm.default_sclk = adev->clock.default_sclk;
6202 adev->pm.default_mclk = adev->clock.default_mclk;
6203 adev->pm.current_sclk = adev->clock.default_sclk;
6204 adev->pm.current_mclk = adev->clock.default_mclk;
6205 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6206
6207 if (amdgpu_dpm == 0)
6208 return 0;
6209
6210 ret = ci_dpm_init_microcode(adev);
6211 if (ret)
6212 return ret;
6213
6214 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6215 mutex_lock(&adev->pm.mutex);
6216 ret = ci_dpm_init(adev);
6217 if (ret)
6218 goto dpm_failed;
6219 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6220 if (amdgpu_dpm == 1)
6221 amdgpu_pm_print_power_states(adev);
6222 mutex_unlock(&adev->pm.mutex);
6223 DRM_INFO("amdgpu: dpm initialized\n");
6224
6225 return 0;
6226
6227dpm_failed:
6228 ci_dpm_fini(adev);
6229 mutex_unlock(&adev->pm.mutex);
6230 DRM_ERROR("amdgpu: dpm initialization failed\n");
6231 return ret;
6232}
6233
6234 static int ci_dpm_sw_fini(void *handle)
6235 {
6236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6237
6238 mutex_lock(&adev->pm.mutex);
6239 amdgpu_pm_sysfs_fini(adev);
6240 ci_dpm_fini(adev);
6241 mutex_unlock(&adev->pm.mutex);
6242
6243 release_firmware(adev->pm.fw);
6244 adev->pm.fw = NULL;
6245
6246 return 0;
6247}
6248
6249 static int ci_dpm_hw_init(void *handle)
6250{
6251 int ret;
6252
6253 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6254
6255 if (!amdgpu_dpm)
6256 return 0;
6257
6258 mutex_lock(&adev->pm.mutex);
6259 ci_dpm_setup_asic(adev);
6260 ret = ci_dpm_enable(adev);
6261 if (ret)
6262 adev->pm.dpm_enabled = false;
6263 else
6264 adev->pm.dpm_enabled = true;
6265 mutex_unlock(&adev->pm.mutex);
6266
6267 return ret;
6268}
6269
6270 static int ci_dpm_hw_fini(void *handle)
6271 {
6272 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6273
6274 if (adev->pm.dpm_enabled) {
6275 mutex_lock(&adev->pm.mutex);
6276 ci_dpm_disable(adev);
6277 mutex_unlock(&adev->pm.mutex);
6278 }
6279
6280 return 0;
6281}
6282
6283 static int ci_dpm_suspend(void *handle)
6284 {
6285 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6286
6287 if (adev->pm.dpm_enabled) {
6288 mutex_lock(&adev->pm.mutex);
6289 /* disable dpm */
6290 ci_dpm_disable(adev);
6291 /* reset the power state */
6292 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6293 mutex_unlock(&adev->pm.mutex);
6294 }
6295 return 0;
6296}
6297
6298 static int ci_dpm_resume(void *handle)
6299{
6300 int ret;
6301 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6302
6303 if (adev->pm.dpm_enabled) {
6304 /* asic init will reset to the boot state */
6305 mutex_lock(&adev->pm.mutex);
6306 ci_dpm_setup_asic(adev);
6307 ret = ci_dpm_enable(adev);
6308 if (ret)
6309 adev->pm.dpm_enabled = false;
6310 else
6311 adev->pm.dpm_enabled = true;
6312 mutex_unlock(&adev->pm.mutex);
6313 if (adev->pm.dpm_enabled)
6314 amdgpu_pm_compute_clocks(adev);
6315 }
6316 return 0;
6317}
6318
6319 static bool ci_dpm_is_idle(void *handle)
6320{
6321 /* XXX */
6322 return true;
6323}
6324
6325 static int ci_dpm_wait_for_idle(void *handle)
6326{
6327 /* XXX */
6328 return 0;
6329}
6330
6331 static int ci_dpm_soft_reset(void *handle)
6332{
6333 return 0;
6334}
6335
6336static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6337 struct amdgpu_irq_src *source,
6338 unsigned type,
6339 enum amdgpu_interrupt_state state)
6340{
6341 u32 cg_thermal_int;
6342
6343 switch (type) {
6344 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6345 switch (state) {
6346 case AMDGPU_IRQ_STATE_DISABLE:
6347 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6348 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6349 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6350 break;
6351 case AMDGPU_IRQ_STATE_ENABLE:
6352 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6353 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6354 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6355 break;
6356 default:
6357 break;
6358 }
6359 break;
6360
6361 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6362 switch (state) {
6363 case AMDGPU_IRQ_STATE_DISABLE:
6364 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6365 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6366 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6367 break;
6368 case AMDGPU_IRQ_STATE_ENABLE:
6369 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6370 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6371 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6372 break;
6373 default:
6374 break;
6375 }
6376 break;
6377
6378 default:
6379 break;
6380 }
6381 return 0;
6382}
6383
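/* Interrupt sources 230/231 signal the temperature crossing the low/high
 * thresholds; record the direction and kick the thermal work handler.
 */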
6384static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6385 struct amdgpu_irq_src *source,
6386 struct amdgpu_iv_entry *entry)
6387{
6388 bool queue_thermal = false;
6389
6390 if (entry == NULL)
6391 return -EINVAL;
6392
6393 switch (entry->src_id) {
6394 case 230: /* thermal low to high */
6395 DRM_DEBUG("IH: thermal low to high\n");
6396 adev->pm.dpm.thermal.high_to_low = false;
6397 queue_thermal = true;
6398 break;
6399 case 231: /* thermal high to low */
6400 DRM_DEBUG("IH: thermal high to low\n");
6401 adev->pm.dpm.thermal.high_to_low = true;
6402 queue_thermal = true;
6403 break;
6404 default:
6405 break;
6406 }
6407
6408 if (queue_thermal)
6409 schedule_work(&adev->pm.dpm.thermal.work);
6410
6411 return 0;
6412}
6413
6414 static int ci_dpm_set_clockgating_state(void *handle,
6415 enum amd_clockgating_state state)
6416{
6417 return 0;
6418}
6419
6420 static int ci_dpm_set_powergating_state(void *handle,
6421 enum amd_powergating_state state)
6422{
6423 return 0;
6424}
6425
6426static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
6427 enum pp_clock_type type, char *buf)
6428{
6429 struct ci_power_info *pi = ci_get_pi(adev);
6430 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6431 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6432 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6433
6434 int i, now, size = 0;
6435 uint32_t clock, pcie_speed;
6436
6437 switch (type) {
6438 case PP_SCLK:
6439 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6440 clock = RREG32(mmSMC_MSG_ARG_0);
6441
6442 for (i = 0; i < sclk_table->count; i++) {
6443 if (clock > sclk_table->dpm_levels[i].value)
6444 continue;
6445 break;
6446 }
6447 now = i;
6448
6449 for (i = 0; i < sclk_table->count; i++)
6450 size += sprintf(buf + size, "%d: %uMHz %s\n",
6451 i, sclk_table->dpm_levels[i].value / 100,
6452 (i == now) ? "*" : "");
6453 break;
6454 case PP_MCLK:
6455 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6456 clock = RREG32(mmSMC_MSG_ARG_0);
6457
6458 for (i = 0; i < mclk_table->count; i++) {
6459 if (clock > mclk_table->dpm_levels[i].value)
6460 continue;
6461 break;
6462 }
6463 now = i;
6464
6465 for (i = 0; i < mclk_table->count; i++)
6466 size += sprintf(buf + size, "%d: %uMHz %s\n",
6467 i, mclk_table->dpm_levels[i].value / 100,
6468 (i == now) ? "*" : "");
6469 break;
6470 case PP_PCIE:
6471 pcie_speed = ci_get_current_pcie_speed(adev);
6472 for (i = 0; i < pcie_table->count; i++) {
6473 if (pcie_speed != pcie_table->dpm_levels[i].value)
6474 continue;
6475 break;
6476 }
6477 now = i;
6478
6479 for (i = 0; i < pcie_table->count; i++)
6480 size += sprintf(buf + size, "%d: %s %s\n", i,
6481 (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
6482 (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
6483 (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
6484 (i == now) ? "*" : "");
6485 break;
6486 default:
6487 break;
6488 }
6489
6490 return size;
6491}
6492
6493static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
6494 enum pp_clock_type type, uint32_t mask)
6495{
6496 struct ci_power_info *pi = ci_get_pi(adev);
6497
6498 if (adev->pm.dpm.forced_level
6499 != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
6500 return -EINVAL;
6501
6502 switch (type) {
6503 case PP_SCLK:
6504 if (!pi->sclk_dpm_key_disabled)
6505 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6506 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6507 pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6508 break;
6509
6510 case PP_MCLK:
6511 if (!pi->mclk_dpm_key_disabled)
6512 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6513 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6514 pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6515 break;
6516
6517 case PP_PCIE:
6518 {
6519 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6520 uint32_t level = 0;
6521
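		/* Collapse the requested mask to the index of its highest set
		 * bit; the SMC forces a single PCIe level rather than a mask.
		 */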
6522 while (tmp >>= 1)
6523 level++;
6524
6525 if (!pi->pcie_dpm_key_disabled)
6526 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6527 PPSMC_MSG_PCIeDPM_ForceLevel,
6528 level);
6529 break;
6530 }
6531 default:
6532 break;
6533 }
6534
6535 return 0;
6536}
6537
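/* Overdrive is expressed as the percentage by which the top sclk/mclk
 * level exceeds the golden (default) table's top level.
 */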
6538static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
6539{
6540 struct ci_power_info *pi = ci_get_pi(adev);
6541 struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6542 struct ci_single_dpm_table *golden_sclk_table =
6543 &(pi->golden_dpm_table.sclk_table);
6544 int value;
6545
6546 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6547 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6548 100 /
6549 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6550
6551 return value;
6552}
6553
6554static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
6555{
6556 struct ci_power_info *pi = ci_get_pi(adev);
6557 struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6558 struct ci_single_dpm_table *golden_sclk_table =
6559 &(pi->golden_dpm_table.sclk_table);
6560
6561 if (value > 20)
6562 value = 20;
6563
6564 ps->performance_levels[ps->performance_level_count - 1].sclk =
6565 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6566 value / 100 +
6567 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6568
6569 return 0;
6570}
6571
6572static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
6573{
6574 struct ci_power_info *pi = ci_get_pi(adev);
6575 struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6576 struct ci_single_dpm_table *golden_mclk_table =
6577 &(pi->golden_dpm_table.mclk_table);
6578 int value;
6579
6580 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6581 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6582 100 /
6583 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6584
6585 return value;
6586}
6587
6588static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
6589{
6590 struct ci_power_info *pi = ci_get_pi(adev);
6591 struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6592 struct ci_single_dpm_table *golden_mclk_table =
6593 &(pi->golden_dpm_table.mclk_table);
6594
6595 if (value > 20)
6596 value = 20;
6597
6598 ps->performance_levels[ps->performance_level_count - 1].mclk =
6599 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6600 value / 100 +
6601 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6602
6603 return 0;
6604}
6605
6606 const struct amd_ip_funcs ci_dpm_ip_funcs = {
6607 .name = "ci_dpm",
6608 .early_init = ci_dpm_early_init,
6609 .late_init = ci_dpm_late_init,
6610 .sw_init = ci_dpm_sw_init,
6611 .sw_fini = ci_dpm_sw_fini,
6612 .hw_init = ci_dpm_hw_init,
6613 .hw_fini = ci_dpm_hw_fini,
6614 .suspend = ci_dpm_suspend,
6615 .resume = ci_dpm_resume,
6616 .is_idle = ci_dpm_is_idle,
6617 .wait_for_idle = ci_dpm_wait_for_idle,
6618 .soft_reset = ci_dpm_soft_reset,
6619 .set_clockgating_state = ci_dpm_set_clockgating_state,
6620 .set_powergating_state = ci_dpm_set_powergating_state,
6621};
6622
6623static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
6624 .get_temperature = &ci_dpm_get_temp,
6625 .pre_set_power_state = &ci_dpm_pre_set_power_state,
6626 .set_power_state = &ci_dpm_set_power_state,
6627 .post_set_power_state = &ci_dpm_post_set_power_state,
6628 .display_configuration_changed = &ci_dpm_display_configuration_changed,
6629 .get_sclk = &ci_dpm_get_sclk,
6630 .get_mclk = &ci_dpm_get_mclk,
6631 .print_power_state = &ci_dpm_print_power_state,
6632 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
6633 .force_performance_level = &ci_dpm_force_performance_level,
6634 .vblank_too_short = &ci_dpm_vblank_too_short,
6635 .powergate_uvd = &ci_dpm_powergate_uvd,
6636 .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
6637 .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
6638 .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
6639 .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
6640 .print_clock_levels = ci_dpm_print_clock_levels,
6641 .force_clock_level = ci_dpm_force_clock_level,
6642 .get_sclk_od = ci_dpm_get_sclk_od,
6643 .set_sclk_od = ci_dpm_set_sclk_od,
6644 .get_mclk_od = ci_dpm_get_mclk_od,
6645 .set_mclk_od = ci_dpm_set_mclk_od,
6646};
6647
6648static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
6649{
6650 if (adev->pm.funcs == NULL)
6651 adev->pm.funcs = &ci_dpm_funcs;
6652}
6653
6654static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
6655 .set = ci_dpm_set_interrupt_state,
6656 .process = ci_dpm_process_interrupt,
6657};
6658
6659static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
6660{
6661 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
6662 adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
6663}