drm/amdgpu: rename amdgpu_ip_funcs to amd_ip_funcs (v2)
[deliverable/linux.git] / drivers / gpu / drm / amd / amdgpu / cz_dpm.c
CommitLineData
aaa36a97
AD
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/seq_file.h>
26#include "drmP.h"
27#include "amdgpu.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_atombios.h"
30#include "vid.h"
31#include "vi_dpm.h"
32#include "amdgpu_dpm.h"
33#include "cz_dpm.h"
34#include "cz_ppsmc.h"
35#include "atom.h"
36
37#include "smu/smu_8_0_d.h"
38#include "smu/smu_8_0_sh_mask.h"
39#include "gca/gfx_8_0_d.h"
40#include "gca/gfx_8_0_sh_mask.h"
41#include "gmc/gmc_8_1_d.h"
42#include "bif/bif_5_1_d.h"
43#include "gfx_v8_0.h"
44
564ea790
SJ
45static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
46
aaa36a97
AD
47static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
48{
49 struct cz_ps *ps = rps->ps_priv;
50
51 return ps;
52}
53
54static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
55{
56 struct cz_power_info *pi = adev->pm.dpm.priv;
57
58 return pi;
59}
60
/*
 * Convert an 8-bit voltage index from the BIOS tables to a voltage value
 * (presumably millivolts — linear encoding: index 0 maps to 6200 and each
 * step subtracts 25).  @adev is unused but kept for interface symmetry.
 */
static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
						 uint16_t voltage)
{
	return (uint16_t)(6200 - voltage * 25);
}
68
69static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
70 struct amdgpu_clock_and_voltage_limits *table)
71{
72 struct cz_power_info *pi = cz_get_pi(adev);
73 struct amdgpu_clock_voltage_dependency_table *dep_table =
74 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
75
76 if (dep_table->count > 0) {
77 table->sclk = dep_table->entries[dep_table->count - 1].clk;
78 table->vddc = cz_convert_8bit_index_to_voltage(adev,
79 dep_table->entries[dep_table->count - 1].v);
80 }
81
82 table->mclk = pi->sys_info.nbp_memory_clock[0];
83
84}
85
/* Overlay of the possible ATOM IntegratedSystemInfo table revisions. */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
92
93static int cz_parse_sys_info_table(struct amdgpu_device *adev)
94{
95 struct cz_power_info *pi = cz_get_pi(adev);
96 struct amdgpu_mode_info *mode_info = &adev->mode_info;
97 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
98 union igp_info *igp_info;
99 u8 frev, crev;
100 u16 data_offset;
101 int i = 0;
102
103 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
104 &frev, &crev, &data_offset)) {
105 igp_info = (union igp_info *)(mode_info->atom_context->bios +
106 data_offset);
107
108 if (crev != 9) {
109 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
110 return -EINVAL;
111 }
112 pi->sys_info.bootup_sclk =
113 le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
114 pi->sys_info.bootup_uma_clk =
115 le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
116 pi->sys_info.dentist_vco_freq =
117 le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
118 pi->sys_info.bootup_nb_voltage_index =
119 le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
120
121 if (igp_info->info_9.ucHtcTmpLmt == 0)
122 pi->sys_info.htc_tmp_lmt = 203;
123 else
124 pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
125
126 if (igp_info->info_9.ucHtcHystLmt == 0)
127 pi->sys_info.htc_hyst_lmt = 5;
128 else
129 pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
130
131 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
132 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
133 return -EINVAL;
134 }
135
136 if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
137 pi->enable_nb_ps_policy)
138 pi->sys_info.nb_dpm_enable = true;
139 else
140 pi->sys_info.nb_dpm_enable = false;
141
142 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
143 if (i < CZ_NUM_NBPMEMORY_CLOCK)
144 pi->sys_info.nbp_memory_clock[i] =
145 le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
146 pi->sys_info.nbp_n_clock[i] =
147 le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
148 }
149
150 for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
151 pi->sys_info.display_clock[i] =
152 le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
153
154 for (i = 0; i < CZ_NUM_NBPSTATES; i++)
155 pi->sys_info.nbp_voltage_index[i] =
156 le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
157
158 if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
159 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
160 pi->caps_enable_dfs_bypass = true;
161
162 pi->sys_info.uma_channel_number =
163 igp_info->info_9.ucUMAChannelNumber;
164
165 cz_construct_max_power_limits_table(adev,
166 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
167 }
168
169 return 0;
170}
171
172static void cz_patch_voltage_values(struct amdgpu_device *adev)
173{
174 int i;
175 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
176 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
177 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
178 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
179 struct amdgpu_clock_voltage_dependency_table *acp_table =
180 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
181
182 if (uvd_table->count) {
183 for (i = 0; i < uvd_table->count; i++)
184 uvd_table->entries[i].v =
185 cz_convert_8bit_index_to_voltage(adev,
186 uvd_table->entries[i].v);
187 }
188
189 if (vce_table->count) {
190 for (i = 0; i < vce_table->count; i++)
191 vce_table->entries[i].v =
192 cz_convert_8bit_index_to_voltage(adev,
193 vce_table->entries[i].v);
194 }
195
196 if (acp_table->count) {
197 for (i = 0; i < acp_table->count; i++)
198 acp_table->entries[i].v =
199 cz_convert_8bit_index_to_voltage(adev,
200 acp_table->entries[i].v);
201 }
202
203}
204
205static void cz_construct_boot_state(struct amdgpu_device *adev)
206{
207 struct cz_power_info *pi = cz_get_pi(adev);
208
209 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
210 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
211 pi->boot_pl.ds_divider_index = 0;
212 pi->boot_pl.ss_divider_index = 0;
213 pi->boot_pl.allow_gnb_slow = 1;
214 pi->boot_pl.force_nbp_state = 0;
215 pi->boot_pl.display_wm = 0;
216 pi->boot_pl.vce_wm = 0;
217
218}
219
220static void cz_patch_boot_state(struct amdgpu_device *adev,
221 struct cz_ps *ps)
222{
223 struct cz_power_info *pi = cz_get_pi(adev);
224
225 ps->num_levels = 1;
226 ps->levels[0] = pi->boot_pl;
227}
228
/* Overlay of the per-ASIC pplib clock-info layouts; this driver uses .carrizo. */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};
234
/*
 * Decode one pplib clock-info entry into power-state level @index of @rps.
 *
 * The Carrizo clock info only stores an index into the vddc-vs-sclk
 * dependency table; sclk and the voltage index are looked up there.
 * ps->num_levels tracks the highest level decoded so far (the caller
 * invokes this with ascending @index).
 *
 * NOTE(review): clock_info->carrizo.index is not range-checked against
 * table->count — this assumes the VBIOS tables are self-consistent;
 * verify against the table parser.
 */
static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct cz_ps *ps = cz_get_ps(rps);
	struct cz_pl *pl = &ps->levels[index];
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	pl->sclk = table->entries[clock_info->carrizo.index].clk;
	pl->vddc_index = table->entries[clock_info->carrizo.index].v;

	ps->num_levels = index + 1;

	/* deep-sleep dividers only apply when sclk deep sleep is supported */
	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}
256
/*
 * Copy the generic (non-clock) pplib attributes of one power state into
 * @rps and register special states (boot, UVD) with the dpm core.
 *
 * Table revisions newer than ATOM_PPLIB_NONCLOCKINFO_VER1 carry UVD
 * vclk/dclk requirements; older revisions leave them at 0.
 */
static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct cz_ps *ps = cz_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	/* the boot state additionally gets its levels forced to boot_pl */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		cz_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}
284
/* Overlay of all power-play table revisions that may appear in the VBIOS. */
union power_info {
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
292
/* Overlay of the v1/v2 pplib power-state layouts; this driver reads .v2. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
297
298static int cz_parse_power_table(struct amdgpu_device *adev)
299{
300 struct amdgpu_mode_info *mode_info = &adev->mode_info;
301 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
302 union pplib_power_state *power_state;
303 int i, j, k, non_clock_array_index, clock_array_index;
304 union pplib_clock_info *clock_info;
305 struct _StateArray *state_array;
306 struct _ClockInfoArray *clock_info_array;
307 struct _NonClockInfoArray *non_clock_info_array;
308 union power_info *power_info;
309 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
310 u16 data_offset;
311 u8 frev, crev;
312 u8 *power_state_offset;
313 struct cz_ps *ps;
314
315 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
316 &frev, &crev, &data_offset))
317 return -EINVAL;
318 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
319
320 state_array = (struct _StateArray *)
321 (mode_info->atom_context->bios + data_offset +
322 le16_to_cpu(power_info->pplib.usStateArrayOffset));
323 clock_info_array = (struct _ClockInfoArray *)
324 (mode_info->atom_context->bios + data_offset +
325 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
326 non_clock_info_array = (struct _NonClockInfoArray *)
327 (mode_info->atom_context->bios + data_offset +
328 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
329
330 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
331 state_array->ucNumEntries, GFP_KERNEL);
332
333 if (!adev->pm.dpm.ps)
334 return -ENOMEM;
335
336 power_state_offset = (u8 *)state_array->states;
337 adev->pm.dpm.platform_caps =
338 le32_to_cpu(power_info->pplib.ulPlatformCaps);
339 adev->pm.dpm.backbias_response_time =
340 le16_to_cpu(power_info->pplib.usBackbiasTime);
341 adev->pm.dpm.voltage_response_time =
342 le16_to_cpu(power_info->pplib.usVoltageTime);
343
344 for (i = 0; i < state_array->ucNumEntries; i++) {
345 power_state = (union pplib_power_state *)power_state_offset;
346 non_clock_array_index = power_state->v2.nonClockInfoIndex;
347 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
348 &non_clock_info_array->nonClockInfo[non_clock_array_index];
349
350 ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
351 if (ps == NULL) {
352 kfree(adev->pm.dpm.ps);
353 return -ENOMEM;
354 }
355
356 adev->pm.dpm.ps[i].ps_priv = ps;
357 k = 0;
358 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
359 clock_array_index = power_state->v2.clockInfoIndex[j];
360 if (clock_array_index >= clock_info_array->ucNumEntries)
361 continue;
362 if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
363 break;
364 clock_info = (union pplib_clock_info *)
365 &clock_info_array->clockInfo[clock_array_index *
366 clock_info_array->ucEntrySize];
367 cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
368 k, clock_info);
369 k++;
370 }
371 cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
372 non_clock_info,
373 non_clock_info_array->ucEntrySize);
374 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
375 }
376 adev->pm.dpm.num_ps = state_array->ucNumEntries;
377
378 return 0;
379}
380
381static int cz_process_firmware_header(struct amdgpu_device *adev)
382{
383 struct cz_power_info *pi = cz_get_pi(adev);
384 u32 tmp;
385 int ret;
386
387 ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
388 offsetof(struct SMU8_Firmware_Header,
389 DpmTable),
390 &tmp, pi->sram_end);
391
392 if (ret == 0)
393 pi->dpm_table_start = tmp;
394
395 return ret;
396}
397
/*
 * cz_dpm_init - allocate and populate the CZ power-management state.
 *
 * Allocates the cz_power_info, fills in driver defaults, then parses the
 * VBIOS system-info and power-play tables and the SMU firmware header.
 *
 * Returns 0 on success or a negative error code.  On failure the partial
 * state is left attached to adev->pm.dpm.priv — the caller (sw_init) is
 * responsible for invoking cz_dpm_fini() to release it.
 */
static int cz_dpm_init(struct amdgpu_device *adev)
{
	struct cz_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
	if (NULL == pi)
		return -ENOMEM;

	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	pi->sram_end = SMC_RAM_END;

	/* set up DPM defaults */
	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		pi->active_target[i] = CZ_AT_DFLT;

	pi->mgcg_cgtt_local0 = 0x0;
	pi->mgcg_cgtt_local1 = 0x0;
	pi->clock_slow_down_step = 25000;
	pi->skip_clock_slow_down = 1;
	pi->enable_nb_ps_policy = 1;
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	/* DI/DT ramping stays off by default; the branch below is a
	 * convenience for turning all four ramping caps on together. */
	pi->didt_enabled = false;
	if (pi->didt_enabled) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}
	pi->caps_sclk_ds = true;
	pi->voting_clients = 0x00c00033;
	pi->auto_thermal_throttling_enabled = true;
	pi->bapm_enabled = false;
	pi->disable_nb_ps3_in_battery = false;
	pi->voltage_drop_threshold = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd: power gating only when the device advertises support */
	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;
	pi->nb_dpm_enabled_by_driver = true;
	pi->nb_dpm_enabled = false;
	pi->caps_voltage_island = false;
	/* flags which indicate need to upload pptable */
	pi->need_pptable_upload = true;

	ret = cz_parse_sys_info_table(adev);
	if (ret)
		return ret;

	/* voltage indices must be converted before states are constructed */
	cz_patch_voltage_values(adev);
	cz_construct_boot_state(adev);

	ret = cz_parse_power_table(adev);
	if (ret)
		return ret;

	ret = cz_process_firmware_header(adev);
	if (ret)
		return ret;

	pi->dpm_enabled = true;
	pi->uvd_dynamic_pg = false;

	return 0;
}
483
484static void cz_dpm_fini(struct amdgpu_device *adev)
485{
486 int i;
487
488 for (i = 0; i < adev->pm.dpm.num_ps; i++)
489 kfree(adev->pm.dpm.ps[i].ps_priv);
490
491 kfree(adev->pm.dpm.ps);
492 kfree(adev->pm.dpm.priv);
493 amdgpu_free_extended_power_table(adev);
494}
495
/*
 * Dump the currently selected sclk DPM level to a debugfs seq_file:
 * reads the SMC's current sclk profile index, then reports the matching
 * sclk from the vddc-vs-sclk table together with the voltage derived
 * from the SMU's current 8-bit voltage level.
 */
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= NUM_SCLK_LEVELS) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = table->entries[current_index].clk;
		/* SMU reports an 8-bit voltage level; convert it to a voltage */
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}
521
/*
 * Log one power state: its classification/caps, UVD clock requirements
 * and every hardware power level (sclk plus converted voltage).
 */
static void cz_dpm_print_power_state(struct amdgpu_device *adev,
				     struct amdgpu_ps *rps)
{
	int i;
	struct cz_ps *ps = cz_get_ps(rps);

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);

	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct cz_pl *pl = &ps->levels[i];

		DRM_INFO("\t\tpower level %d    sclk: %u vddc: %u\n",
			 i, pl->sclk,
			 cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}

	amdgpu_dpm_print_ps_status(adev, rps);
}
542
543static void cz_dpm_set_funcs(struct amdgpu_device *adev);
544
/* IP early_init callback: install the CZ dpm function table. */
static int cz_dpm_early_init(void *handle)
{
	cz_dpm_set_funcs((struct amdgpu_device *)handle);

	return 0;
}
553
564ea790 554
5fc3aeeb 555static int cz_dpm_late_init(void *handle)
564ea790 556{
5fc3aeeb 557 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
558
564ea790
SJ
559 /* powerdown unused blocks for now */
560 cz_dpm_powergate_uvd(adev, true);
561
562 return 0;
563}
564
/*
 * IP sw_init callback: record default clock/power-state bookkeeping and,
 * unless DPM is disabled on the command line (amdgpu_dpm == 0), build the
 * CZ dpm state, optionally print the parsed power states (amdgpu_dpm == 1)
 * and register the pm sysfs entries.  All dpm setup runs under pm.mutex.
 *
 * Returns 0 on success or the failing step's error code; on failure all
 * partially-initialized dpm state is torn down via cz_dpm_fini().
 */
static int cz_dpm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;
	/* fix me to add thermal support TODO */

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = cz_dpm_init(adev);
	if (ret)
		goto dpm_init_failed;

	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);

	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		goto dpm_init_failed;

	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_init_failed:
	/* cz_dpm_fini also releases whatever cz_dpm_init partially set up */
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");

	return ret;
}
609
5fc3aeeb 610static int cz_dpm_sw_fini(void *handle)
aaa36a97 611{
5fc3aeeb 612 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
613
aaa36a97
AD
614 mutex_lock(&adev->pm.mutex);
615 amdgpu_pm_sysfs_fini(adev);
616 cz_dpm_fini(adev);
617 mutex_unlock(&adev->pm.mutex);
618
619 return 0;
620}
621
622static void cz_reset_ap_mask(struct amdgpu_device *adev)
623{
624 struct cz_power_info *pi = cz_get_pi(adev);
625
626 pi->active_process_mask = 0;
627
628}
629
/* Thin wrapper: fetch the power-play table buffer from the SMU into *table. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
					    void **table)
{
	return cz_smu_download_pptable(adev, table);
}
639
640static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
641{
642 struct cz_power_info *pi = cz_get_pi(adev);
643 struct SMU8_Fusion_ClkTable *clock_table;
644 struct atom_clock_dividers dividers;
645 void *table = NULL;
646 uint8_t i = 0;
647 int ret = 0;
648
649 struct amdgpu_clock_voltage_dependency_table *vddc_table =
650 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
651 struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
652 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
653 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
654 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
655 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
656 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
657 struct amdgpu_clock_voltage_dependency_table *acp_table =
658 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
659
660 if (!pi->need_pptable_upload)
661 return 0;
662
663 ret = cz_dpm_download_pptable_from_smu(adev, &table);
664 if (ret) {
665 DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
666 return -EINVAL;
667 }
668
669 clock_table = (struct SMU8_Fusion_ClkTable *)table;
670 /* patch clock table */
671 if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
672 vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
673 uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
674 vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
675 acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
676 DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
677 return -EINVAL;
678 }
679
680 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
681
682 /* vddc sclk */
683 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
684 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
685 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
686 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
687 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
688 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
689 false, &dividers);
690 if (ret)
691 return ret;
692 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
693 (uint8_t)dividers.post_divider;
694
695 /* vddgfx sclk */
696 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
697 (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
698
699 /* acp breakdown */
700 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
701 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
702 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
703 (i < acp_table->count) ? acp_table->entries[i].clk : 0;
704 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
705 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
706 false, &dividers);
707 if (ret)
708 return ret;
709 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
710 (uint8_t)dividers.post_divider;
711
712 /* uvd breakdown */
713 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
714 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
715 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
716 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
717 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
718 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
719 false, &dividers);
720 if (ret)
721 return ret;
722 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
723 (uint8_t)dividers.post_divider;
724
725 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
726 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
727 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
728 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
729 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
730 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
731 false, &dividers);
732 if (ret)
733 return ret;
734 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
735 (uint8_t)dividers.post_divider;
736
737 /* vce breakdown */
738 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
739 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
740 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
741 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
742 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
743 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
744 false, &dividers);
745 if (ret)
746 return ret;
747 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
748 (uint8_t)dividers.post_divider;
749 }
750
751 /* its time to upload to SMU */
752 ret = cz_smu_upload_pptable(adev);
753 if (ret) {
754 DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
755 return ret;
756 }
757
758 return 0;
759}
760
761static void cz_init_sclk_limit(struct amdgpu_device *adev)
762{
763 struct cz_power_info *pi = cz_get_pi(adev);
764 struct amdgpu_clock_voltage_dependency_table *table =
765 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
766 uint32_t clock = 0, level;
767
768 if (!table || !table->count) {
769 DRM_ERROR("Invalid Voltage Dependency table.\n");
770 return;
771 }
772
773 pi->sclk_dpm.soft_min_clk = 0;
774 pi->sclk_dpm.hard_min_clk = 0;
775 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
776 level = cz_get_argument(adev);
777 if (level < table->count)
778 clock = table->entries[level].clk;
779 else {
780 DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
781 clock = table->entries[table->count - 1].clk;
782 }
783
784 pi->sclk_dpm.soft_max_clk = clock;
785 pi->sclk_dpm.hard_max_clk = clock;
786
787}
788
789static void cz_init_uvd_limit(struct amdgpu_device *adev)
790{
791 struct cz_power_info *pi = cz_get_pi(adev);
792 struct amdgpu_uvd_clock_voltage_dependency_table *table =
793 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
794 uint32_t clock = 0, level;
795
796 if (!table || !table->count) {
797 DRM_ERROR("Invalid Voltage Dependency table.\n");
798 return;
799 }
800
801 pi->uvd_dpm.soft_min_clk = 0;
802 pi->uvd_dpm.hard_min_clk = 0;
803 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
804 level = cz_get_argument(adev);
805 if (level < table->count)
806 clock = table->entries[level].vclk;
807 else {
808 DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
809 clock = table->entries[table->count - 1].vclk;
810 }
811
812 pi->uvd_dpm.soft_max_clk = clock;
813 pi->uvd_dpm.hard_max_clk = clock;
814
815}
816
817static void cz_init_vce_limit(struct amdgpu_device *adev)
818{
819 struct cz_power_info *pi = cz_get_pi(adev);
820 struct amdgpu_vce_clock_voltage_dependency_table *table =
821 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
822 uint32_t clock = 0, level;
823
824 if (!table || !table->count) {
825 DRM_ERROR("Invalid Voltage Dependency table.\n");
826 return;
827 }
828
829 pi->vce_dpm.soft_min_clk = 0;
830 pi->vce_dpm.hard_min_clk = 0;
831 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
832 level = cz_get_argument(adev);
833 if (level < table->count)
834 clock = table->entries[level].evclk;
835 else {
836 /* future BIOS would fix this error */
837 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
838 clock = table->entries[table->count - 1].evclk;
839 }
840
841 pi->vce_dpm.soft_max_clk = clock;
842 pi->vce_dpm.hard_max_clk = clock;
843
844}
845
846static void cz_init_acp_limit(struct amdgpu_device *adev)
847{
848 struct cz_power_info *pi = cz_get_pi(adev);
849 struct amdgpu_clock_voltage_dependency_table *table =
850 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
851 uint32_t clock = 0, level;
852
853 if (!table || !table->count) {
854 DRM_ERROR("Invalid Voltage Dependency table.\n");
855 return;
856 }
857
858 pi->acp_dpm.soft_min_clk = 0;
859 pi->acp_dpm.hard_min_clk = 0;
860 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
861 level = cz_get_argument(adev);
862 if (level < table->count)
863 clock = table->entries[level].clk;
864 else {
865 DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
866 clock = table->entries[table->count - 1].clk;
867 }
868
869 pi->acp_dpm.soft_max_clk = clock;
870 pi->acp_dpm.hard_max_clk = clock;
871
872}
873
874static void cz_init_pg_state(struct amdgpu_device *adev)
875{
876 struct cz_power_info *pi = cz_get_pi(adev);
877
878 pi->uvd_power_gated = false;
879 pi->vce_power_gated = false;
880 pi->acp_power_gated = false;
881
882}
883
884static void cz_init_sclk_threshold(struct amdgpu_device *adev)
885{
886 struct cz_power_info *pi = cz_get_pi(adev);
887
888 pi->low_sclk_interrupt_threshold = 0;
889
890}
891
/*
 * One-time ASIC-side dpm setup: clear bookkeeping, push the (patched)
 * power-play table to the SMU, then query per-block clock limits and
 * reset power-gating / sclk-threshold state.
 *
 * NOTE(review): the return value of cz_dpm_upload_pptable_to_smu() is
 * ignored here — confirm a failed upload is acceptable at this point.
 */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);
	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);
}
904
905static bool cz_check_smu_feature(struct amdgpu_device *adev,
906 uint32_t feature)
907{
908 uint32_t smu_feature = 0;
909 int ret;
910
911 ret = cz_send_msg_to_smc_with_parameter(adev,
912 PPSMC_MSG_GetFeatureStatus, 0);
913 if (ret) {
914 DRM_ERROR("Failed to get SMU features from SMC.\n");
915 return false;
916 } else {
917 smu_feature = cz_get_argument(adev);
918 if (feature & smu_feature)
919 return true;
920 }
921
922 return false;
923}
924
925static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
926{
927 if (cz_check_smu_feature(adev,
928 SMU_EnabledFeatureScoreboard_SclkDpmOn))
929 return true;
930
931 return false;
932}
933
/* Program the default Carrizo frequency-transition voting clients. */
static void cz_program_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
}
938
/* Clear all frequency-transition voting clients. */
static void cz_clear_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
943
944static int cz_start_dpm(struct amdgpu_device *adev)
945{
946 int ret = 0;
947
948 if (amdgpu_dpm) {
949 ret = cz_send_msg_to_smc_with_parameter(adev,
950 PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
951 if (ret) {
952 DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
953 return -EINVAL;
954 }
955 }
956
957 return 0;
958}
959
960static int cz_stop_dpm(struct amdgpu_device *adev)
961{
962 int ret = 0;
963
964 if (amdgpu_dpm && adev->pm.dpm_enabled) {
965 ret = cz_send_msg_to_smc_with_parameter(adev,
966 PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
967 if (ret) {
968 DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
969 return -EINVAL;
970 }
971 }
972
973 return 0;
974}
975
976static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
977 uint32_t clock, uint16_t msg)
978{
979 int i = 0;
980 struct amdgpu_clock_voltage_dependency_table *table =
981 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
982
983 switch (msg) {
984 case PPSMC_MSG_SetSclkSoftMin:
985 case PPSMC_MSG_SetSclkHardMin:
986 for (i = 0; i < table->count; i++)
987 if (clock <= table->entries[i].clk)
988 break;
989 if (i == table->count)
990 i = table->count - 1;
991 break;
992 case PPSMC_MSG_SetSclkSoftMax:
993 case PPSMC_MSG_SetSclkHardMax:
994 for (i = table->count - 1; i >= 0; i--)
995 if (clock >= table->entries[i].clk)
996 break;
997 if (i < 0)
998 i = 0;
999 break;
1000 default:
1001 break;
1002 }
1003
1004 return i;
1005}
1006
1007static int cz_program_bootup_state(struct amdgpu_device *adev)
1008{
1009 struct cz_power_info *pi = cz_get_pi(adev);
1010 uint32_t soft_min_clk = 0;
1011 uint32_t soft_max_clk = 0;
1012 int ret = 0;
1013
1014 pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
1015 pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
1016
1017 soft_min_clk = cz_get_sclk_level(adev,
1018 pi->sclk_dpm.soft_min_clk,
1019 PPSMC_MSG_SetSclkSoftMin);
1020 soft_max_clk = cz_get_sclk_level(adev,
1021 pi->sclk_dpm.soft_max_clk,
1022 PPSMC_MSG_SetSclkSoftMax);
1023
1024 ret = cz_send_msg_to_smc_with_parameter(adev,
1025 PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
1026 if (ret)
1027 return -EINVAL;
1028
1029 ret = cz_send_msg_to_smc_with_parameter(adev,
1030 PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
1031 if (ret)
1032 return -EINVAL;
1033
1034 return 0;
1035}
1036
/* TODO: disable clock/power gating before DI/DT programming — stub, always succeeds. */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1042
/* TODO */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	/* Placeholder: clock/power-gating re-enable is not implemented yet. */
	return 0;
}
1048
/* TODO */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	/* Placeholder: power-tune config register programming not done yet. */
	return 0;
}
1054
1055static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
1056{
1057 struct cz_power_info *pi = cz_get_pi(adev);
1058 uint32_t reg = 0;
1059
1060 if (pi->caps_sq_ramping) {
1061 reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
1062 if (enable)
1063 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1064 else
1065 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1066 WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
1067 }
1068 if (pi->caps_db_ramping) {
1069 reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
1070 if (enable)
1071 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
1072 else
1073 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
1074 WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
1075 }
1076 if (pi->caps_td_ramping) {
1077 reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
1078 if (enable)
1079 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
1080 else
1081 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
1082 WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
1083 }
1084 if (pi->caps_tcp_ramping) {
1085 reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
1086 if (enable)
1087 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1088 else
1089 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1090 WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
1091 }
1092
1093}
1094
/*
 * Program and toggle di/dt limiting.  The GFX block must be in "safe
 * mode" (CG/PG dropped) while the DIDT registers are written, so gating
 * is disabled around the update and restored afterwards.  Returns 0 on
 * success, -EINVAL if any step fails.
 */
static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	/* Only act if at least one sub-block has ramping capability. */
	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
			/* Drop CG/PG before touching DIDT registers. */
			ret = cz_disable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
		}

		ret = cz_program_pt_config_registers(adev);
		if (ret) {
			DRM_ERROR("Di/Dt config failed\n");
			return -EINVAL;
		}
		cz_do_enable_didt(adev, enable);

		if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
			/* Restore CG/PG now that the registers are written. */
			ret = cz_enable_cgpg(adev);
			if (ret) {
				DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
				return -EINVAL;
			}
			adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
		}
	}

	return 0;
}
1130
/* TODO */
static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
{
	/* Placeholder: ACP boot level reset is not implemented yet. */
}
1135
1136static void cz_update_current_ps(struct amdgpu_device *adev,
1137 struct amdgpu_ps *rps)
1138{
1139 struct cz_power_info *pi = cz_get_pi(adev);
1140 struct cz_ps *ps = cz_get_ps(rps);
1141
1142 pi->current_ps = *ps;
1143 pi->current_rps = *rps;
1144 pi->current_rps.ps_priv = ps;
1145
1146}
1147
1148static void cz_update_requested_ps(struct amdgpu_device *adev,
1149 struct amdgpu_ps *rps)
1150{
1151 struct cz_power_info *pi = cz_get_pi(adev);
1152 struct cz_ps *ps = cz_get_ps(rps);
1153
1154 pi->requested_ps = *ps;
1155 pi->requested_rps = *rps;
1156 pi->requested_rps.ps_priv = ps;
1157
1158}
1159
1160/* PP arbiter support needed TODO */
1161static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
1162 struct amdgpu_ps *new_rps,
1163 struct amdgpu_ps *old_rps)
1164{
1165 struct cz_ps *ps = cz_get_ps(new_rps);
1166 struct cz_power_info *pi = cz_get_pi(adev);
1167 struct amdgpu_clock_and_voltage_limits *limits =
1168 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1169 /* 10kHz memory clock */
1170 uint32_t mclk = 0;
1171
1172 ps->force_high = false;
1173 ps->need_dfs_bypass = true;
1174 pi->video_start = new_rps->dclk || new_rps->vclk ||
1175 new_rps->evclk || new_rps->ecclk;
1176
1177 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
1178 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
1179 pi->battery_state = true;
1180 else
1181 pi->battery_state = false;
1182
1183 if (pi->caps_stable_power_state)
1184 mclk = limits->mclk;
1185
1186 if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
1187 ps->force_high = true;
1188
1189}
1190
/*
 * Bring DPM up: program voting clients, start SCLK DPM on the SMU,
 * restore bootup clock limits, enable di/dt and record the boot power
 * state as current.  The SMU is sensitive to this ordering.
 */
static int cz_dpm_enable(struct amdgpu_device *adev)
{
	int ret = 0;

	/* renable will hang up SMU, so check first */
	if (cz_check_for_dpm_enabled(adev))
		return -EINVAL;

	cz_program_voting_clients(adev);

	ret = cz_start_dpm(adev);
	if (ret) {
		DRM_ERROR("Carrizo DPM enable failed\n");
		return -EINVAL;
	}

	ret = cz_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("Carrizo bootup state program failed\n");
		return -EINVAL;
	}

	ret = cz_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("Carrizo enable di/dt failed\n");
		return -EINVAL;
	}

	cz_reset_acp_boot_level(adev);

	cz_update_current_ps(adev, adev->pm.dpm.boot_ps);

	return 0;
}
1225
5fc3aeeb 1226static int cz_dpm_hw_init(void *handle)
aaa36a97 1227{
5fc3aeeb 1228 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
46651cc5 1229 int ret = 0;
aaa36a97
AD
1230
1231 mutex_lock(&adev->pm.mutex);
1232
1233 /* init smc in dpm hw init */
1234 ret = cz_smu_init(adev);
1235 if (ret) {
1236 DRM_ERROR("amdgpu: smc initialization failed\n");
1237 mutex_unlock(&adev->pm.mutex);
1238 return ret;
1239 }
1240
1241 /* do the actual fw loading */
1242 ret = cz_smu_start(adev);
1243 if (ret) {
1244 DRM_ERROR("amdgpu: smc start failed\n");
1245 mutex_unlock(&adev->pm.mutex);
1246 return ret;
1247 }
1248
46651cc5
SJ
1249 if (!amdgpu_dpm) {
1250 adev->pm.dpm_enabled = false;
1251 mutex_unlock(&adev->pm.mutex);
1252 return ret;
1253 }
1254
aaa36a97
AD
1255 /* cz dpm setup asic */
1256 cz_dpm_setup_asic(adev);
1257
1258 /* cz dpm enable */
1259 ret = cz_dpm_enable(adev);
1260 if (ret)
1261 adev->pm.dpm_enabled = false;
1262 else
1263 adev->pm.dpm_enabled = true;
1264
1265 mutex_unlock(&adev->pm.mutex);
1266
1267 return 0;
1268}
1269
1270static int cz_dpm_disable(struct amdgpu_device *adev)
1271{
1272 int ret = 0;
1273
1274 if (!cz_check_for_dpm_enabled(adev))
1275 return -EINVAL;
1276
1277 ret = cz_enable_didt(adev, false);
1278 if (ret) {
1279 DRM_ERROR("Carrizo disable di/dt failed\n");
1280 return -EINVAL;
1281 }
1282
564ea790
SJ
1283 /* powerup blocks */
1284 cz_dpm_powergate_uvd(adev, false);
1285
aaa36a97
AD
1286 cz_clear_voting_clients(adev);
1287 cz_stop_dpm(adev);
1288 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1289
1290 return 0;
1291}
1292
5fc3aeeb 1293static int cz_dpm_hw_fini(void *handle)
aaa36a97
AD
1294{
1295 int ret = 0;
5fc3aeeb 1296 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
aaa36a97
AD
1297
1298 mutex_lock(&adev->pm.mutex);
1299
1300 cz_smu_fini(adev);
1301
1302 if (adev->pm.dpm_enabled) {
1303 ret = cz_dpm_disable(adev);
aaa36a97
AD
1304
1305 adev->pm.dpm.current_ps =
1306 adev->pm.dpm.requested_ps =
1307 adev->pm.dpm.boot_ps;
1308 }
1309
1310 adev->pm.dpm_enabled = false;
1311
1312 mutex_unlock(&adev->pm.mutex);
1313
10457457 1314 return ret;
aaa36a97
AD
1315}
1316
5fc3aeeb 1317static int cz_dpm_suspend(void *handle)
aaa36a97
AD
1318{
1319 int ret = 0;
5fc3aeeb 1320 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
aaa36a97
AD
1321
1322 if (adev->pm.dpm_enabled) {
1323 mutex_lock(&adev->pm.mutex);
1324
1325 ret = cz_dpm_disable(adev);
aaa36a97
AD
1326
1327 adev->pm.dpm.current_ps =
1328 adev->pm.dpm.requested_ps =
1329 adev->pm.dpm.boot_ps;
1330
1331 mutex_unlock(&adev->pm.mutex);
1332 }
1333
10457457 1334 return ret;
aaa36a97
AD
1335}
1336
5fc3aeeb 1337static int cz_dpm_resume(void *handle)
aaa36a97
AD
1338{
1339 int ret = 0;
5fc3aeeb 1340 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
aaa36a97
AD
1341
1342 mutex_lock(&adev->pm.mutex);
1343 ret = cz_smu_init(adev);
1344 if (ret) {
1345 DRM_ERROR("amdgpu: smc resume failed\n");
1346 mutex_unlock(&adev->pm.mutex);
1347 return ret;
1348 }
1349
1350 /* do the actual fw loading */
1351 ret = cz_smu_start(adev);
1352 if (ret) {
1353 DRM_ERROR("amdgpu: smc start failed\n");
1354 mutex_unlock(&adev->pm.mutex);
1355 return ret;
1356 }
1357
46651cc5
SJ
1358 if (!amdgpu_dpm) {
1359 adev->pm.dpm_enabled = false;
1360 mutex_unlock(&adev->pm.mutex);
1361 return ret;
1362 }
1363
aaa36a97
AD
1364 /* cz dpm setup asic */
1365 cz_dpm_setup_asic(adev);
1366
1367 /* cz dpm enable */
1368 ret = cz_dpm_enable(adev);
1369 if (ret)
1370 adev->pm.dpm_enabled = false;
1371 else
1372 adev->pm.dpm_enabled = true;
1373
1374 mutex_unlock(&adev->pm.mutex);
1375 /* upon resume, re-compute the clocks */
1376 if (adev->pm.dpm_enabled)
1377 amdgpu_pm_compute_clocks(adev);
1378
1379 return 0;
1380}
1381
/* No clockgating control is implemented for the CZ DPM block. */
static int cz_dpm_set_clockgating_state(void *handle,
			enum amd_clockgating_state state)
{
	return 0;
}
1387
/* No powergating control is implemented for the CZ DPM block. */
static int cz_dpm_set_powergating_state(void *handle,
			enum amd_powergating_state state)
{
	return 0;
}
1393
/* borrowed from KV, need future unify */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
	int actual_temp = 0;
	/* 0xC0300E0C is the thermal status register (CG_MULT_THERMAL_STATUS
	 * on KV).  NOTE(review): the KV version masks out the CTF_TEMP
	 * field before converting; here the raw register is used — confirm
	 * the other bits are always zero on CZ. */
	uint32_t temp = RREG32_SMC(0xC0300E0C);

	/* Convert the raw reading to millidegrees Celsius. */
	if (temp)
		actual_temp = 1000 * ((temp / 8) - 49);

	return actual_temp;
}
1405
1406static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
1407{
1408 struct cz_power_info *pi = cz_get_pi(adev);
1409 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
1410 struct amdgpu_ps *new_ps = &requested_ps;
1411
1412 cz_update_requested_ps(adev, new_ps);
1413 cz_apply_state_adjust_rules(adev, &pi->requested_rps,
1414 &pi->current_rps);
1415
1416 return 0;
1417}
1418
/*
 * Recompute the SCLK soft minimum (floored at 75% of the AC limit in
 * stable-power-state mode) and push the resulting soft min/max window
 * to the SMU.
 */
static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	uint32_t clock, stable_ps_clock = 0;

	clock = pi->sclk_dpm.soft_min_clk;

	/* Stable power state: never run below 75% of the AC sclk limit. */
	if (pi->caps_stable_power_state) {
		stable_ps_clock = limits->sclk * 75 / 100;
		if (clock < stable_ps_clock)
			clock = stable_ps_clock;
	}

	/* Only message the SMU when the soft minimum actually changed. */
	if (clock != pi->sclk_dpm.soft_min_clk) {
		pi->sclk_dpm.soft_min_clk = clock;
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMin,
				cz_get_sclk_level(adev, clock,
					PPSMC_MSG_SetSclkSoftMin));
	}

	/* In stable-power-state mode pin the soft max to the same clock;
	 * otherwise (re)send the currently cached soft max. */
	if (pi->caps_stable_power_state &&
	    pi->sclk_dpm.soft_max_clk != clock) {
		pi->sclk_dpm.soft_max_clk = clock;
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev, clock,
					PPSMC_MSG_SetSclkSoftMax));
	} else {
		cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));
	}

	return 0;
}
1459
1460static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1461{
1462 int ret = 0;
1463 struct cz_power_info *pi = cz_get_pi(adev);
1464
1465 if (pi->caps_sclk_ds) {
1466 cz_send_msg_to_smc_with_parameter(adev,
1467 PPSMC_MSG_SetMinDeepSleepSclk,
1468 CZ_MIN_DEEP_SLEEP_SCLK);
1469 }
1470
1471 return ret;
1472}
1473
1474/* ?? without dal support, is this still needed in setpowerstate list*/
1475static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
1476{
1477 int ret = 0;
1478 struct cz_power_info *pi = cz_get_pi(adev);
1479
1480 cz_send_msg_to_smc_with_parameter(adev,
1481 PPSMC_MSG_SetWatermarkFrequency,
1482 pi->sclk_dpm.soft_max_clk);
1483
1484 return ret;
1485}
1486
1487static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
1488{
1489 int ret = 0;
1490 struct cz_power_info *pi = cz_get_pi(adev);
1491
1492 /* also depend on dal NBPStateDisableRequired */
1493 if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
1494 ret = cz_send_msg_to_smc_with_parameter(adev,
1495 PPSMC_MSG_EnableAllSmuFeatures,
1496 NB_DPM_MASK);
1497 if (ret) {
1498 DRM_ERROR("amdgpu: nb dpm enable failed\n");
1499 return ret;
1500 }
1501 pi->nb_dpm_enabled = true;
1502 }
1503
1504 return ret;
1505}
1506
1507static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
1508 bool enable)
1509{
1510 if (enable)
1511 cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
1512 else
1513 cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
1514
1515}
1516
1517static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1518{
1519 int ret = 0;
1520 struct cz_power_info *pi = cz_get_pi(adev);
1521 struct cz_ps *ps = &pi->requested_ps;
1522
1523 if (pi->sys_info.nb_dpm_enable) {
1524 if (ps->force_high)
1525 cz_dpm_nbdpm_lm_pstate_enable(adev, true);
1526 else
1527 cz_dpm_nbdpm_lm_pstate_enable(adev, false);
1528 }
1529
1530 return ret;
1531}
1532
/* with dpm enabled */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
	/* Apply the requested state: clock limits first, then deep sleep,
	 * watermark, NB DPM and the low-memory P-state. */
	cz_dpm_update_sclk_limit(adev);
	cz_dpm_set_deep_sleep_sclk_threshold(adev);
	cz_dpm_set_watermark_threshold(adev);
	cz_dpm_enable_nbdpm(adev);
	cz_dpm_update_low_memory_pstate(adev);

	return 0;
}
1546
1547static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
1548{
1549 struct cz_power_info *pi = cz_get_pi(adev);
1550 struct amdgpu_ps *ps = &pi->requested_rps;
1551
1552 cz_update_current_ps(adev, ps);
1553
1554}
1555
1556static int cz_dpm_force_highest(struct amdgpu_device *adev)
1557{
1558 struct cz_power_info *pi = cz_get_pi(adev);
1559 int ret = 0;
1560
1561 if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
1562 pi->sclk_dpm.soft_min_clk =
1563 pi->sclk_dpm.soft_max_clk;
1564 ret = cz_send_msg_to_smc_with_parameter(adev,
1565 PPSMC_MSG_SetSclkSoftMin,
1566 cz_get_sclk_level(adev,
1567 pi->sclk_dpm.soft_min_clk,
1568 PPSMC_MSG_SetSclkSoftMin));
1569 if (ret)
1570 return ret;
1571 }
1572
1573 return ret;
1574}
1575
1576static int cz_dpm_force_lowest(struct amdgpu_device *adev)
1577{
1578 struct cz_power_info *pi = cz_get_pi(adev);
1579 int ret = 0;
1580
1581 if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
1582 pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
1583 ret = cz_send_msg_to_smc_with_parameter(adev,
1584 PPSMC_MSG_SetSclkSoftMax,
1585 cz_get_sclk_level(adev,
1586 pi->sclk_dpm.soft_max_clk,
1587 PPSMC_MSG_SetSclkSoftMax));
1588 if (ret)
1589 return ret;
1590 }
1591
1592 return ret;
1593}
1594
/*
 * Query (and cache) the number of SCLK DPM levels from the SMU.
 * NOTE(review): the return type is uint32_t, so the -EINVAL error path
 * comes back as a huge unsigned value; the only caller compensates by
 * range-checking against the dependency table — confirm before adding
 * new callers.
 */
static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	/* The SMU reply is the top 0-based level, hence the +1. */
	if (!pi->max_sclk_level) {
		cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
		pi->max_sclk_level = cz_get_argument(adev) + 1;
	}

	if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
		DRM_ERROR("Invalid max sclk level!\n");
		return -EINVAL;
	}

	return pi->max_sclk_level;
}
1611
/*
 * Drop any forced level: widen the SCLK soft min/max window back to the
 * full range of the vddc/sclk dependency table and notify the SMU.
 */
static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *dep_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	uint32_t level = 0;
	int ret = 0;

	pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
	/* Clamp the SMU-reported top level to the dependency table size
	 * (this also absorbs the unsigned -EINVAL from the helper). */
	level = cz_dpm_get_max_sclk_level(adev) - 1;
	if (level < dep_table->count)
		pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
	else
		pi->sclk_dpm.soft_max_clk =
			dep_table->entries[dep_table->count - 1].clk;

	/* get min/max sclk soft value
	 * notify SMU to execute */
	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMin,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_min_clk,
					PPSMC_MSG_SetSclkSoftMin));
	if (ret)
		return ret;

	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax,
				cz_get_sclk_level(adev,
					pi->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));
	if (ret)
		return ret;

	DRM_INFO("DPM unforce state min=%d, max=%d.\n",
		 pi->sclk_dpm.soft_min_clk,
		 pi->sclk_dpm.soft_max_clk);

	return 0;
}
1652
1653static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1654 enum amdgpu_dpm_forced_level level)
1655{
1656 int ret = 0;
1657
1658 switch (level) {
1659 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1660 ret = cz_dpm_force_highest(adev);
1661 if (ret)
1662 return ret;
1663 break;
1664 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1665 ret = cz_dpm_force_lowest(adev);
1666 if (ret)
1667 return ret;
1668 break;
1669 case AMDGPU_DPM_FORCED_LEVEL_AUTO:
1670 ret = cz_dpm_unforce_dpm_levels(adev);
1671 if (ret)
1672 return ret;
1673 break;
1674 default:
1675 break;
1676 }
1677
1678 return ret;
1679}
1680
/* fix me, display configuration change lists here
 * mostly dal related*/
static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
	/* Placeholder: nothing reacts to display config changes yet. */
}
1686
1687static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
1688{
1689 struct cz_power_info *pi = cz_get_pi(adev);
1690 struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
1691
1692 if (low)
1693 return requested_state->levels[0].sclk;
1694 else
1695 return requested_state->levels[requested_state->num_levels - 1].sclk;
1696
1697}
1698
/* Memory clock is fixed on this APU; report the bootup UMA clock
 * regardless of @low. */
static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	return pi->sys_info.bootup_uma_clk;
}
1705
564ea790
SJ
1706static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
1707{
1708 struct cz_power_info *pi = cz_get_pi(adev);
1709 int ret = 0;
1710
1711 if (enable && pi->caps_uvd_dpm ) {
1712 pi->dpm_flags |= DPMFlags_UVD_Enabled;
1713 DRM_DEBUG("UVD DPM Enabled.\n");
1714
1715 ret = cz_send_msg_to_smc_with_parameter(adev,
1716 PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK);
1717 } else {
1718 pi->dpm_flags &= ~DPMFlags_UVD_Enabled;
1719 DRM_DEBUG("UVD DPM Stopped\n");
1720
1721 ret = cz_send_msg_to_smc_with_parameter(adev,
1722 PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK);
1723 }
1724
1725 return ret;
1726}
1727
/* Gating UVD means disabling its DPM, and vice versa. */
static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	return cz_enable_uvd_dpm(adev, !gate);
}
1732
1733
/*
 * Gate or ungate UVD power.  Gating: ungate clocks so the block can be
 * shut down cleanly, power-gate the IP block, disable UVD DPM, then ask
 * the SMU to cut power.  Ungating reverses that sequence.
 */
static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	/* No state change requested. */
	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			/* disable clockgating so we can properly shut down the block */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							AMD_CG_STATE_UNGATE);
			/* shutdown the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							AMD_PG_STATE_GATE);
			/* XXX: check for errors */
		}
		cz_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			/* power on the UVD block; dynamic PG is a SMU-side
			 * mode selected by the message parameter */
			if (pi->uvd_dynamic_pg)
				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
			else
				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
			/* re-init the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							AMD_PG_STATE_UNGATE);
			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							AMD_CG_STATE_GATE);
			/* XXX: check for errors */
		}
		cz_update_uvd_dpm(adev, gate);
	}
}
1776
/* IP-block entry points used by the amdgpu core for the CZ DPM block. */
const struct amd_ip_funcs cz_dpm_ip_funcs = {
	.early_init = cz_dpm_early_init,
	.late_init = cz_dpm_late_init,
	.sw_init = cz_dpm_sw_init,
	.sw_fini = cz_dpm_sw_fini,
	.hw_init = cz_dpm_hw_init,
	.hw_fini = cz_dpm_hw_fini,
	.suspend = cz_dpm_suspend,
	.resume = cz_dpm_resume,
	.is_idle = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.print_status = NULL,
	.set_clockgating_state = cz_dpm_set_clockgating_state,
	.set_powergating_state = cz_dpm_set_powergating_state,
};
1793
/* DPM callbacks exposed to the generic amdgpu power-management layer. */
static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
	.get_temperature = cz_dpm_get_temperature,
	.pre_set_power_state = cz_dpm_pre_set_power_state,
	.set_power_state = cz_dpm_set_power_state,
	.post_set_power_state = cz_dpm_post_set_power_state,
	.display_configuration_changed = cz_dpm_display_configuration_changed,
	.get_sclk = cz_dpm_get_sclk,
	.get_mclk = cz_dpm_get_mclk,
	.print_power_state = cz_dpm_print_power_state,
	.debugfs_print_current_performance_level =
			cz_dpm_debugfs_print_current_performance_level,
	.force_performance_level = cz_dpm_force_dpm_level,
	.vblank_too_short = NULL,
	.powergate_uvd = cz_dpm_powergate_uvd,
};
1809
1810static void cz_dpm_set_funcs(struct amdgpu_device *adev)
1811{
1812 if (NULL == adev->pm.funcs)
1813 adev->pm.funcs = &cz_dpm_funcs;
1814}
This page took 0.103419 seconds and 5 git commands to generate.