/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

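/*
 * power_dpm_state (sysfs, RW)
 *
 * Reading returns the current user power state as "battery", "balanced"
 * or "performance". Writing one of those strings requests the matching
 * amd_pm_state_type: with powerplay the request is dispatched as an
 * ENABLE_USER_STATE event, otherwise the legacy dpm user_state is
 * updated and the clocks are recomputed (unless a PX card is powered
 * off).
 */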
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

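/*
 * power_dpm_force_performance_level (sysfs, RW)
 *
 * Reading returns the currently forced level: "auto", "low", "high" or,
 * with powerplay, "manual". Writing one of those strings forces that
 * level; the write is rejected while the card is powered off (PX) or,
 * on the legacy dpm path, while a thermal state is active.
 */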
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
							struct device_attribute *attr,
							char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
							struct device_attribute *attr,
							const char *buf,
							size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

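/*
 * pp_num_states, pp_cur_state and pp_force_state (sysfs)
 *
 * pp_num_states lists the power states known to powerplay,
 * pp_cur_state reports the index of the state currently in use, and
 * pp_force_state pins one of the listed indices (an empty write clears
 * the forced state again). Only user-selectable states, not the boot
 * or default states, can be forced.
 */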
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	if (adev->pp_enabled)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->pp_enabled) {

		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i;

	if (adev->pp_force_state_enabled && adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;

		return snprintf(buf, PAGE_SIZE, "%d\n", i);
	} else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->pp_enabled) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];
		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}

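/*
 * pp_table (sysfs, RW)
 *
 * Exposes the raw powerplay table: reading copies at most one page of
 * the table into the sysfs buffer, writing hands the user supplied
 * table to powerplay via amdgpu_dpm_set_pp_table().
 */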
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size;

	if (adev->pp_enabled)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}

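/*
 * pp_dpm_sclk, pp_dpm_mclk and pp_dpm_pcie (sysfs, RW)
 *
 * Reading prints the available sclk/mclk/pcie levels. On writes, every
 * character other than '\n' is parsed as a single level index and
 * OR-ed into a bit mask of allowed levels, which is then applied via
 * the force_clock_level path; any non-numeric character fails the
 * write with -EINVAL.
 */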
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else if (adev->pm.funcs->print_clock_levels)
		size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
	else if (adev->pm.funcs->force_clock_level)
		adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else if (adev->pm.funcs->print_clock_levels)
		size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
	else if (adev->pm.funcs->force_clock_level)
		adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else if (adev->pm.funcs->print_clock_levels)
		size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;

		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else if (adev->pm.funcs->force_clock_level)
		adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
fail:
	return count;
}

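/*
 * pp_sclk_od and pp_mclk_od (sysfs, RW)
 *
 * Report and set the engine/memory clock overdrive value. Writes go to
 * powerplay (followed by a READJUST_POWER_STATE event) or, on the
 * legacy dpm path, to the set_sclk_od/set_mclk_od callbacks followed
 * by a clock recompute from the boot state.
 */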
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->pp_enabled)
		value = amdgpu_dpm_get_sclk_od(adev);
	else if (adev->pm.funcs->get_sclk_od)
		value = adev->pm.funcs->get_sclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
	} else if (adev->pm.funcs->set_sclk_od) {
		adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->pp_enabled)
		value = amdgpu_dpm_get_mclk_od(adev);
	else if (adev->pm.funcs->get_mclk_od)
		value = adev->pm.funcs->get_mclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
	} else if (adev->pm.funcs->set_mclk_od) {
		adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);

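/*
 * hwmon interface: temp1_input reports the GPU temperature,
 * temp1_crit/temp1_crit_hyst report the dpm thermal thresholds, pwm1
 * exposes the fan speed on a 0-255 scale (converted to a percentage
 * internally) and pwm1_enable selects static fan control (1) versus
 * the fuse/smc-controlled mode (any other value).
 */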
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

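/*
 * Pick the amdgpu_ps that best matches the requested dpm state: user
 * states are matched against their ATOM UI classification (preferring
 * single-display-only states when at most one crtc is active),
 * internal states against their classification flags, and an
 * unmatched request falls back to a progressively more generic state.
 */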
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

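/*
 * Switch to the requested power state; the caller holds pm.mutex.
 * Picks a matching state, skips the reprogram when nothing relevant
 * changed, and otherwise updates display watermarks, drains the rings,
 * programs the new state and reapplies any forced performance level.
 */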
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

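/*
 * Register the hwmon device and the power_dpm_state, performance level
 * and pp sysfs files. The pp state/table files are only created when
 * powerplay is enabled, and on the legacy dpm path the whole init is
 * skipped when no temperature callback is available.
 */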
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}

	if (adev->pp_enabled) {
		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
		if (ret) {
			DRM_ERROR("failed to create device file pp_num_states\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_cur_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_force_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_table);
		if (ret) {
			DRM_ERROR("failed to create device file pp_table\n");
			return ret;
		}
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}

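/*
 * Recompute the clocks/power state after a display or load change:
 * with powerplay this drains the rings and dispatches a display
 * configuration change event; on the legacy dpm path it recounts the
 * active crtcs, refreshes the AC/battery status and performs the
 * locked power state change.
 */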
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}