/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
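
/*
 * The PCIe MAX_READ_REQUEST_SIZE field lives in bits 14:12 of the
 * device control register; encodings 0-5 select 128-4096 bytes and
 * 6-7 are reserved.  The helper below rewrites values this hardware
 * apparently cannot tolerate (0, 6, 7) to the encoding for 512 bytes.
 */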
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}

void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
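
/*
 * Latch a new scanout base for the given crtc: the GRPH update lock is
 * held while both primary and secondary surface addresses are written
 * so the hardware picks them up atomically, then the lock is dropped
 * and the current update-pending status is returned to the caller.
 */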
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}
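
/*
 * Populate the power-management profile table for Sumo parts.  Each
 * profile maps the dpms-on and dpms-off cases to a power state index
 * and a clock mode within that state; battery versus performance
 * states are only distinguished on mobility parts.
 */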
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
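
/*
 * Hotplug detect helpers.  Each of the six DC_HPDx blocks exposes a
 * sense bit (connector attached), an interrupt polarity control, and
 * an enable/timer register, all handled by the switches below.
 */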
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}

void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}

/* watermark setup */
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};
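
/*
 * The bandwidth helpers below work in MB/sec using the driver's
 * fixed20_12 type (20.12 fixed point): dfixed_const(), dfixed_mul()
 * and dfixed_div() build up each product and dfixed_trunc() drops the
 * fractional bits at the end.
 */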
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
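
/*
 * Program the per-pipe latency watermarks and per-crtc priority marks.
 * Watermark A is computed for the current (high) clocks; watermark B
 * is meant for the low clocks, but since the low-clock values are not
 * plugged into wm.yclk/wm.sclk yet, A and B currently come out equal.
 */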
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp)
			return;
		udelay(1);
	}
}
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}
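
/*
 * evergreen_mc_stop()/evergreen_mc_resume() bracket MC reprogramming:
 * stop() saves the VGA and CRTC controls and blanks every display
 * requestor so nothing touches VRAM while the aperture moves, and
 * resume() points the scanout surfaces at the new VRAM base before
 * restoring the saved state.
 */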
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
	}
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	if (rdev->flags & RADEON_IS_IGP) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
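
/*
 * CP (command processor): an indirect buffer is kicked off by writing
 * an INDIRECT_BUFFER packet carrying the IB's GPU address and length
 * in dwords.
 */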
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* set to DX10/11 mode */
	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(rdev, 1);
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
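
/*
 * Bring the CP online: send ME_INITIALIZE, release the micro engine
 * via CP_ME_CNTL, then replay the golden register state
 * (evergreen_default_state) between a CLEAR_STATE preamble so every
 * context starts from known values.
 */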
static int evergreen_cp_start(struct radeon_device *rdev)
{
	int r, i;
	u32 cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(rdev, evergreen_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev);

	return 0;
}

int evergreen_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	rdev->cp.wptr = 0;
	WREG32(CP_RB_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
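
/*
 * Build the tile-pipe-to-backend map: each enabled render backend is
 * assigned to a tile pipe, four bits per pipe, optionally swizzled so
 * consecutive pipes land on different backends.
 */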
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}
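
/*
 * Per-ASIC configuration: the switch below fills rdev->config.evergreen
 * with the shader engine, pipe, SIMD, backend, register-file and FIFO
 * sizing for each Evergreen/Northern Islands family member.
 */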
1645 static void evergreen_gpu_init(struct radeon_device
*rdev
)
1647 u32 cc_rb_backend_disable
= 0;
1648 u32 cc_gc_shader_pipe_config
;
1649 u32 gb_addr_config
= 0;
1650 u32 mc_shared_chmap
, mc_arb_ramcfg
;
1656 u32 sq_lds_resource_mgmt
;
1657 u32 sq_gpr_resource_mgmt_1
;
1658 u32 sq_gpr_resource_mgmt_2
;
1659 u32 sq_gpr_resource_mgmt_3
;
1660 u32 sq_thread_resource_mgmt
;
1661 u32 sq_thread_resource_mgmt_2
;
1662 u32 sq_stack_resource_mgmt_1
;
1663 u32 sq_stack_resource_mgmt_2
;
1664 u32 sq_stack_resource_mgmt_3
;
1665 u32 vgt_cache_invalidation
;
1666 u32 hdp_host_path_cntl
, tmp
;
1667 int i
, j
, num_shader_engines
, ps_thread_count
;
1669 switch (rdev
->family
) {
1672 rdev
->config
.evergreen
.num_ses
= 2;
1673 rdev
->config
.evergreen
.max_pipes
= 4;
1674 rdev
->config
.evergreen
.max_tile_pipes
= 8;
1675 rdev
->config
.evergreen
.max_simds
= 10;
1676 rdev
->config
.evergreen
.max_backends
= 4 * rdev
->config
.evergreen
.num_ses
;
1677 rdev
->config
.evergreen
.max_gprs
= 256;
1678 rdev
->config
.evergreen
.max_threads
= 248;
1679 rdev
->config
.evergreen
.max_gs_threads
= 32;
1680 rdev
->config
.evergreen
.max_stack_entries
= 512;
1681 rdev
->config
.evergreen
.sx_num_of_sets
= 4;
1682 rdev
->config
.evergreen
.sx_max_export_size
= 256;
1683 rdev
->config
.evergreen
.sx_max_export_pos_size
= 64;
1684 rdev
->config
.evergreen
.sx_max_export_smx_size
= 192;
1685 rdev
->config
.evergreen
.max_hw_contexts
= 8;
1686 rdev
->config
.evergreen
.sq_num_cf_insts
= 2;
1688 rdev
->config
.evergreen
.sc_prim_fifo_size
= 0x100;
1689 rdev
->config
.evergreen
.sc_hiz_tile_fifo_size
= 0x30;
1690 rdev
->config
.evergreen
.sc_earlyz_tile_fifo_size
= 0x130;
1693 rdev
->config
.evergreen
.num_ses
= 1;
1694 rdev
->config
.evergreen
.max_pipes
= 4;
1695 rdev
->config
.evergreen
.max_tile_pipes
= 4;
1696 rdev
->config
.evergreen
.max_simds
= 10;
1697 rdev
->config
.evergreen
.max_backends
= 4 * rdev
->config
.evergreen
.num_ses
;
1698 rdev
->config
.evergreen
.max_gprs
= 256;
1699 rdev
->config
.evergreen
.max_threads
= 248;
1700 rdev
->config
.evergreen
.max_gs_threads
= 32;
1701 rdev
->config
.evergreen
.max_stack_entries
= 512;
1702 rdev
->config
.evergreen
.sx_num_of_sets
= 4;
1703 rdev
->config
.evergreen
.sx_max_export_size
= 256;
1704 rdev
->config
.evergreen
.sx_max_export_pos_size
= 64;
1705 rdev
->config
.evergreen
.sx_max_export_smx_size
= 192;
1706 rdev
->config
.evergreen
.max_hw_contexts
= 8;
1707 rdev
->config
.evergreen
.sq_num_cf_insts
= 2;
1709 rdev
->config
.evergreen
.sc_prim_fifo_size
= 0x100;
1710 rdev
->config
.evergreen
.sc_hiz_tile_fifo_size
= 0x30;
1711 rdev
->config
.evergreen
.sc_earlyz_tile_fifo_size
= 0x130;
1714 rdev
->config
.evergreen
.num_ses
= 1;
1715 rdev
->config
.evergreen
.max_pipes
= 4;
1716 rdev
->config
.evergreen
.max_tile_pipes
= 4;
1717 rdev
->config
.evergreen
.max_simds
= 5;
1718 rdev
->config
.evergreen
.max_backends
= 2 * rdev
->config
.evergreen
.num_ses
;
1719 rdev
->config
.evergreen
.max_gprs
= 256;
1720 rdev
->config
.evergreen
.max_threads
= 248;
1721 rdev
->config
.evergreen
.max_gs_threads
= 32;
1722 rdev
->config
.evergreen
.max_stack_entries
= 256;
1723 rdev
->config
.evergreen
.sx_num_of_sets
= 4;
1724 rdev
->config
.evergreen
.sx_max_export_size
= 256;
1725 rdev
->config
.evergreen
.sx_max_export_pos_size
= 64;
1726 rdev
->config
.evergreen
.sx_max_export_smx_size
= 192;
1727 rdev
->config
.evergreen
.max_hw_contexts
= 8;
1728 rdev
->config
.evergreen
.sq_num_cf_insts
= 2;
1730 rdev
->config
.evergreen
.sc_prim_fifo_size
= 0x100;
1731 rdev
->config
.evergreen
.sc_hiz_tile_fifo_size
= 0x30;
1732 rdev
->config
.evergreen
.sc_earlyz_tile_fifo_size
= 0x130;
1736 rdev
->config
.evergreen
.num_ses
= 1;
1737 rdev
->config
.evergreen
.max_pipes
= 2;
1738 rdev
->config
.evergreen
.max_tile_pipes
= 2;
1739 rdev
->config
.evergreen
.max_simds
= 2;
1740 rdev
->config
.evergreen
.max_backends
= 1 * rdev
->config
.evergreen
.num_ses
;
1741 rdev
->config
.evergreen
.max_gprs
= 256;
1742 rdev
->config
.evergreen
.max_threads
= 192;
1743 rdev
->config
.evergreen
.max_gs_threads
= 16;
1744 rdev
->config
.evergreen
.max_stack_entries
= 256;
1745 rdev
->config
.evergreen
.sx_num_of_sets
= 4;
1746 rdev
->config
.evergreen
.sx_max_export_size
= 128;
1747 rdev
->config
.evergreen
.sx_max_export_pos_size
= 32;
1748 rdev
->config
.evergreen
.sx_max_export_smx_size
= 96;
1749 rdev
->config
.evergreen
.max_hw_contexts
= 4;
1750 rdev
->config
.evergreen
.sq_num_cf_insts
= 1;
1752 rdev
->config
.evergreen
.sc_prim_fifo_size
= 0x40;
1753 rdev
->config
.evergreen
.sc_hiz_tile_fifo_size
= 0x30;
1754 rdev
->config
.evergreen
.sc_earlyz_tile_fifo_size
= 0x130;
1757 rdev
->config
.evergreen
.num_ses
= 1;
1758 rdev
->config
.evergreen
.max_pipes
= 2;
1759 rdev
->config
.evergreen
.max_tile_pipes
= 2;
1760 rdev
->config
.evergreen
.max_simds
= 2;
1761 rdev
->config
.evergreen
.max_backends
= 1 * rdev
->config
.evergreen
.num_ses
;
1762 rdev
->config
.evergreen
.max_gprs
= 256;
1763 rdev
->config
.evergreen
.max_threads
= 192;
1764 rdev
->config
.evergreen
.max_gs_threads
= 16;
1765 rdev
->config
.evergreen
.max_stack_entries
= 256;
1766 rdev
->config
.evergreen
.sx_num_of_sets
= 4;
1767 rdev
->config
.evergreen
.sx_max_export_size
= 128;
1768 rdev
->config
.evergreen
.sx_max_export_pos_size
= 32;
1769 rdev
->config
.evergreen
.sx_max_export_smx_size
= 96;
1770 rdev
->config
.evergreen
.max_hw_contexts
= 4;
1771 rdev
->config
.evergreen
.sq_num_cf_insts
= 1;
1773 rdev
->config
.evergreen
.sc_prim_fifo_size
= 0x40;
1774 rdev
->config
.evergreen
.sc_hiz_tile_fifo_size
= 0x30;
1775 rdev
->config
.evergreen
.sc_earlyz_tile_fifo_size
= 0x130;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);
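	/* Shifting the all-ones mask left by the number of units actually
	 * present leaves a 1 bit for each absent unit; e.g. with
	 * max_backends = 2 and an 8-bit EVERGREEN_MAX_BACKENDS_MASK (0xff),
	 * (0xff << 2) & 0xff = 0xfc disables backends 2..7.
	 */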
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	if (rdev->flags & RADEON_IS_IGP)
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
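	/* NUM_PIPES() takes log2 of the pipe count, hence the
	 * 1/2/4/8 -> 0/1/2/3 mapping in the switch above.
	 */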
	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch (efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x0a:
			gb_backend_map = 0x77553300;
			break;
		case 0x05:
			gb_backend_map = 0x77552211;
			break;
		case 0x0f:
			gb_backend_map = 0x77443300;
			break;
		case 0x03:
			gb_backend_map = 0x66552211;
			break;
		case 0x50:
			gb_backend_map = 0x77552200;
			break;
		case 0x25:
			gb_backend_map = 0x66442200;
			break;
		case 0x01:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
			evergreen_get_tile_pipe_to_backend_map(rdev,
							       rdev->config.evergreen.max_tile_pipes,
							       rdev->config.evergreen.max_backends,
							       ((EVERGREEN_MAX_BACKENDS_MASK <<
								 rdev->config.evergreen.max_backends) &
								EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch (efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
			evergreen_get_tile_pipe_to_backend_map(rdev,
							       rdev->config.evergreen.max_tile_pipes,
							       rdev->config.evergreen.max_backends,
							       ((EVERGREEN_MAX_BACKENDS_MASK <<
								 rdev->config.evergreen.max_backends) &
								EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
		case CHIP_BARTS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00002200;
			break;
		default:
			gb_backend_map =
			evergreen_get_tile_pipe_to_backend_map(rdev,
							       rdev->config.evergreen.max_tile_pipes,
							       rdev->config.evergreen.max_backends,
							       ((EVERGREEN_MAX_BACKENDS_MASK <<
								 rdev->config.evergreen.max_backends) &
								EVERGREEN_MAX_BACKENDS_MASK));
		}
	}
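	/* gb_backend_map packs one nibble per tile pipe, so 0x76543210
	 * above is the identity mapping of pipes to render backends; the
	 * efuse paths pick a remap for parts with harvested backends.
	 */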
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else
		rdev->config.evergreen.tile_config |=
			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;
	rdev->config.evergreen.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	grbm_gfx_index |= SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);
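	/* SE_INDEX() steered the writes in the loop above at one shader
	 * engine at a time; broadcast is restored so that subsequent
	 * writes hit all SEs again.
	 */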
	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
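	/* Worked example: with max_gprs = 256 the (4 * 2) term reserves
	 * 8 GPRs for clause temporaries and the remaining 248 are split
	 * 12/6/4/4/3/3 32nds between PS/VS/GS/ES/HS/LS, i.e. 93, 46, 31,
	 * 31, 23 and 23 GPRs respectively.
	 */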
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);
	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);
	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
}
int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if (rdev->flags & RADEON_IS_IGP)
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if (rdev->flags & RADEON_IS_IGP) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
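/* vram_width example: MC_SHARED_CHMAP reporting four channels with a
 * 64-bit channel size yields a 256-bit effective memory bus.
 */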
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
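/* The soft reset only touches the gfx blocks; the MC is stopped and
 * resumed around it so in-flight VRAM traffic is not corrupted.
 */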
int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
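/* Only the first two CRTCs are masked unconditionally above; DCE4
 * parts with four or six display controllers take the num_crtc-guarded
 * branches as well.
 */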
int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->irq.sw_int) {
		DRM_DEBUG("evergreen_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    rdev->irq.pflip[2]) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    rdev->irq.pflip[3]) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    rdev->irq.pflip[4]) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    rdev->irq.pflip[5]) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}
static void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}
void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
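/* IH ring vectors are 16 bytes (four dwords), so skipping to
 * (wptr + 16) on overflow drops exactly the oldest, partially
 * overwritten vector.
 */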
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);
	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[2])
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[3])
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[4])
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[5])
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
static int evergreen_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}
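/* DCE5 (NI) parts additionally need MC firmware loaded before the CP
 * is brought up, hence the separate ni_* microcode path above.
 */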
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return 0;
}
int evergreen_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);
	r600_blit_suspend(rdev);

	return 0;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * does nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback functions
 * like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}
void evergreen_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}