/* drivers/gpu/drm/radeon/cik.c */
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
#include "radeon_kfd.h"

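/*
 * Two firmware naming schemes are declared below: the legacy all-caps
 * images (e.g. BONAIRE_pfp.bin) are raw ucode blobs, while the lowercase
 * ones (e.g. bonaire_pfp.bin) are the newer packaged images with headers.
 * cik_init_microcode() tries the new names first and falls back to the
 * legacy ones (tracked via rdev->new_fw later in this file).
 */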
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");

MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin");
MODULE_FIRMWARE("radeon/bonaire_ce.bin");
MODULE_FIRMWARE("radeon/bonaire_mec.bin");
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_smc.bin");

MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");

MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
MODULE_FIRMWARE("radeon/hawaii_me.bin");
MODULE_FIRMWARE("radeon/hawaii_ce.bin");
MODULE_FIRMWARE("radeon/hawaii_mec.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");

MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");

MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
MODULE_FIRMWARE("radeon/kaveri_me.bin");
MODULE_FIRMWARE("radeon/kaveri_ce.bin");
MODULE_FIRMWARE("radeon/kaveri_mec.bin");
MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");

MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");

MODULE_FIRMWARE("radeon/kabini_pfp.bin");
MODULE_FIRMWARE("radeon/kabini_me.bin");
MODULE_FIRMWARE("radeon/kabini_ce.bin");
MODULE_FIRMWARE("radeon/kabini_mec.bin");
MODULE_FIRMWARE("radeon/kabini_rlc.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");

MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
MODULE_FIRMWARE("radeon/MULLINS_me.bin");
MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");

MODULE_FIRMWARE("radeon/mullins_pfp.bin");
MODULE_FIRMWARE("radeon/mullins_me.bin");
MODULE_FIRMWARE("radeon/mullins_ce.bin");
MODULE_FIRMWARE("radeon/mullins_mec.bin");
MODULE_FIRMWARE("radeon/mullins_rlc.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");

extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
static void cik_fini_pg(struct radeon_device *rdev);
static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable);

/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

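	/*
	 * CTF_TEMP is a 10-bit field; bit 9 set appears to flag an
	 * out-of-range reading, so clamp to the 255 C maximum (the 0x1ff
	 * mask below keeps the nine value bits).
	 */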
	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/* get temperature in millidegrees */
int kv_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = RREG32_SMC(0xC0300E0C);

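	/*
	 * Conversion constants below are presumably empirical: the SMC
	 * reading is scaled by 1/8 degree units with a 49 C bias, and a
	 * zero read is treated as "no reading" (assumption).
	 */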
	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/*
 * Indirect registers accessor
 */
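/*
 * PCIE config registers are reached through an index/data pair: write the
 * register offset to PCIE_INDEX, then access PCIE_DATA.  The dummy
 * read-backs below flush the posted writes before the paired access, and
 * the spinlock keeps the two-step sequence atomic.
 */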
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	r = RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	WREG32(PCIE_DATA, v);
	(void)RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}

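/*
 * RLC save/restore lists.  Best-effort reading of the encoding (an
 * assumption, not documented here): entries come in pairs of
 *   ((instance select) << 16) | (register byte offset >> 2), <init value>,
 * where the high halfword looks like a GRBM_GFX_INDEX-style broadcast or
 * per-SE instance select, and the bare counts (0x3, 0x5) that appear
 * mid-list delimit sub-sections consumed by the RLC ucode.
 */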
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

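/*
 * "Golden" register tables: {reg, and_mask, or_mask} triplets applied by
 * radeon_program_register_sequence() -- a read/modify/write of the masked
 * bits, or a straight write when and_mask is 0xffffffff.
 */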
static const u32 bonaire_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 bonaire_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 bonaire_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x3350, 0x000c0fc0, 0x00040200,
	0x9a10, 0x00010000, 0x00058208,
	0x3c000, 0xffff1fff, 0x00140000,
	0x3c200, 0xfdfc0fff, 0x00000100,
	0x3c234, 0x40000000, 0x40000200,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x220c, 0x00007fb6, 0x0021a1b1,
	0x2210, 0x00007fb6, 0x002021b1,
	0x2180, 0x00007fb6, 0x00002191,
	0x2218, 0x00007fb6, 0x002121b1,
	0x221c, 0x00007fb6, 0x002021b1,
	0x21dc, 0x00007fb6, 0x00002191,
	0x21e0, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000003f, 0x00000007,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0x9100, 0x03000000, 0x0362c688,
	0x8c00, 0x000000ff, 0x00000001,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac0c, 0xffffffff, 0x00001032
};

static const u32 bonaire_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0xc0000100,
	0x3c2c8, 0xffffffff, 0xc0000100,
	0x3c2c4, 0xffffffff, 0xc0000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 spectre_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 spectre_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 spectre_golden_registers[] =
{
	0x3c000, 0xffff1fff, 0x96940200,
	0x3c00c, 0xffff0001, 0xff000000,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffc, 0x00020200,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x2f48, 0x73773777, 0x12010001,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x28354, 0x0000003f, 0x00000000,
	0x3e78, 0x00000001, 0x00000002,
	0x913c, 0xffff03df, 0x00000004,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000008ff, 0x00000800,
	0x9508, 0x00010000, 0x00010000,
	0xac0c, 0xffffffff, 0x54763210,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x30934, 0xffffffff, 0x00000001
};

static const u32 spectre_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 kalindi_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 kalindi_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 kalindi_golden_registers[] =
{
	0x3c000, 0xffffdfff, 0x6e944040,
	0x55e4, 0xff607fff, 0xfc000100,
	0x3c220, 0xff000fff, 0x00000100,
	0x3c224, 0xff000fff, 0x00000100,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x000fffff, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ffcfff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000000ff, 0x00000003,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d4, 0x0000001f, 0x00000010,
	0x30934, 0xffffffff, 0x00000000
};

static const u32 kalindi_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 hawaii_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 hawaii_golden_common_registers[] =
{
	0x30800, 0xffffffff, 0xe0000000,
	0x28350, 0xffffffff, 0x3a00161a,
	0x28354, 0xffffffff, 0x0000002e,
	0x9a10, 0xffffffff, 0x00018208,
	0x98f8, 0xffffffff, 0x12011003
};

static const u32 hawaii_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x9a10, 0x00010000, 0x00058208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x2120, 0x0000007f, 0x0000001b,
	0x21dc, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0xc770, 0x00000f00, 0x00000800,
	0xc774, 0x00000f00, 0x00000800,
	0xc798, 0x00ffffff, 0x00ff7fbf,
	0xc79c, 0x00ffffff, 0x00ff7faf,
	0x8c00, 0x000000ff, 0x00000800,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xae00, 0x00100000, 0x000ff07c,
	0xac14, 0x000003ff, 0x0000000f,
	0xac10, 0xffffffff, 0x7564fdec,
	0xac0c, 0xffffffff, 0x3120b9a8,
	0xac08, 0x20000000, 0x0f9c0000
};

static const u32 hawaii_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffd,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00200100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c0c0, 0xffffffff, 0x00010000,
	0x3c0c4, 0xffffffff, 0x00030002,
	0x3c0c8, 0xffffffff, 0x00040007,
	0x3c0cc, 0xffffffff, 0x00060005,
	0x3c0d0, 0xffffffff, 0x00090008,
	0x3c0d4, 0xffffffff, 0x00010000,
	0x3c0d8, 0xffffffff, 0x00030002,
	0x3c0dc, 0xffffffff, 0x00040007,
	0x3c0e0, 0xffffffff, 0x00060005,
	0x3c0e4, 0xffffffff, 0x00090008,
	0x3c0e8, 0xffffffff, 0x00010000,
	0x3c0ec, 0xffffffff, 0x00030002,
	0x3c0f0, 0xffffffff, 0x00040007,
	0x3c0f4, 0xffffffff, 0x00060005,
	0x3c0f8, 0xffffffff, 0x00090008,
	0xc318, 0xffffffff, 0x00020200,
	0x3350, 0xffffffff, 0x00000200,
	0x15c0, 0xffffffff, 0x00000400,
	0x55e8, 0xffffffff, 0x00000000,
	0x2f50, 0xffffffff, 0x00000902,
	0x3c000, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xc060000c,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 godavari_golden_registers[] =
{
	0x55e4, 0xff607fff, 0xfc000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
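	/* NB: the 0x98302 offset below is not dword-aligned and may be a
	 * typo for 0x9834 (cf. kalindi_golden_registers); kept as found. */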
	0x98302, 0xf00fffff, 0x00000400,
	0x6130, 0xffffffff, 0x00010000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x000fffff, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xd014, 0x00010000, 0x00810001,
	0xd814, 0x00010000, 0x00810001,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0xc770, 0x00000f00, 0x00000800,
	0xc774, 0x00000f00, 0x00000800,
	0xc798, 0x00ffffff, 0x00ff7fbf,
	0xc79c, 0x00ffffff, 0x00ff7faf,
	0x8c00, 0x000000ff, 0x00000001,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d4, 0x0000001f, 0x00000010,
	0x30934, 0xffffffff, 0x00000000
};


static void cik_init_golden_registers(struct radeon_device *rdev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&rdev->grbm_idx_mutex);
	switch (rdev->family) {
	case CHIP_BONAIRE:
		radeon_program_register_sequence(rdev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_MULLINS:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 godavari_golden_registers,
						 (const u32)ARRAY_SIZE(godavari_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		radeon_program_register_sequence(rdev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	case CHIP_HAWAII:
		radeon_program_register_sequence(rdev,
						 hawaii_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_common_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_spm_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
		break;
	default:
		break;
	}
	mutex_unlock(&rdev->grbm_idx_mutex);
}

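/*
 * Illustration only (not compiled, not part of the driver): roughly how
 * the helper used above applies a {reg, and_mask, or_mask} triplet table.
 * The real implementation is radeon_program_register_sequence() in
 * radeon_device.c; this sketch only shows the intended semantics.
 */
#if 0
static void example_program_register_sequence(struct radeon_device *rdev,
					      const u32 *registers,
					      const u32 array_size)
{
	u32 i;

	for (i = 0; i + 2 < array_size; i += 3) {
		u32 reg = registers[i + 0];
		u32 and_mask = registers[i + 1];
		u32 or_mask = registers[i + 2];
		u32 tmp;

		if (and_mask == 0xffffffff)
			tmp = or_mask;	/* full overwrite */
		else
			tmp = (RREG32(reg) & ~and_mask) | or_mask;
		WREG32(reg, tmp);
	}
}
#endif
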
/**
 * cik_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
u32 cik_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;

	if (rdev->flags & RADEON_IS_IGP) {
		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
			return reference_clock / 4;
	}
	return reference_clock;
}

/**
 * cik_mm_rdoorbell - read a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
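	/* ->ptr is a u32 __iomem *, so 'ptr + index' selects the index'th
	 * dword of the doorbell BAR (assumption: declaration as in radeon.h) */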
	if (index < rdev->doorbell.num_doorbells) {
		return readl(rdev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * cik_mm_wdoorbell - write a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
	if (index < rdev->doorbell.num_doorbells) {
		writel(v, rdev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

#define BONAIRE_IO_MC_REGS_SIZE 36

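/*
 * {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs programmed by
 * ci_mc_load_microcode() before the legacy (non-packaged) MC ucode image
 * is loaded.
 */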
static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};

#define HAWAII_IO_MC_REGS_SIZE 22

static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
{
	{0x0000007d, 0x40000000},
	{0x0000007e, 0x40180304},
	{0x0000007f, 0x0000ff00},
	{0x00000081, 0x00000000},
	{0x00000083, 0x00000800},
	{0x00000086, 0x00000000},
	{0x00000087, 0x00000100},
	{0x00000088, 0x00020100},
	{0x00000089, 0x00000000},
	{0x0000008b, 0x00040000},
	{0x0000008c, 0x00000100},
	{0x0000008e, 0xff010000},
	{0x00000090, 0xffffefff},
	{0x00000091, 0xfff3efff},
	{0x00000092, 0xfff3efbf},
	{0x00000093, 0xf7ffffff},
	{0x00000094, 0xffffff7f},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x0000009f, 0x00c79000}
};


/**
 * cik_srbm_select - select specific register instances
 *
 * @rdev: radeon_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
static void cik_srbm_select(struct radeon_device *rdev,
			    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
			     MEID(me & 0x3) |
			     VMID(vmid & 0xf) |
			     QUEUEID(queue & 0x7));
	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
}

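/*
 * Typical usage pattern for cik_srbm_select() elsewhere in this file
 * (sketch; callers hold rdev->srbm_mutex around the whole sequence):
 *
 *	mutex_lock(&rdev->srbm_mutex);
 *	cik_srbm_select(rdev, me, pipe, queue, vmid);
 *	... program the instanced registers ...
 *	cik_srbm_select(rdev, 0, 0, 0, 0);
 *	mutex_unlock(&rdev->srbm_mutex);
 */
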
/* ucode loading */
/**
 * ci_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
int ci_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data = NULL;
	const __le32 *new_fw_data = NULL;
	u32 running, blackout = 0, tmp;
	u32 *io_mc_regs = NULL;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	if (rdev->new_fw) {
		const struct mc_firmware_header_v1_0 *hdr =
			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;

		radeon_ucode_print_mc_hdr(&hdr->header);

1828 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
1829 new_io_mc_regs = (const __le32 *)
1830 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1831 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1832 new_fw_data = (const __le32 *)
1833 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1834 } else {
1835 ucode_size = rdev->mc_fw->size / 4;
1836
1837 switch (rdev->family) {
1838 case CHIP_BONAIRE:
1839 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
1840 regs_size = BONAIRE_IO_MC_REGS_SIZE;
1841 break;
1842 case CHIP_HAWAII:
1843 io_mc_regs = (u32 *)&hawaii_io_mc_regs;
1844 regs_size = HAWAII_IO_MC_REGS_SIZE;
1845 break;
1846 default:
1847 return -EINVAL;
1848 }
1849 fw_data = (const __be32 *)rdev->mc_fw->data;
1850 }
1851
1852 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
1853
1854 if (running == 0) {
1860 /* reset the engine and set to writable */
1861 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1862 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
1863
1864 /* load mc io regs */
1865 for (i = 0; i < regs_size; i++) {
1866 if (rdev->new_fw) {
1867 WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
1868 WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
1869 } else {
1870 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1871 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1872 }
1873 }
1874
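/* extra io debug overrides for 0x6649 boards whose MC_SEQ_MISC0
 * reports 0x56 in bits [15:8]
 */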
1875 tmp = RREG32(MC_SEQ_MISC0);
1876 if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
1877 WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
1878 WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
1879 WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
1880 WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
1881 }
1882
1883 /* load the MC ucode */
1884 for (i = 0; i < ucode_size; i++) {
1885 if (rdev->new_fw)
1886 WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
1887 else
1888 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1889 }
1890
1891 /* put the engine back into the active state */
1892 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1893 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
1894 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
1895
1896 /* wait for training to complete */
1897 for (i = 0; i < rdev->usec_timeout; i++) {
1898 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
1899 break;
1900 udelay(1);
1901 }
1902 for (i = 0; i < rdev->usec_timeout; i++) {
1903 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
1904 break;
1905 udelay(1);
1906 }
1910 }
1911
1912 return 0;
1913 }
1914
1915 /**
1916 * cik_init_microcode - load ucode images from disk
1917 *
1918 * @rdev: radeon_device pointer
1919 *
1920 * Use the firmware interface to load the ucode images into
1921 * the driver (not loaded into hw).
1922 * Returns 0 on success, error on failure.
1923 */
1924 static int cik_init_microcode(struct radeon_device *rdev)
1925 {
1926 const char *chip_name;
1927 const char *new_chip_name;
1928 size_t pfp_req_size, me_req_size, ce_req_size,
1929 mec_req_size, rlc_req_size, mc_req_size = 0,
1930 sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
1931 char fw_name[30];
1932 int new_fw = 0;
1933 int err;
1934 int num_fw;
1935
1936 DRM_DEBUG("\n");
1937
1938 switch (rdev->family) {
1939 case CHIP_BONAIRE:
1940 chip_name = "BONAIRE";
1941 new_chip_name = "bonaire";
1942 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1943 me_req_size = CIK_ME_UCODE_SIZE * 4;
1944 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1945 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1946 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1947 mc_req_size = BONAIRE_MC_UCODE_SIZE * 4;
1948 mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
1949 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1950 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1951 num_fw = 8;
1952 break;
1953 case CHIP_HAWAII:
1954 chip_name = "HAWAII";
1955 new_chip_name = "hawaii";
1956 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1957 me_req_size = CIK_ME_UCODE_SIZE * 4;
1958 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1959 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1960 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1961 mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
1962 mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
1963 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1964 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
1965 num_fw = 8;
1966 break;
1967 case CHIP_KAVERI:
1968 chip_name = "KAVERI";
1969 new_chip_name = "kaveri";
1970 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1971 me_req_size = CIK_ME_UCODE_SIZE * 4;
1972 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1973 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1974 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
1975 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1976 num_fw = 7;
1977 break;
1978 case CHIP_KABINI:
1979 chip_name = "KABINI";
1980 new_chip_name = "kabini";
1981 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1982 me_req_size = CIK_ME_UCODE_SIZE * 4;
1983 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1984 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1985 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
1986 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1987 num_fw = 6;
1988 break;
1989 case CHIP_MULLINS:
1990 chip_name = "MULLINS";
1991 new_chip_name = "mullins";
1992 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1993 me_req_size = CIK_ME_UCODE_SIZE * 4;
1994 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1995 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1996 rlc_req_size = ML_RLC_UCODE_SIZE * 4;
1997 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1998 num_fw = 6;
1999 break;
2000 default: BUG();
2001 }
2002
2003 DRM_INFO("Loading %s Microcode\n", new_chip_name);
2004
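/* for each block, try the new unified firmware name (lowercase)
 * first and validate it; on failure fall back to the legacy split
 * firmware (uppercase), which is only sanity checked by length
 */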
2005 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
2006 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2007 if (err) {
2008 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2009 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2010 if (err)
2011 goto out;
2012 if (rdev->pfp_fw->size != pfp_req_size) {
2013 printk(KERN_ERR
2014 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2015 rdev->pfp_fw->size, fw_name);
2016 err = -EINVAL;
2017 goto out;
2018 }
2019 } else {
2020 err = radeon_ucode_validate(rdev->pfp_fw);
2021 if (err) {
2022 printk(KERN_ERR
2023 "cik_fw: validation failed for firmware \"%s\"\n",
2024 fw_name);
2025 goto out;
2026 } else {
2027 new_fw++;
2028 }
2029 }
2030
2031 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
2032 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2033 if (err) {
2034 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2035 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2036 if (err)
2037 goto out;
2038 if (rdev->me_fw->size != me_req_size) {
2039 printk(KERN_ERR
2040 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2041 rdev->me_fw->size, fw_name);
2042 err = -EINVAL;
2043 }
2044 } else {
2045 err = radeon_ucode_validate(rdev->me_fw);
2046 if (err) {
2047 printk(KERN_ERR
2048 "cik_fw: validation failed for firmware \"%s\"\n",
2049 fw_name);
2050 goto out;
2051 } else {
2052 new_fw++;
2053 }
2054 }
2055
2056 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
2057 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
2058 if (err) {
2059 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
2060 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
2061 if (err)
2062 goto out;
2063 if (rdev->ce_fw->size != ce_req_size) {
2064 printk(KERN_ERR
2065 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2066 rdev->ce_fw->size, fw_name);
2067 err = -EINVAL;
2068 }
2069 } else {
2070 err = radeon_ucode_validate(rdev->ce_fw);
2071 if (err) {
2072 printk(KERN_ERR
2073 "cik_fw: validation failed for firmware \"%s\"\n",
2074 fw_name);
2075 goto out;
2076 } else {
2077 new_fw++;
2078 }
2079 }
2080
2081 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
2082 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
2083 if (err) {
2084 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
2085 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
2086 if (err)
2087 goto out;
2088 if (rdev->mec_fw->size != mec_req_size) {
2089 printk(KERN_ERR
2090 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2091 rdev->mec_fw->size, fw_name);
2092 err = -EINVAL;
2093 }
2094 } else {
2095 err = radeon_ucode_validate(rdev->mec_fw);
2096 if (err) {
2097 printk(KERN_ERR
2098 "cik_fw: validation failed for firmware \"%s\"\n",
2099 fw_name);
2100 goto out;
2101 } else {
2102 new_fw++;
2103 }
2104 }
2105
2106 if (rdev->family == CHIP_KAVERI) {
2107 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
2108 err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
2109 if (err) {
2110 goto out;
2111 } else {
2112 err = radeon_ucode_validate(rdev->mec2_fw);
2113 if (err) {
2114 goto out;
2115 } else {
2116 new_fw++;
2117 }
2118 }
2119 }
2120
2121 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
2122 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2123 if (err) {
2124 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
2125 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2126 if (err)
2127 goto out;
2128 if (rdev->rlc_fw->size != rlc_req_size) {
2129 printk(KERN_ERR
2130 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
2131 rdev->rlc_fw->size, fw_name);
2132 err = -EINVAL;
2133 }
2134 } else {
2135 err = radeon_ucode_validate(rdev->rlc_fw);
2136 if (err) {
2137 printk(KERN_ERR
2138 "cik_fw: validation failed for firmware \"%s\"\n",
2139 fw_name);
2140 goto out;
2141 } else {
2142 new_fw++;
2143 }
2144 }
2145
2146 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
2147 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
2148 if (err) {
2149 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
2150 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
2151 if (err)
2152 goto out;
2153 if (rdev->sdma_fw->size != sdma_req_size) {
2154 printk(KERN_ERR
2155 "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
2156 rdev->sdma_fw->size, fw_name);
2157 err = -EINVAL;
2158 }
2159 } else {
2160 err = radeon_ucode_validate(rdev->sdma_fw);
2161 if (err) {
2162 printk(KERN_ERR
2163 "cik_fw: validation failed for firmware \"%s\"\n",
2164 fw_name);
2165 goto out;
2166 } else {
2167 new_fw++;
2168 }
2169 }
2170
2171 /* No SMC, MC ucode on APUs */
2172 if (!(rdev->flags & RADEON_IS_IGP)) {
2173 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
2174 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
2175 if (err) {
2176 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
2177 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
2178 if (err) {
2179 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2180 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
2181 if (err)
2182 goto out;
2183 }
2184 if ((rdev->mc_fw->size != mc_req_size) &&
2185 (rdev->mc_fw->size != mc2_req_size)){
2186 printk(KERN_ERR
2187 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
2188 rdev->mc_fw->size, fw_name);
2189 err = -EINVAL;
2190 }
2191 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
2192 } else {
2193 err = radeon_ucode_validate(rdev->mc_fw);
2194 if (err) {
2195 printk(KERN_ERR
2196 "cik_fw: validation failed for firmware \"%s\"\n",
2197 fw_name);
2198 goto out;
2199 } else {
2200 new_fw++;
2201 }
2202 }
2203
2204 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
2205 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2206 if (err) {
2207 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
2208 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2209 if (err) {
2210 printk(KERN_ERR
2211 "smc: error loading firmware \"%s\"\n",
2212 fw_name);
2213 release_firmware(rdev->smc_fw);
2214 rdev->smc_fw = NULL;
2215 err = 0;
2216 } else if (rdev->smc_fw->size != smc_req_size) {
2217 printk(KERN_ERR
2218 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
2219 rdev->smc_fw->size, fw_name);
2220 err = -EINVAL;
2221 }
2222 } else {
2223 err = radeon_ucode_validate(rdev->smc_fw);
2224 if (err) {
2225 printk(KERN_ERR
2226 "cik_fw: validation failed for firmware \"%s\"\n",
2227 fw_name);
2228 goto out;
2229 } else {
2230 new_fw++;
2231 }
2232 }
2233 }
2234
2235 if (new_fw == 0) {
2236 rdev->new_fw = false;
2237 } else if (new_fw < num_fw) {
2238 printk(KERN_ERR "cik_fw: mixing new and old firmware!\n");
2239 err = -EINVAL;
2240 } else {
2241 rdev->new_fw = true;
2242 }
2243
2244 out:
2245 if (err) {
2246 if (err != -EINVAL)
2247 printk(KERN_ERR
2248 "cik_cp: Failed to load firmware \"%s\"\n",
2249 fw_name);
2250 release_firmware(rdev->pfp_fw);
2251 rdev->pfp_fw = NULL;
2252 release_firmware(rdev->me_fw);
2253 rdev->me_fw = NULL;
2254 release_firmware(rdev->ce_fw);
2255 rdev->ce_fw = NULL;
2256 release_firmware(rdev->mec_fw);
2257 rdev->mec_fw = NULL;
2258 release_firmware(rdev->mec2_fw);
2259 rdev->mec2_fw = NULL;
2260 release_firmware(rdev->rlc_fw);
2261 rdev->rlc_fw = NULL;
2262 release_firmware(rdev->sdma_fw);
2263 rdev->sdma_fw = NULL;
2264 release_firmware(rdev->mc_fw);
2265 rdev->mc_fw = NULL;
2266 release_firmware(rdev->smc_fw);
2267 rdev->smc_fw = NULL;
2268 }
2269 return err;
2270 }
2271
2272 /*
2273 * Core functions
2274 */
2275 /**
2276 * cik_tiling_mode_table_init - init the hw tiling table
2277 *
2278 * @rdev: radeon_device pointer
2279 *
2280 * Starting with SI, the tiling setup is done globally in a
2281 * set of 32 tiling modes. Rather than selecting each set of
2282 * parameters per surface as on older asics, we just select
2283 * which index in the tiling table we want to use, and the
2284 * surface uses those parameters (CIK).
2285 */
2286 static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2287 {
2288 const u32 num_tile_mode_states = 32;
2289 const u32 num_secondary_tile_mode_states = 16;
2290 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
2291 u32 num_pipe_configs;
2292 u32 num_rbs = rdev->config.cik.max_backends_per_se *
2293 rdev->config.cik.max_shader_engines;
2294
2295 switch (rdev->config.cik.mem_row_size_in_kb) {
2296 case 1:
2297 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2298 break;
2299 case 2:
2300 default:
2301 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2302 break;
2303 case 4:
2304 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2305 break;
2306 }
2307
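/* tiling tables are only defined for 2, 4, 8 and 16 pipe configs;
 * anything above 8 pipes is programmed with the 16-pipe (P16) tables
 */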
2308 num_pipe_configs = rdev->config.cik.max_tile_pipes;
2309 if (num_pipe_configs > 8)
2310 num_pipe_configs = 16;
2311
2312 if (num_pipe_configs == 16) {
2313 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2314 switch (reg_offset) {
2315 case 0:
2316 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2317 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2318 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2319 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2320 break;
2321 case 1:
2322 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2323 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2324 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2325 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2326 break;
2327 case 2:
2328 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2329 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2330 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2331 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2332 break;
2333 case 3:
2334 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2335 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2336 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2337 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2338 break;
2339 case 4:
2340 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2341 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2342 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2343 TILE_SPLIT(split_equal_to_row_size));
2344 break;
2345 case 5:
2346 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2347 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2348 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2349 break;
2350 case 6:
2351 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2352 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2353 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2354 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2355 break;
2356 case 7:
2357 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2358 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2359 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2360 TILE_SPLIT(split_equal_to_row_size));
2361 break;
2362 case 8:
2363 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2364 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2365 break;
2366 case 9:
2367 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2368 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2369 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2370 break;
2371 case 10:
2372 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2373 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2374 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2375 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2376 break;
2377 case 11:
2378 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2379 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2380 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2381 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2382 break;
2383 case 12:
2384 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2385 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2386 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2387 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2388 break;
2389 case 13:
2390 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2391 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2392 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2393 break;
2394 case 14:
2395 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2396 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2397 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2398 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2399 break;
2400 case 16:
2401 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2402 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2403 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2404 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2405 break;
2406 case 17:
2407 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2408 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2409 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2410 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2411 break;
2412 case 27:
2413 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2414 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2415 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2416 break;
2417 case 28:
2418 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2419 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2420 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2421 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2422 break;
2423 case 29:
2424 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2425 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2426 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2427 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2428 break;
2429 case 30:
2430 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2431 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2432 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2433 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2434 break;
2435 default:
2436 gb_tile_moden = 0;
2437 break;
2438 }
2439 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2440 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2441 }
2442 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2443 switch (reg_offset) {
2444 case 0:
2445 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2446 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2447 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2448 NUM_BANKS(ADDR_SURF_16_BANK));
2449 break;
2450 case 1:
2451 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2452 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2453 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2454 NUM_BANKS(ADDR_SURF_16_BANK));
2455 break;
2456 case 2:
2457 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2458 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2459 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2460 NUM_BANKS(ADDR_SURF_16_BANK));
2461 break;
2462 case 3:
2463 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2464 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2465 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2466 NUM_BANKS(ADDR_SURF_16_BANK));
2467 break;
2468 case 4:
2469 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2470 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2471 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2472 NUM_BANKS(ADDR_SURF_8_BANK));
2473 break;
2474 case 5:
2475 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2476 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2477 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2478 NUM_BANKS(ADDR_SURF_4_BANK));
2479 break;
2480 case 6:
2481 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2482 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2483 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2484 NUM_BANKS(ADDR_SURF_2_BANK));
2485 break;
2486 case 8:
2487 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2488 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2489 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2490 NUM_BANKS(ADDR_SURF_16_BANK));
2491 break;
2492 case 9:
2493 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2494 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2495 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2496 NUM_BANKS(ADDR_SURF_16_BANK));
2497 break;
2498 case 10:
2499 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2500 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2501 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2502 NUM_BANKS(ADDR_SURF_16_BANK));
2503 break;
2504 case 11:
2505 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2506 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2507 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2508 NUM_BANKS(ADDR_SURF_8_BANK));
2509 break;
2510 case 12:
2511 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2512 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2513 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2514 NUM_BANKS(ADDR_SURF_4_BANK));
2515 break;
2516 case 13:
2517 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2518 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2519 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2520 NUM_BANKS(ADDR_SURF_2_BANK));
2521 break;
2522 case 14:
2523 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2524 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2525 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2526 NUM_BANKS(ADDR_SURF_2_BANK));
2527 break;
2528 default:
2529 gb_tile_moden = 0;
2530 break;
2531 }
2532 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2533 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2534 }
2535 } else if (num_pipe_configs == 8) {
2536 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2537 switch (reg_offset) {
2538 case 0:
2539 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2540 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2541 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2542 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2543 break;
2544 case 1:
2545 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2546 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2547 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2548 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2549 break;
2550 case 2:
2551 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2552 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2553 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2554 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2555 break;
2556 case 3:
2557 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2558 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2559 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2560 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2561 break;
2562 case 4:
2563 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2564 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2565 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2566 TILE_SPLIT(split_equal_to_row_size));
2567 break;
2568 case 5:
2569 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2570 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2571 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2572 break;
2573 case 6:
2574 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2575 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2576 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2577 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2578 break;
2579 case 7:
2580 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2581 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2582 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2583 TILE_SPLIT(split_equal_to_row_size));
2584 break;
2585 case 8:
2586 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2587 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2588 break;
2589 case 9:
2590 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2591 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2592 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2593 break;
2594 case 10:
2595 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2596 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2597 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2598 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2599 break;
2600 case 11:
2601 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2602 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2603 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2604 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2605 break;
2606 case 12:
2607 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2608 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2609 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2610 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2611 break;
2612 case 13:
2613 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2614 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2615 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2616 break;
2617 case 14:
2618 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2619 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2620 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2621 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2622 break;
2623 case 16:
2624 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2625 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2626 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2627 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2628 break;
2629 case 17:
2630 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2631 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2632 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2633 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2634 break;
2635 case 27:
2636 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2637 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2638 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2639 break;
2640 case 28:
2641 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2642 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2643 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2644 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2645 break;
2646 case 29:
2647 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2648 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2649 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2650 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2651 break;
2652 case 30:
2653 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2654 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2655 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2656 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2657 break;
2658 default:
2659 gb_tile_moden = 0;
2660 break;
2661 }
2662 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2663 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2664 }
2665 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2666 switch (reg_offset) {
2667 case 0:
2668 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2669 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2670 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2671 NUM_BANKS(ADDR_SURF_16_BANK));
2672 break;
2673 case 1:
2674 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2675 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2676 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2677 NUM_BANKS(ADDR_SURF_16_BANK));
2678 break;
2679 case 2:
2680 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2681 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2682 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2683 NUM_BANKS(ADDR_SURF_16_BANK));
2684 break;
2685 case 3:
2686 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2687 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2688 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2689 NUM_BANKS(ADDR_SURF_16_BANK));
2690 break;
2691 case 4:
2692 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2693 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2694 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2695 NUM_BANKS(ADDR_SURF_8_BANK));
2696 break;
2697 case 5:
2698 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2699 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2700 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2701 NUM_BANKS(ADDR_SURF_4_BANK));
2702 break;
2703 case 6:
2704 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2705 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2706 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2707 NUM_BANKS(ADDR_SURF_2_BANK));
2708 break;
2709 case 8:
2710 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2711 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2712 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2713 NUM_BANKS(ADDR_SURF_16_BANK));
2714 break;
2715 case 9:
2716 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2717 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2718 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2719 NUM_BANKS(ADDR_SURF_16_BANK));
2720 break;
2721 case 10:
2722 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2723 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2724 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2725 NUM_BANKS(ADDR_SURF_16_BANK));
2726 break;
2727 case 11:
2728 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2729 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2730 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2731 NUM_BANKS(ADDR_SURF_16_BANK));
2732 break;
2733 case 12:
2734 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2735 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2736 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2737 NUM_BANKS(ADDR_SURF_8_BANK));
2738 break;
2739 case 13:
2740 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2741 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2742 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2743 NUM_BANKS(ADDR_SURF_4_BANK));
2744 break;
2745 case 14:
2746 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2747 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2748 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2749 NUM_BANKS(ADDR_SURF_2_BANK));
2750 break;
2751 default:
2752 gb_tile_moden = 0;
2753 break;
2754 }
2755 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2756 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2757 }
2758 } else if (num_pipe_configs == 4) {
2759 if (num_rbs == 4) {
2760 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2761 switch (reg_offset) {
2762 case 0:
2763 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2764 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2765 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2766 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2767 break;
2768 case 1:
2769 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2770 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2771 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2772 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2773 break;
2774 case 2:
2775 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2776 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2777 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2778 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2779 break;
2780 case 3:
2781 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2782 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2783 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2784 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2785 break;
2786 case 4:
2787 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2788 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2789 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2790 TILE_SPLIT(split_equal_to_row_size));
2791 break;
2792 case 5:
2793 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2794 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2795 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2796 break;
2797 case 6:
2798 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2799 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2800 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2801 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2802 break;
2803 case 7:
2804 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2805 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2806 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2807 TILE_SPLIT(split_equal_to_row_size));
2808 break;
2809 case 8:
2810 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2811 PIPE_CONFIG(ADDR_SURF_P4_16x16));
2812 break;
2813 case 9:
2814 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2815 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2816 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2817 break;
2818 case 10:
2819 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2820 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2821 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2822 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2823 break;
2824 case 11:
2825 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2826 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2827 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2828 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2829 break;
2830 case 12:
2831 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2832 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2833 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2834 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2835 break;
2836 case 13:
2837 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2838 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2839 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2840 break;
2841 case 14:
2842 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2843 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2844 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2845 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2846 break;
2847 case 16:
2848 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2849 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2850 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2851 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2852 break;
2853 case 17:
2854 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2855 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2856 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2857 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2858 break;
2859 case 27:
2860 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2861 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2862 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2863 break;
2864 case 28:
2865 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2866 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2867 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2868 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2869 break;
2870 case 29:
2871 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2872 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2873 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2874 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2875 break;
2876 case 30:
2877 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2878 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2879 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2880 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2881 break;
2882 default:
2883 gb_tile_moden = 0;
2884 break;
2885 }
2886 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2887 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2888 }
2889 } else if (num_rbs < 4) {
2890 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2891 switch (reg_offset) {
2892 case 0:
2893 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2894 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2895 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2896 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2897 break;
2898 case 1:
2899 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2900 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2901 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2902 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2903 break;
2904 case 2:
2905 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2906 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2907 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2908 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2909 break;
2910 case 3:
2911 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2912 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2913 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2914 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2915 break;
2916 case 4:
2917 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2918 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2919 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2920 TILE_SPLIT(split_equal_to_row_size));
2921 break;
2922 case 5:
2923 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2924 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2925 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2926 break;
2927 case 6:
2928 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2929 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2930 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2931 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2932 break;
2933 case 7:
2934 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2935 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2936 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2937 TILE_SPLIT(split_equal_to_row_size));
2938 break;
2939 case 8:
2940 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2941 PIPE_CONFIG(ADDR_SURF_P4_8x16));
2942 break;
2943 case 9:
2944 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2945 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2946 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2947 break;
2948 case 10:
2949 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2950 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2951 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2952 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2953 break;
2954 case 11:
2955 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2956 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2957 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2958 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2959 break;
2960 case 12:
2961 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2962 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2963 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2964 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2965 break;
2966 case 13:
2967 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2968 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2969 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2970 break;
2971 case 14:
2972 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2973 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2974 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2975 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2976 break;
2977 case 16:
2978 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2979 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2980 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2981 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2982 break;
2983 case 17:
2984 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2985 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2986 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2987 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2988 break;
2989 case 27:
2990 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2991 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2992 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2993 break;
2994 case 28:
2995 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2996 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2997 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2998 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2999 break;
3000 case 29:
3001 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3002 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3003 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
3004 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3005 break;
3006 case 30:
3007 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3008 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3009 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
3010 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3011 break;
3012 default:
3013 gb_tile_moden = 0;
3014 break;
3015 }
3016 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
3017 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
3018 }
3019 }
3020 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
3021 switch (reg_offset) {
3022 case 0:
3023 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3024 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3025 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3026 NUM_BANKS(ADDR_SURF_16_BANK));
3027 break;
3028 case 1:
3029 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3030 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3031 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3032 NUM_BANKS(ADDR_SURF_16_BANK));
3033 break;
3034 case 2:
3035 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3036 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3037 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3038 NUM_BANKS(ADDR_SURF_16_BANK));
3039 break;
3040 case 3:
3041 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3042 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3043 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3044 NUM_BANKS(ADDR_SURF_16_BANK));
3045 break;
3046 case 4:
3047 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3048 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3049 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3050 NUM_BANKS(ADDR_SURF_16_BANK));
3051 break;
3052 case 5:
3053 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3054 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3055 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3056 NUM_BANKS(ADDR_SURF_8_BANK));
3057 break;
3058 case 6:
3059 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3060 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3061 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3062 NUM_BANKS(ADDR_SURF_4_BANK));
3063 break;
3064 case 8:
3065 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3066 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3067 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3068 NUM_BANKS(ADDR_SURF_16_BANK));
3069 break;
3070 case 9:
3071 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3072 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3073 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3074 NUM_BANKS(ADDR_SURF_16_BANK));
3075 break;
3076 case 10:
3077 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3078 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3079 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3080 NUM_BANKS(ADDR_SURF_16_BANK));
3081 break;
3082 case 11:
3083 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3084 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3085 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3086 NUM_BANKS(ADDR_SURF_16_BANK));
3087 break;
3088 case 12:
3089 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3090 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3091 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3092 NUM_BANKS(ADDR_SURF_16_BANK));
3093 break;
3094 case 13:
3095 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3096 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3097 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3098 NUM_BANKS(ADDR_SURF_8_BANK));
3099 break;
3100 case 14:
3101 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3102 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3103 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3104 NUM_BANKS(ADDR_SURF_4_BANK));
3105 break;
3106 default:
3107 gb_tile_moden = 0;
3108 break;
3109 }
3110 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
3111 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
3112 }
3113 } else if (num_pipe_configs == 2) {
3114 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
3115 switch (reg_offset) {
3116 case 0:
3117 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3118 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3119 PIPE_CONFIG(ADDR_SURF_P2) |
3120 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
3121 break;
3122 case 1:
3123 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3124 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3125 PIPE_CONFIG(ADDR_SURF_P2) |
3126 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
3127 break;
3128 case 2:
3129 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3130 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3131 PIPE_CONFIG(ADDR_SURF_P2) |
3132 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
3133 break;
3134 case 3:
3135 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3136 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3137 PIPE_CONFIG(ADDR_SURF_P2) |
3138 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
3139 break;
3140 case 4:
3141 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3142 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3143 PIPE_CONFIG(ADDR_SURF_P2) |
3144 TILE_SPLIT(split_equal_to_row_size));
3145 break;
3146 case 5:
3147 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3148 PIPE_CONFIG(ADDR_SURF_P2) |
3149 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3150 break;
3151 case 6:
3152 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3153 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3154 PIPE_CONFIG(ADDR_SURF_P2) |
3155 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
3156 break;
3157 case 7:
3158 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3159 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
3160 PIPE_CONFIG(ADDR_SURF_P2) |
3161 TILE_SPLIT(split_equal_to_row_size));
3162 break;
3163 case 8:
3164 gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3165 PIPE_CONFIG(ADDR_SURF_P2);
3166 break;
3167 case 9:
3168 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3169 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3170 PIPE_CONFIG(ADDR_SURF_P2));
3171 break;
3172 case 10:
3173 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3174 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3175 PIPE_CONFIG(ADDR_SURF_P2) |
3176 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3177 break;
3178 case 11:
3179 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3180 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3181 PIPE_CONFIG(ADDR_SURF_P2) |
3182 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3183 break;
3184 case 12:
3185 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3186 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3187 PIPE_CONFIG(ADDR_SURF_P2) |
3188 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3189 break;
3190 case 13:
3191 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3192 PIPE_CONFIG(ADDR_SURF_P2) |
3193 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
3194 break;
3195 case 14:
3196 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3197 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3198 PIPE_CONFIG(ADDR_SURF_P2) |
3199 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3200 break;
3201 case 16:
3202 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3203 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3204 PIPE_CONFIG(ADDR_SURF_P2) |
3205 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3206 break;
3207 case 17:
3208 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3209 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3210 PIPE_CONFIG(ADDR_SURF_P2) |
3211 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3212 break;
3213 case 27:
3214 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3215 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3216 PIPE_CONFIG(ADDR_SURF_P2));
3217 break;
3218 case 28:
3219 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3220 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3221 PIPE_CONFIG(ADDR_SURF_P2) |
3222 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3223 break;
3224 case 29:
3225 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3226 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3227 PIPE_CONFIG(ADDR_SURF_P2) |
3228 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3229 break;
3230 case 30:
3231 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
3232 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3233 PIPE_CONFIG(ADDR_SURF_P2) |
3234 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3235 break;
3236 default:
3237 gb_tile_moden = 0;
3238 break;
3239 }
3240 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
3241 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
3242 }
3243 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
3244 switch (reg_offset) {
3245 case 0:
3246 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3247 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3248 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3249 NUM_BANKS(ADDR_SURF_16_BANK));
3250 break;
3251 case 1:
3252 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3253 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3254 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3255 NUM_BANKS(ADDR_SURF_16_BANK));
3256 break;
3257 case 2:
3258 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3259 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3260 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3261 NUM_BANKS(ADDR_SURF_16_BANK));
3262 break;
3263 case 3:
3264 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3265 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3266 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3267 NUM_BANKS(ADDR_SURF_16_BANK));
3268 break;
3269 case 4:
3270 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3271 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3272 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3273 NUM_BANKS(ADDR_SURF_16_BANK));
3274 break;
3275 case 5:
3276 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3277 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3278 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3279 NUM_BANKS(ADDR_SURF_16_BANK));
3280 break;
3281 case 6:
3282 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3283 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3284 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3285 NUM_BANKS(ADDR_SURF_8_BANK));
3286 break;
3287 case 8:
3288 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3289 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3290 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3291 NUM_BANKS(ADDR_SURF_16_BANK));
3292 break;
3293 case 9:
3294 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3295 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3296 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3297 NUM_BANKS(ADDR_SURF_16_BANK));
3298 break;
3299 case 10:
3300 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3301 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3302 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3303 NUM_BANKS(ADDR_SURF_16_BANK));
3304 break;
3305 case 11:
3306 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3307 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3308 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3309 NUM_BANKS(ADDR_SURF_16_BANK));
3310 break;
3311 case 12:
3312 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3313 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3314 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3315 NUM_BANKS(ADDR_SURF_16_BANK));
3316 break;
3317 case 13:
3318 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3319 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3320 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3321 NUM_BANKS(ADDR_SURF_16_BANK));
3322 break;
3323 case 14:
3324 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3325 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3326 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3327 NUM_BANKS(ADDR_SURF_8_BANK));
3328 break;
3329 default:
3330 gb_tile_moden = 0;
3331 break;
3332 }
3333 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
3334 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
3335 }
3336 } else
3337 DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
3338 }
3339
3340 /**
3341 * cik_select_se_sh - select which SE, SH to address
3342 *
3343 * @rdev: radeon_device pointer
3344 * @se_num: shader engine to address
3345 * @sh_num: sh block to address
3346 *
3347 * Select which SE, SH combinations to address. Certain
3348 * registers are instanced per SE or SH. 0xffffffff means
3349 * broadcast to all SEs or SHs (CIK).
3350 */
3351 static void cik_select_se_sh(struct radeon_device *rdev,
3352 u32 se_num, u32 sh_num)
3353 {
3354 u32 data = INSTANCE_BROADCAST_WRITES;
3355
3356 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
3357 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
3358 else if (se_num == 0xffffffff)
3359 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
3360 else if (sh_num == 0xffffffff)
3361 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
3362 else
3363 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
3364 WREG32(GRBM_GFX_INDEX, data);
3365 }
3366
3367 /**
3368 * cik_create_bitmask - create a bitmask
3369 *
3370 * @bit_width: length of the mask
3371 *
3372 * Create a variable-length bit mask (CIK).
3373 * Returns the bitmask.
3374 */
3375 static u32 cik_create_bitmask(u32 bit_width)
3376 {
3377 u32 i, mask = 0;
3378
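/* equivalent to (1U << bit_width) - 1 for bit_width < 32; built
 * iteratively so bit_width == 32 avoids an undefined shift
 */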
3379 for (i = 0; i < bit_width; i++) {
3380 mask <<= 1;
3381 mask |= 1;
3382 }
3383 return mask;
3384 }
3385
3386 /**
3387 * cik_get_rb_disabled - computes the mask of disabled RBs
3388 *
3389 * @rdev: radeon_device pointer
3390 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
3391 * @sh_per_se: number of SH blocks per SE for the asic
3393 *
3394 * Calculates the bitmask of disabled RBs (CIK).
3395 * Returns the disabled RB bitmask.
3396 */
3397 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3398 u32 max_rb_num_per_se,
3399 u32 sh_per_se)
3400 {
3401 u32 data, mask;
3402
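/* merge the fused-off backends (CC_RB_BACKEND_DISABLE) with any
 * user-disabled backends (GC_USER_RB_BACKEND_DISABLE)
 */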
3403 data = RREG32(CC_RB_BACKEND_DISABLE);
3404 if (data & 1)
3405 data &= BACKEND_DISABLE_MASK;
3406 else
3407 data = 0;
3408 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
3409
3410 data >>= BACKEND_DISABLE_SHIFT;
3411
3412 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3413
3414 return data & mask;
3415 }
3416
3417 /**
3418 * cik_setup_rb - setup the RBs on the asic
3419 *
3420 * @rdev: radeon_device pointer
3421 * @se_num: number of SEs (shader engines) for the asic
3422 * @sh_per_se: number of SH blocks per SE for the asic
3423 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
3424 *
3425 * Configures per-SE/SH RB registers (CIK).
3426 */
3427 static void cik_setup_rb(struct radeon_device *rdev,
3428 u32 se_num, u32 sh_per_se,
3429 u32 max_rb_num_per_se)
3430 {
3431 int i, j;
3432 u32 data, mask;
3433 u32 disabled_rbs = 0;
3434 u32 enabled_rbs = 0;
3435
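/* walk every SE/SH pair and accumulate the global disabled-RB bitmap */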
3436 mutex_lock(&rdev->grbm_idx_mutex);
3437 for (i = 0; i < se_num; i++) {
3438 for (j = 0; j < sh_per_se; j++) {
3439 cik_select_se_sh(rdev, i, j);
3440 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3441 if (rdev->family == CHIP_HAWAII)
3442 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3443 else
3444 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
3445 }
3446 }
3447 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3448 mutex_unlock(&rdev->grbm_idx_mutex);
3449
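/* every RB not marked disabled is available */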
3450 mask = 1;
3451 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3452 if (!(disabled_rbs & mask))
3453 enabled_rbs |= mask;
3454 mask <<= 1;
3455 }
3456
3457 rdev->config.cik.backend_enable_mask = enabled_rbs;
3458
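/* program each SE's PA_SC_RASTER_CONFIG with an RB mapping derived
 * from its enabled-RB bits, two bits per SH
 */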
3459 mutex_lock(&rdev->grbm_idx_mutex);
3460 for (i = 0; i < se_num; i++) {
3461 cik_select_se_sh(rdev, i, 0xffffffff);
3462 data = 0;
3463 for (j = 0; j < sh_per_se; j++) {
3464 switch (enabled_rbs & 3) {
3465 case 0:
3466 if (j == 0)
3467 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
3468 else
3469 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
3470 break;
3471 case 1:
3472 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
3473 break;
3474 case 2:
3475 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
3476 break;
3477 case 3:
3478 default:
3479 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
3480 break;
3481 }
3482 enabled_rbs >>= 2;
3483 }
3484 WREG32(PA_SC_RASTER_CONFIG, data);
3485 }
3486 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3487 mutex_unlock(&rdev->grbm_idx_mutex);
3488 }
3489
3490 /**
3491 * cik_gpu_init - setup the 3D engine
3492 *
3493 * @rdev: radeon_device pointer
3494 *
3495 * Configures the 3D engine and tiling configuration
3496 * registers so that the 3D engine is usable.
3497 */
3498 static void cik_gpu_init(struct radeon_device *rdev)
3499 {
3500 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
3501 u32 mc_shared_chmap, mc_arb_ramcfg;
3502 u32 hdp_host_path_cntl;
3503 u32 tmp;
3504 int i, j;
3505
3506 switch (rdev->family) {
3507 case CHIP_BONAIRE:
3508 rdev->config.cik.max_shader_engines = 2;
3509 rdev->config.cik.max_tile_pipes = 4;
3510 rdev->config.cik.max_cu_per_sh = 7;
3511 rdev->config.cik.max_sh_per_se = 1;
3512 rdev->config.cik.max_backends_per_se = 2;
3513 rdev->config.cik.max_texture_channel_caches = 4;
3514 rdev->config.cik.max_gprs = 256;
3515 rdev->config.cik.max_gs_threads = 32;
3516 rdev->config.cik.max_hw_contexts = 8;
3517
3518 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3519 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3520 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3521 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3522 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3523 break;
3524 case CHIP_HAWAII:
3525 rdev->config.cik.max_shader_engines = 4;
3526 rdev->config.cik.max_tile_pipes = 16;
3527 rdev->config.cik.max_cu_per_sh = 11;
3528 rdev->config.cik.max_sh_per_se = 1;
3529 rdev->config.cik.max_backends_per_se = 4;
3530 rdev->config.cik.max_texture_channel_caches = 16;
3531 rdev->config.cik.max_gprs = 256;
3532 rdev->config.cik.max_gs_threads = 32;
3533 rdev->config.cik.max_hw_contexts = 8;
3534
3535 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3536 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3537 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3538 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3539 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
3540 break;
3541 case CHIP_KAVERI:
3542 rdev->config.cik.max_shader_engines = 1;
3543 rdev->config.cik.max_tile_pipes = 4;
3544 if ((rdev->pdev->device == 0x1304) ||
3545 (rdev->pdev->device == 0x1305) ||
3546 (rdev->pdev->device == 0x130C) ||
3547 (rdev->pdev->device == 0x130F) ||
3548 (rdev->pdev->device == 0x1310) ||
3549 (rdev->pdev->device == 0x1311) ||
3550 (rdev->pdev->device == 0x131C)) {
3551 rdev->config.cik.max_cu_per_sh = 8;
3552 rdev->config.cik.max_backends_per_se = 2;
3553 } else if ((rdev->pdev->device == 0x1309) ||
3554 (rdev->pdev->device == 0x130A) ||
3555 (rdev->pdev->device == 0x130D) ||
3556 (rdev->pdev->device == 0x1313) ||
3557 (rdev->pdev->device == 0x131D)) {
3558 rdev->config.cik.max_cu_per_sh = 6;
3559 rdev->config.cik.max_backends_per_se = 2;
3560 } else if ((rdev->pdev->device == 0x1306) ||
3561 (rdev->pdev->device == 0x1307) ||
3562 (rdev->pdev->device == 0x130B) ||
3563 (rdev->pdev->device == 0x130E) ||
3564 (rdev->pdev->device == 0x1315) ||
3565 (rdev->pdev->device == 0x1318) ||
3566 (rdev->pdev->device == 0x131B)) {
3567 rdev->config.cik.max_cu_per_sh = 4;
3568 rdev->config.cik.max_backends_per_se = 1;
3569 } else {
3570 rdev->config.cik.max_cu_per_sh = 3;
3571 rdev->config.cik.max_backends_per_se = 1;
3572 }
3573 rdev->config.cik.max_sh_per_se = 1;
3574 rdev->config.cik.max_texture_channel_caches = 4;
3575 rdev->config.cik.max_gprs = 256;
3576 rdev->config.cik.max_gs_threads = 16;
3577 rdev->config.cik.max_hw_contexts = 8;
3578
3579 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3580 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3581 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3582 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3583 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3584 break;
3585 case CHIP_KABINI:
3586 case CHIP_MULLINS:
3587 default:
3588 rdev->config.cik.max_shader_engines = 1;
3589 rdev->config.cik.max_tile_pipes = 2;
3590 rdev->config.cik.max_cu_per_sh = 2;
3591 rdev->config.cik.max_sh_per_se = 1;
3592 rdev->config.cik.max_backends_per_se = 1;
3593 rdev->config.cik.max_texture_channel_caches = 2;
3594 rdev->config.cik.max_gprs = 256;
3595 rdev->config.cik.max_gs_threads = 16;
3596 rdev->config.cik.max_hw_contexts = 8;
3597
3598 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3599 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3600 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3601 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3602 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3603 break;
3604 }
3605
3606 /* Initialize HDP */
3607 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3608 WREG32((0x2c14 + j), 0x00000000);
3609 WREG32((0x2c18 + j), 0x00000000);
3610 WREG32((0x2c1c + j), 0x00000000);
3611 WREG32((0x2c20 + j), 0x00000000);
3612 WREG32((0x2c24 + j), 0x00000000);
3613 }
3614
3615 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3616
3617 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
3618
3619 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
3620 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
3621
3622 rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
3623 rdev->config.cik.mem_max_burst_length_bytes = 256;
3624 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
3625 rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
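/* e.g. NOOFCOLS = 2 -> (4 * 2^10) / 1024 = 4kb rows, the cap below */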
3626 if (rdev->config.cik.mem_row_size_in_kb > 4)
3627 rdev->config.cik.mem_row_size_in_kb = 4;
3628 /* XXX use MC settings? */
3629 rdev->config.cik.shader_engine_tile_size = 32;
3630 rdev->config.cik.num_gpus = 1;
3631 rdev->config.cik.multi_gpu_tile_size = 64;
3632
3633 /* fix up row size */
3634 gb_addr_config &= ~ROW_SIZE_MASK;
3635 switch (rdev->config.cik.mem_row_size_in_kb) {
3636 case 1:
3637 default:
3638 gb_addr_config |= ROW_SIZE(0);
3639 break;
3640 case 2:
3641 gb_addr_config |= ROW_SIZE(1);
3642 break;
3643 case 4:
3644 gb_addr_config |= ROW_SIZE(2);
3645 break;
3646 }
3647
3648 /* setup tiling info dword. gb_addr_config is not adequate since it does
3649 * not have bank info, so create a custom tiling dword.
3650 * bits 3:0 num_pipes
3651 * bits 7:4 num_banks
3652 * bits 11:8 group_size
3653 * bits 15:12 row_size
3654 */
3655 rdev->config.cik.tile_config = 0;
3656 switch (rdev->config.cik.num_tile_pipes) {
3657 case 1:
3658 rdev->config.cik.tile_config |= (0 << 0);
3659 break;
3660 case 2:
3661 rdev->config.cik.tile_config |= (1 << 0);
3662 break;
3663 case 4:
3664 rdev->config.cik.tile_config |= (2 << 0);
3665 break;
3666 case 8:
3667 default:
3668 /* XXX what about 12? */
3669 rdev->config.cik.tile_config |= (3 << 0);
3670 break;
3671 }
3672 rdev->config.cik.tile_config |=
3673 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
3674 rdev->config.cik.tile_config |=
3675 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
3676 rdev->config.cik.tile_config |=
3677 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
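/* the dword built above unpacks as follows (field names here are
 * illustrative; decoding normally happens in userspace):
 *   num_pipes_log2 = tile_config & 0xf;
 *   num_banks      = (tile_config >> 4) & 0xf;
 *   group_size     = (tile_config >> 8) & 0xf;
 *   row_size       = (tile_config >> 12) & 0xf;
 */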
3678
3679 WREG32(GB_ADDR_CONFIG, gb_addr_config);
3680 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
3681 WREG32(DMIF_ADDR_CALC, gb_addr_config);
3682 WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
3683 WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
3684 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3685 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3686 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
3687
3688 cik_tiling_mode_table_init(rdev);
3689
3690 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
3691 rdev->config.cik.max_sh_per_se,
3692 rdev->config.cik.max_backends_per_se);
3693
3694 rdev->config.cik.active_cus = 0;
3695 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
3696 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
3697 rdev->config.cik.active_cus +=
3698 hweight32(cik_get_cu_active_bitmap(rdev, i, j));
3699 }
3700 }
3701
3702 /* set HW defaults for 3D engine */
3703 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
3704
3705 mutex_lock(&rdev->grbm_idx_mutex);
3706 /*
3707 * make sure that the following register writes are broadcast
3708 * to all the shaders
3709 */
3710 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3711 WREG32(SX_DEBUG_1, 0x20);
3712
3713 WREG32(TA_CNTL_AUX, 0x00010000);
3714
3715 tmp = RREG32(SPI_CONFIG_CNTL);
3716 tmp |= 0x03000000;
3717 WREG32(SPI_CONFIG_CNTL, tmp);
3718
3719 WREG32(SQ_CONFIG, 1);
3720
3721 WREG32(DB_DEBUG, 0);
3722
3723 tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
3724 tmp |= 0x00000400;
3725 WREG32(DB_DEBUG2, tmp);
3726
3727 tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
3728 tmp |= 0x00020200;
3729 WREG32(DB_DEBUG3, tmp);
3730
3731 tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
3732 tmp |= 0x00018208;
3733 WREG32(CB_HW_CONTROL, tmp);
3734
3735 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
3736
3737 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
3738 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
3739 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
3740 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
3741
3742 WREG32(VGT_NUM_INSTANCES, 1);
3743
3744 WREG32(CP_PERFMON_CNTL, 0);
3745
3746 WREG32(SQ_CONFIG, 0);
3747
3748 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
3749 FORCE_EOV_MAX_REZ_CNT(255)));
3750
3751 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
3752 AUTO_INVLD_EN(ES_AND_GS_AUTO));
3753
3754 WREG32(VGT_GS_VERTEX_REUSE, 16);
3755 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
3756
3757 tmp = RREG32(HDP_MISC_CNTL);
3758 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
3759 WREG32(HDP_MISC_CNTL, tmp);
3760
3761 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
3762 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
3763
3764 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3765 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
3766 mutex_unlock(&rdev->grbm_idx_mutex);
3767
3768 udelay(50);
3769 }
3770
3771 /*
3772 * GPU scratch registers helpers function.
3773 */
3774 /**
3775 * cik_scratch_init - setup driver info for CP scratch regs
3776 *
3777 * @rdev: radeon_device pointer
3778 *
3779 * Set up the number and offset of the CP scratch registers.
3780 * NOTE: use of CP scratch registers is a legacy interface and
3781 * is not used by default on newer asics (r6xx+). On newer asics,
3782 * memory buffers are used for fences rather than scratch regs.
3783 */
3784 static void cik_scratch_init(struct radeon_device *rdev)
3785 {
3786 int i;
3787
3788 rdev->scratch.num_reg = 7;
3789 rdev->scratch.reg_base = SCRATCH_REG0;
3790 for (i = 0; i < rdev->scratch.num_reg; i++) {
3791 rdev->scratch.free[i] = true;
3792 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3793 }
3794 }
3795
3796 /**
3797 * cik_ring_test - basic gfx ring test
3798 *
3799 * @rdev: radeon_device pointer
3800 * @ring: radeon_ring structure holding ring information
3801 *
3802 * Allocate a scratch register and write to it using the gfx ring (CIK).
3803 * Provides a basic gfx ring test to verify that the ring is working.
3804 * Used by cik_cp_gfx_resume().
3805 * Returns 0 on success, error on failure.
3806 */
3807 int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3808 {
3809 uint32_t scratch;
3810 uint32_t tmp = 0;
3811 unsigned i;
3812 int r;
3813
3814 r = radeon_scratch_get(rdev, &scratch);
3815 if (r) {
3816 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3817 return r;
3818 }
3819 WREG32(scratch, 0xCAFEDEAD);
3820 r = radeon_ring_lock(rdev, ring, 3);
3821 if (r) {
3822 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
3823 radeon_scratch_free(rdev, scratch);
3824 return r;
3825 }
3826 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3827 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
3828 radeon_ring_write(ring, 0xDEADBEEF);
3829 radeon_ring_unlock_commit(rdev, ring, false);
3830
3831 for (i = 0; i < rdev->usec_timeout; i++) {
3832 tmp = RREG32(scratch);
3833 if (tmp == 0xDEADBEEF)
3834 break;
3835 DRM_UDELAY(1);
3836 }
3837 if (i < rdev->usec_timeout) {
3838 DRM_INFO("ring test on %d succeeded in %u usecs\n", ring->idx, i);
3839 } else {
3840 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
3841 ring->idx, scratch, tmp);
3842 r = -EINVAL;
3843 }
3844 radeon_scratch_free(rdev, scratch);
3845 return r;
3846 }
3847
3848 /**
3849 * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
3850 *
3851 * @rdev: radeon_device pointer
3852 * @ridx: radeon ring index
3853 *
3854 * Emits an hdp flush on the cp.
3855 */
3856 static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
3857 int ridx)
3858 {
3859 struct radeon_ring *ring = &rdev->ring[ridx];
3860 u32 ref_and_mask;
3861
3862 switch (ring->idx) {
3863 case CAYMAN_RING_TYPE_CP1_INDEX:
3864 case CAYMAN_RING_TYPE_CP2_INDEX:
3865 default:
3866 switch (ring->me) {
3867 case 0:
3868 ref_and_mask = CP2 << ring->pipe;
3869 break;
3870 case 1:
3871 ref_and_mask = CP6 << ring->pipe;
3872 break;
3873 default:
3874 return;
3875 }
3876 break;
3877 case RADEON_RING_TYPE_GFX_INDEX:
3878 ref_and_mask = CP0;
3879 break;
3880 }
3881
3882 radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3883 radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
3884 WAIT_REG_MEM_FUNCTION(3) | /* == */
3885 WAIT_REG_MEM_ENGINE(1))); /* pfp */
3886 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
3887 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
3888 radeon_ring_write(ring, ref_and_mask);
3889 radeon_ring_write(ring, ref_and_mask);
3890 radeon_ring_write(ring, 0x20); /* poll interval */
3891 }
3892
3893 /**
3894 * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
3895 *
3896 * @rdev: radeon_device pointer
3897 * @fence: radeon fence object
3898 *
3899 * Emits a fence sequence number on the gfx ring and flushes
3900 * GPU caches.
3901 */
3902 void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
3903 struct radeon_fence *fence)
3904 {
3905 struct radeon_ring *ring = &rdev->ring[fence->ring];
3906 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3907
3908 /* EVENT_WRITE_EOP - flush caches, send int */
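/* DATA_SEL(1) selects a 32-bit fence value write; INT_SEL(2) raises
 * the interrupt only once that write has completed (write confirm)
 */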
3909 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3910 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
3911 EOP_TC_ACTION_EN |
3912 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3913 EVENT_INDEX(5)));
3914 radeon_ring_write(ring, addr & 0xfffffffc);
3915 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
3916 radeon_ring_write(ring, fence->seq);
3917 radeon_ring_write(ring, 0);
3918 }
3919
3920 /**
3921 * cik_fence_compute_ring_emit - emit a fence on the compute ring
3922 *
3923 * @rdev: radeon_device pointer
3924 * @fence: radeon fence object
3925 *
3926 * Emits a fence sequence number on the compute ring and flushes
3927 * GPU caches.
3928 */
3929 void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3930 struct radeon_fence *fence)
3931 {
3932 struct radeon_ring *ring = &rdev->ring[fence->ring];
3933 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3934
3935 /* RELEASE_MEM - flush caches, send int */
3936 radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
3937 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
3938 EOP_TC_ACTION_EN |
3939 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3940 EVENT_INDEX(5)));
3941 radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
3942 radeon_ring_write(ring, addr & 0xfffffffc);
3943 radeon_ring_write(ring, upper_32_bits(addr));
3944 radeon_ring_write(ring, fence->seq);
3945 radeon_ring_write(ring, 0);
3946 }
3947
3948 /**
3949 * cik_semaphore_ring_emit - emit a semaphore on the CP ring
3950 *
3951 * @rdev: radeon_device pointer
3952 * @ring: radeon ring buffer object
3953 * @semaphore: radeon semaphore object
3954 * @emit_wait: Is this a semaphore wait?
3955 *
3956 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
3957 * from running ahead of semaphore waits.
3958 */
3959 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3960 struct radeon_ring *ring,
3961 struct radeon_semaphore *semaphore,
3962 bool emit_wait)
3963 {
3964 uint64_t addr = semaphore->gpu_addr;
3965 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
3966
3967 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
3968 radeon_ring_write(ring, lower_32_bits(addr));
3969 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3970
3971 if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
3972 /* Prevent the PFP from running ahead of the semaphore wait */
3973 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3974 radeon_ring_write(ring, 0x0);
3975 }
3976
3977 return true;
3978 }
3979
3980 /**
3981 * cik_copy_cpdma - copy pages using the CP DMA engine
3982 *
3983 * @rdev: radeon_device pointer
3984 * @src_offset: src GPU address
3985 * @dst_offset: dst GPU address
3986 * @num_gpu_pages: number of GPU pages to xfer
3987 * @resv: reservation object to sync to
3988 *
3989 * Copy GPU pages using the CP DMA engine (CIK+).
3990 * Used by the radeon ttm implementation to move pages if
3991 * registered as the asic copy callback.
3992 */
3993 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
3994 uint64_t src_offset, uint64_t dst_offset,
3995 unsigned num_gpu_pages,
3996 struct reservation_object *resv)
3997 {
3998 struct radeon_fence *fence;
3999 struct radeon_sync sync;
4000 int ring_index = rdev->asic->copy.blit_ring_index;
4001 struct radeon_ring *ring = &rdev->ring[ring_index];
4002 u32 size_in_bytes, cur_size_in_bytes, control;
4003 int i, num_loops;
4004 int r = 0;
4005
4006 radeon_sync_create(&sync);
4007
4008 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
4009 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
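/* e.g. 16384 4k pages = 0x4000000 bytes -> 33 loops at the 0x1fffff
 * byte DMA_DATA limit; each loop emits 7 dwords, plus 18 reserved
 * for sync and fence, hence the ring allocation below
 */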
4010 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
4011 if (r) {
4012 DRM_ERROR("radeon: moving bo (%d).\n", r);
4013 radeon_sync_free(rdev, &sync, NULL);
4014 return ERR_PTR(r);
4015 }
4016
4017 radeon_sync_resv(rdev, &sync, resv, false);
4018 radeon_sync_rings(rdev, &sync, ring->idx);
4019
4020 for (i = 0; i < num_loops; i++) {
4021 cur_size_in_bytes = size_in_bytes;
4022 if (cur_size_in_bytes > 0x1fffff)
4023 cur_size_in_bytes = 0x1fffff;
4024 size_in_bytes -= cur_size_in_bytes;
4025 control = 0;
4026 if (size_in_bytes == 0)
4027 control |= PACKET3_DMA_DATA_CP_SYNC;
4028 radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4029 radeon_ring_write(ring, control);
4030 radeon_ring_write(ring, lower_32_bits(src_offset));
4031 radeon_ring_write(ring, upper_32_bits(src_offset));
4032 radeon_ring_write(ring, lower_32_bits(dst_offset));
4033 radeon_ring_write(ring, upper_32_bits(dst_offset));
4034 radeon_ring_write(ring, cur_size_in_bytes);
4035 src_offset += cur_size_in_bytes;
4036 dst_offset += cur_size_in_bytes;
4037 }
4038
4039 r = radeon_fence_emit(rdev, &fence, ring->idx);
4040 if (r) {
4041 radeon_ring_unlock_undo(rdev, ring);
4042 radeon_sync_free(rdev, &sync, NULL);
4043 return ERR_PTR(r);
4044 }
4045
4046 radeon_ring_unlock_commit(rdev, ring, false);
4047 radeon_sync_free(rdev, &sync, fence);
4048
4049 return fence;
4050 }
4051
4052 /*
4053 * IB stuff
4054 */
4055 /**
4056 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
4057 *
4058 * @rdev: radeon_device pointer
4059 * @ib: radeon indirect buffer object
4060 *
4061 * Emits a DE (drawing engine) or CE (constant engine) IB
4062 * on the gfx ring. IBs are usually generated by userspace
4063 * acceleration drivers and submitted to the kernel for
4064 * scheduling on the ring. This function schedules the IB
4065 * on the gfx ring for execution by the GPU.
4066 */
4067 void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
4068 {
4069 struct radeon_ring *ring = &rdev->ring[ib->ring];
4070 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
4071 u32 header, control = INDIRECT_BUFFER_VALID;
4072
4073 if (ib->is_const_ib) {
4074 /* set switch buffer packet before const IB */
4075 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4076 radeon_ring_write(ring, 0);
4077
4078 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4079 } else {
4080 u32 next_rptr;
4081 if (ring->rptr_save_reg) {
4082 next_rptr = ring->wptr + 3 + 4;
4083 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
4084 radeon_ring_write(ring, ((ring->rptr_save_reg -
4085 PACKET3_SET_UCONFIG_REG_START) >> 2));
4086 radeon_ring_write(ring, next_rptr);
4087 } else if (rdev->wb.enabled) {
4088 next_rptr = ring->wptr + 5 + 4;
4089 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4090 radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
4091 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
4092 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
4093 radeon_ring_write(ring, next_rptr);
4094 }
4095
4096 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4097 }
4098
4099 control |= ib->length_dw | (vm_id << 24);
4100
4101 radeon_ring_write(ring, header);
4102 radeon_ring_write(ring,
4103 #ifdef __BIG_ENDIAN
4104 (2 << 0) |
4105 #endif
4106 (ib->gpu_addr & 0xFFFFFFFC));
4107 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4108 radeon_ring_write(ring, control);
4109 }
4110
4111 /**
4112 * cik_ib_test - basic gfx ring IB test
4113 *
4114 * @rdev: radeon_device pointer
4115 * @ring: radeon_ring structure holding ring information
4116 *
4117 * Allocate an IB and execute it on the gfx ring (CIK).
4118 * Provides a basic gfx ring test to verify that IBs are working.
4119 * Returns 0 on success, error on failure.
4120 */
4121 int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
4122 {
4123 struct radeon_ib ib;
4124 uint32_t scratch;
4125 uint32_t tmp = 0;
4126 unsigned i;
4127 int r;
4128
4129 r = radeon_scratch_get(rdev, &scratch);
4130 if (r) {
4131 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
4132 return r;
4133 }
4134 WREG32(scratch, 0xCAFEDEAD);
4135 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
4136 if (r) {
4137 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
4138 radeon_scratch_free(rdev, scratch);
4139 return r;
4140 }
4141 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
4142 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
4143 ib.ptr[2] = 0xDEADBEEF;
4144 ib.length_dw = 3;
4145 r = radeon_ib_schedule(rdev, &ib, NULL, false);
4146 if (r) {
4147 radeon_scratch_free(rdev, scratch);
4148 radeon_ib_free(rdev, &ib);
4149 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
4150 return r;
4151 }
4152 r = radeon_fence_wait(ib.fence, false);
4153 if (r) {
4154 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
4155 radeon_scratch_free(rdev, scratch);
4156 radeon_ib_free(rdev, &ib);
4157 return r;
4158 }
4159 for (i = 0; i < rdev->usec_timeout; i++) {
4160 tmp = RREG32(scratch);
4161 if (tmp == 0xDEADBEEF)
4162 break;
4163 DRM_UDELAY(1);
4164 }
4165 if (i < rdev->usec_timeout) {
4166 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
4167 } else {
4168 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
4169 scratch, tmp);
4170 r = -EINVAL;
4171 }
4172 radeon_scratch_free(rdev, scratch);
4173 radeon_ib_free(rdev, &ib);
4174 return r;
4175 }
4176
4177 /*
4178 * CP.
4179 * On CIK, gfx and compute now have independent command processors.
4180 *
4181 * GFX
4182 * Gfx consists of a single ring and can process both gfx jobs and
4183 * compute jobs. The gfx CP consists of three microengines (ME):
4184 * PFP - Pre-Fetch Parser
4185 * ME - Micro Engine
4186 * CE - Constant Engine
4187 * The PFP and ME make up what is considered the Drawing Engine (DE).
4188 * The CE is an asynchronous engine used for updating buffer descriptors
4189 * used by the DE so that they can be loaded into cache in parallel
4190 * while the DE is processing state update packets.
4191 *
4192 * Compute
4193 * The compute CP consists of two microengines (ME):
4194 * MEC1 - Compute MicroEngine 1
4195 * MEC2 - Compute MicroEngine 2
4196 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
4197 * The queues are exposed to userspace and are programmed directly
4198 * by the compute runtime.
4199 */
4200 /**
4201 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
4202 *
4203 * @rdev: radeon_device pointer
4204 * @enable: enable or disable the MEs
4205 *
4206 * Halts or unhalts the gfx MEs.
4207 */
4208 static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
4209 {
4210 if (enable)
4211 WREG32(CP_ME_CNTL, 0);
4212 else {
4213 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
4214 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
4215 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
4216 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
4217 }
4218 udelay(50);
4219 }
4220
4221 /**
4222 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
4223 *
4224 * @rdev: radeon_device pointer
4225 *
4226 * Loads the gfx PFP, ME, and CE ucode.
4227 * Returns 0 for success, -EINVAL if the ucode is not available.
4228 */
4229 static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
4230 {
4231 int i;
4232
4233 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
4234 return -EINVAL;
4235
4236 cik_cp_gfx_enable(rdev, false);
4237
4238 if (rdev->new_fw) {
4239 const struct gfx_firmware_header_v1_0 *pfp_hdr =
4240 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
4241 const struct gfx_firmware_header_v1_0 *ce_hdr =
4242 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
4243 const struct gfx_firmware_header_v1_0 *me_hdr =
4244 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
4245 const __le32 *fw_data;
4246 u32 fw_size;
4247
4248 radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
4249 radeon_ucode_print_gfx_hdr(&ce_hdr->header);
4250 radeon_ucode_print_gfx_hdr(&me_hdr->header);
4251
4252 /* PFP */
4253 fw_data = (const __le32 *)
4254 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
4255 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
4256 WREG32(CP_PFP_UCODE_ADDR, 0);
4257 for (i = 0; i < fw_size; i++)
4258 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
4259 WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
4260
4261 /* CE */
4262 fw_data = (const __le32 *)
4263 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
4264 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
4265 WREG32(CP_CE_UCODE_ADDR, 0);
4266 for (i = 0; i < fw_size; i++)
4267 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
4268 WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
4269
4270 /* ME */
4271 fw_data = (const __le32 *)
4272 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
4273 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
4274 WREG32(CP_ME_RAM_WADDR, 0);
4275 for (i = 0; i < fw_size; i++)
4276 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
4277 WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
4278 WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
4279 } else {
4280 const __be32 *fw_data;
4281
4282 /* PFP */
4283 fw_data = (const __be32 *)rdev->pfp_fw->data;
4284 WREG32(CP_PFP_UCODE_ADDR, 0);
4285 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
4286 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
4287 WREG32(CP_PFP_UCODE_ADDR, 0);
4288
4289 /* CE */
4290 fw_data = (const __be32 *)rdev->ce_fw->data;
4291 WREG32(CP_CE_UCODE_ADDR, 0);
4292 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
4293 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
4294 WREG32(CP_CE_UCODE_ADDR, 0);
4295
4296 /* ME */
4297 fw_data = (const __be32 *)rdev->me_fw->data;
4298 WREG32(CP_ME_RAM_WADDR, 0);
4299 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
4300 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
4301 WREG32(CP_ME_RAM_WADDR, 0);
4302 }
4303
4304 return 0;
4305 }
4306
4307 /**
4308 * cik_cp_gfx_start - start the gfx ring
4309 *
4310 * @rdev: radeon_device pointer
4311 *
4312 * Enables the ring and loads the clear state context and other
4313 * packets required to init the ring.
4314 * Returns 0 for success, error for failure.
4315 */
4316 static int cik_cp_gfx_start(struct radeon_device *rdev)
4317 {
4318 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
4319 int r, i;
4320
4321 /* init the CP */
4322 WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
4323 WREG32(CP_ENDIAN_SWAP, 0);
4324 WREG32(CP_DEVICE_ID, 1);
4325
4326 cik_cp_gfx_enable(rdev, true);
4327
4328 r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
4329 if (r) {
4330 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
4331 return r;
4332 }
4333
4334 /* init the CE partitions. CE only used for gfx on CIK */
4335 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4336 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4337 radeon_ring_write(ring, 0x8000);
4338 radeon_ring_write(ring, 0x8000);
4339
4340 /* setup clear context state */
4341 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4342 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4343
4344 radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4345 radeon_ring_write(ring, 0x80000000);
4346 radeon_ring_write(ring, 0x80000000);
4347
4348 for (i = 0; i < cik_default_size; i++)
4349 radeon_ring_write(ring, cik_default_state[i]);
4350
4351 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4352 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4353
4354 /* set clear context state */
4355 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4356 radeon_ring_write(ring, 0);
4357
4358 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4359 radeon_ring_write(ring, 0x00000316);
4360 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
4361 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
4362
4363 radeon_ring_unlock_commit(rdev, ring, false);
4364
4365 return 0;
4366 }
4367
4368 /**
4369 * cik_cp_gfx_fini - stop the gfx ring
4370 *
4371 * @rdev: radeon_device pointer
4372 *
4373 * Stop the gfx ring and tear down the driver ring
4374 * info.
4375 */
4376 static void cik_cp_gfx_fini(struct radeon_device *rdev)
4377 {
4378 cik_cp_gfx_enable(rdev, false);
4379 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
4380 }
4381
4382 /**
4383 * cik_cp_gfx_resume - setup the gfx ring buffer registers
4384 *
4385 * @rdev: radeon_device pointer
4386 *
4387 * Program the location and size of the gfx ring buffer
4388 * and test it to make sure it's working.
4389 * Returns 0 for success, error for failure.
4390 */
4391 static int cik_cp_gfx_resume(struct radeon_device *rdev)
4392 {
4393 struct radeon_ring *ring;
4394 u32 tmp;
4395 u32 rb_bufsz;
4396 u64 rb_addr;
4397 int r;
4398
4399 WREG32(CP_SEM_WAIT_TIMER, 0x0);
4400 if (rdev->family != CHIP_HAWAII)
4401 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
4402
4403 /* Set the write pointer delay */
4404 WREG32(CP_RB_WPTR_DELAY, 0);
4405
4406 /* set the RB to use vmid 0 */
4407 WREG32(CP_RB_VMID, 0);
4408
4409 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
4410
4411 /* ring 0 - compute and gfx */
4412 /* Set ring buffer size */
4413 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
4414 rb_bufsz = order_base_2(ring->ring_size / 8);
4415 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
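/* e.g. a 1mb ring: 0x100000 / 8 = 0x20000 dwords -> rb_bufsz = 17;
 * RADEON_GPU_PAGE_SIZE / 8 = 512 -> 9, so tmp = (9 << 8) | 17
 */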
4416 #ifdef __BIG_ENDIAN
4417 tmp |= BUF_SWAP_32BIT;
4418 #endif
4419 WREG32(CP_RB0_CNTL, tmp);
4420
4421 /* Initialize the ring buffer's read and write pointers */
4422 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
4423 ring->wptr = 0;
4424 WREG32(CP_RB0_WPTR, ring->wptr);
4425
4426 /* set the wb address whether it's enabled or not */
4427 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
4428 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
4429
4430 /* scratch register shadowing is no longer supported */
4431 WREG32(SCRATCH_UMSK, 0);
4432
4433 if (!rdev->wb.enabled)
4434 tmp |= RB_NO_UPDATE;
4435
4436 mdelay(1);
4437 WREG32(CP_RB0_CNTL, tmp);
4438
4439 rb_addr = ring->gpu_addr >> 8;
4440 WREG32(CP_RB0_BASE, rb_addr);
4441 WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
4442
4443 /* start the ring */
4444 cik_cp_gfx_start(rdev);
4445 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
4446 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
4447 if (r) {
4448 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
4449 return r;
4450 }
4451
4452 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
4453 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
4454
4455 return 0;
4456 }
4457
4458 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
4459 struct radeon_ring *ring)
4460 {
4461 u32 rptr;
4462
4463 if (rdev->wb.enabled)
4464 rptr = rdev->wb.wb[ring->rptr_offs/4];
4465 else
4466 rptr = RREG32(CP_RB0_RPTR);
4467
4468 return rptr;
4469 }
4470
4471 u32 cik_gfx_get_wptr(struct radeon_device *rdev,
4472 struct radeon_ring *ring)
4473 {
4474 u32 wptr;
4475
4476 wptr = RREG32(CP_RB0_WPTR);
4477
4478 return wptr;
4479 }
4480
4481 void cik_gfx_set_wptr(struct radeon_device *rdev,
4482 struct radeon_ring *ring)
4483 {
4484 WREG32(CP_RB0_WPTR, ring->wptr);
4485 (void)RREG32(CP_RB0_WPTR);
4486 }
4487
4488 u32 cik_compute_get_rptr(struct radeon_device *rdev,
4489 struct radeon_ring *ring)
4490 {
4491 u32 rptr;
4492
4493 if (rdev->wb.enabled) {
4494 rptr = rdev->wb.wb[ring->rptr_offs/4];
4495 } else {
4496 mutex_lock(&rdev->srbm_mutex);
4497 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4498 rptr = RREG32(CP_HQD_PQ_RPTR);
4499 cik_srbm_select(rdev, 0, 0, 0, 0);
4500 mutex_unlock(&rdev->srbm_mutex);
4501 }
4502
4503 return rptr;
4504 }
4505
4506 u32 cik_compute_get_wptr(struct radeon_device *rdev,
4507 struct radeon_ring *ring)
4508 {
4509 u32 wptr;
4510
4511 if (rdev->wb.enabled) {
4512 /* XXX check if swapping is necessary on BE */
4513 wptr = rdev->wb.wb[ring->wptr_offs/4];
4514 } else {
4515 mutex_lock(&rdev->srbm_mutex);
4516 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4517 wptr = RREG32(CP_HQD_PQ_WPTR);
4518 cik_srbm_select(rdev, 0, 0, 0, 0);
4519 mutex_unlock(&rdev->srbm_mutex);
4520 }
4521
4522 return wptr;
4523 }
4524
4525 void cik_compute_set_wptr(struct radeon_device *rdev,
4526 struct radeon_ring *ring)
4527 {
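/* publish the new wptr through the writeback page, then ring the
 * queue's doorbell so the CP fetches it
 */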
4528 /* XXX check if swapping is necessary on BE */
4529 rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
4530 WDOORBELL32(ring->doorbell_index, ring->wptr);
4531 }
4532
4533 /**
4534 * cik_cp_compute_enable - enable/disable the compute CP MEs
4535 *
4536 * @rdev: radeon_device pointer
4537 * @enable: enable or disable the MEs
4538 *
4539 * Halts or unhalts the compute MEs.
4540 */
4541 static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
4542 {
4543 if (enable)
4544 WREG32(CP_MEC_CNTL, 0);
4545 else {
4546 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
4547 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
4548 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
4549 }
4550 udelay(50);
4551 }
4552
4553 /**
4554 * cik_cp_compute_load_microcode - load the compute CP ME ucode
4555 *
4556 * @rdev: radeon_device pointer
4557 *
4558 * Loads the compute MEC1&2 ucode.
4559 * Returns 0 for success, -EINVAL if the ucode is not available.
4560 */
4561 static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
4562 {
4563 int i;
4564
4565 if (!rdev->mec_fw)
4566 return -EINVAL;
4567
4568 cik_cp_compute_enable(rdev, false);
4569
4570 if (rdev->new_fw) {
4571 const struct gfx_firmware_header_v1_0 *mec_hdr =
4572 (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
4573 const __le32 *fw_data;
4574 u32 fw_size;
4575
4576 radeon_ucode_print_gfx_hdr(&mec_hdr->header);
4577
4578 /* MEC1 */
4579 fw_data = (const __le32 *)
4580 (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
4581 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
4582 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4583 for (i = 0; i < fw_size; i++)
4584 WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
4585 WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
4586
4587 /* MEC2 */
4588 if (rdev->family == CHIP_KAVERI) {
4589 const struct gfx_firmware_header_v1_0 *mec2_hdr =
4590 (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
4591
4592 fw_data = (const __le32 *)
4593 (rdev->mec2_fw->data +
4594 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
4595 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
4596 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4597 for (i = 0; i < fw_size; i++)
4598 WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
4599 WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
4600 }
4601 } else {
4602 const __be32 *fw_data;
4603
4604 /* MEC1 */
4605 fw_data = (const __be32 *)rdev->mec_fw->data;
4606 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4607 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4608 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
4609 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4610
4611 if (rdev->family == CHIP_KAVERI) {
4612 /* MEC2 */
4613 fw_data = (const __be32 *)rdev->mec_fw->data;
4614 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4615 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4616 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
4617 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4618 }
4619 }
4620
4621 return 0;
4622 }
4623
4624 /**
4625 * cik_cp_compute_start - start the compute queues
4626 *
4627 * @rdev: radeon_device pointer
4628 *
4629 * Enable the compute queues.
4630 * Returns 0 for success, error for failure.
4631 */
4632 static int cik_cp_compute_start(struct radeon_device *rdev)
4633 {
4634 cik_cp_compute_enable(rdev, true);
4635
4636 return 0;
4637 }
4638
4639 /**
4640 * cik_cp_compute_fini - stop the compute queues
4641 *
4642 * @rdev: radeon_device pointer
4643 *
4644 * Stop the compute queues and tear down the driver queue
4645 * info.
4646 */
4647 static void cik_cp_compute_fini(struct radeon_device *rdev)
4648 {
4649 int i, idx, r;
4650
4651 cik_cp_compute_enable(rdev, false);
4652
4653 for (i = 0; i < 2; i++) {
4654 if (i == 0)
4655 idx = CAYMAN_RING_TYPE_CP1_INDEX;
4656 else
4657 idx = CAYMAN_RING_TYPE_CP2_INDEX;
4658
4659 if (rdev->ring[idx].mqd_obj) {
4660 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
4661 if (unlikely(r != 0))
4662 dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
4663
4664 radeon_bo_unpin(rdev->ring[idx].mqd_obj);
4665 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
4666
4667 radeon_bo_unref(&rdev->ring[idx].mqd_obj);
4668 rdev->ring[idx].mqd_obj = NULL;
4669 }
4670 }
4671 }
4672
4673 static void cik_mec_fini(struct radeon_device *rdev)
4674 {
4675 int r;
4676
4677 if (rdev->mec.hpd_eop_obj) {
4678 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
4679 if (unlikely(r != 0))
4680 dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
4681 radeon_bo_unpin(rdev->mec.hpd_eop_obj);
4682 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
4683
4684 radeon_bo_unref(&rdev->mec.hpd_eop_obj);
4685 rdev->mec.hpd_eop_obj = NULL;
4686 }
4687 }
4688
4689 #define MEC_HPD_SIZE 2048
4690
4691 static int cik_mec_init(struct radeon_device *rdev)
4692 {
4693 int r;
4694 u32 *hpd;
4695
4696 /*
4697 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
4698 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
4699 * Nonetheless, we assign only 1 pipe because all other pipes will
4700 * be handled by KFD
4701 */
4702 rdev->mec.num_mec = 1;
4703 rdev->mec.num_pipe = 1;
4704 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
4705
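/* with 1 MEC and 1 pipe the HPD EOP buffer below comes to
 * 1 * 1 * MEC_HPD_SIZE * 2 = 4096 bytes, a single page
 */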
4706 if (rdev->mec.hpd_eop_obj == NULL) {
4707 r = radeon_bo_create(rdev,
4708 rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
4709 PAGE_SIZE, true,
4710 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
4711 &rdev->mec.hpd_eop_obj);
4712 if (r) {
4713 dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
4714 return r;
4715 }
4716 }
4717
4718 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
4719 if (unlikely(r != 0)) {
4720 cik_mec_fini(rdev);
4721 return r;
4722 }
4723 r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
4724 &rdev->mec.hpd_eop_gpu_addr);
4725 if (r) {
4726 dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
4727 cik_mec_fini(rdev);
4728 return r;
4729 }
4730 r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
4731 if (r) {
4732 dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
4733 cik_mec_fini(rdev);
4734 return r;
4735 }
4736
4737 /* clear memory. Not sure if this is required or not */
4738 memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
4739
4740 radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
4741 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
4742
4743 return 0;
4744 }
4745
4746 struct hqd_registers
4747 {
4748 u32 cp_mqd_base_addr;
4749 u32 cp_mqd_base_addr_hi;
4750 u32 cp_hqd_active;
4751 u32 cp_hqd_vmid;
4752 u32 cp_hqd_persistent_state;
4753 u32 cp_hqd_pipe_priority;
4754 u32 cp_hqd_queue_priority;
4755 u32 cp_hqd_quantum;
4756 u32 cp_hqd_pq_base;
4757 u32 cp_hqd_pq_base_hi;
4758 u32 cp_hqd_pq_rptr;
4759 u32 cp_hqd_pq_rptr_report_addr;
4760 u32 cp_hqd_pq_rptr_report_addr_hi;
4761 u32 cp_hqd_pq_wptr_poll_addr;
4762 u32 cp_hqd_pq_wptr_poll_addr_hi;
4763 u32 cp_hqd_pq_doorbell_control;
4764 u32 cp_hqd_pq_wptr;
4765 u32 cp_hqd_pq_control;
4766 u32 cp_hqd_ib_base_addr;
4767 u32 cp_hqd_ib_base_addr_hi;
4768 u32 cp_hqd_ib_rptr;
4769 u32 cp_hqd_ib_control;
4770 u32 cp_hqd_iq_timer;
4771 u32 cp_hqd_iq_rptr;
4772 u32 cp_hqd_dequeue_request;
4773 u32 cp_hqd_dma_offload;
4774 u32 cp_hqd_sema_cmd;
4775 u32 cp_hqd_msg_type;
4776 u32 cp_hqd_atomic0_preop_lo;
4777 u32 cp_hqd_atomic0_preop_hi;
4778 u32 cp_hqd_atomic1_preop_lo;
4779 u32 cp_hqd_atomic1_preop_hi;
4780 u32 cp_hqd_hq_scheduler0;
4781 u32 cp_hqd_hq_scheduler1;
4782 u32 cp_mqd_control;
4783 };
4784
4785 struct bonaire_mqd
4786 {
4787 u32 header;
4788 u32 dispatch_initiator;
4789 u32 dimensions[3];
4790 u32 start_idx[3];
4791 u32 num_threads[3];
4792 u32 pipeline_stat_enable;
4793 u32 perf_counter_enable;
4794 u32 pgm[2];
4795 u32 tba[2];
4796 u32 tma[2];
4797 u32 pgm_rsrc[2];
4798 u32 vmid;
4799 u32 resource_limits;
4800 u32 static_thread_mgmt01[2];
4801 u32 tmp_ring_size;
4802 u32 static_thread_mgmt23[2];
4803 u32 restart[3];
4804 u32 thread_trace_enable;
4805 u32 reserved1;
4806 u32 user_data[16];
4807 u32 vgtcs_invoke_count[2];
4808 struct hqd_registers queue_state;
4809 u32 dequeue_cntr;
4810 u32 interrupt_queue[64];
4811 };
4812
4813 /**
4814 * cik_cp_compute_resume - setup the compute queue registers
4815 *
4816 * @rdev: radeon_device pointer
4817 *
4818 * Program the compute queues and test them to make sure they
4819 * are working.
4820 * Returns 0 for success, error for failure.
4821 */
4822 static int cik_cp_compute_resume(struct radeon_device *rdev)
4823 {
4824 int r, i, j, idx;
4825 u32 tmp;
4826 bool use_doorbell = true;
4827 u64 hqd_gpu_addr;
4828 u64 mqd_gpu_addr;
4829 u64 eop_gpu_addr;
4830 u64 wb_gpu_addr;
4831 u32 *buf;
4832 struct bonaire_mqd *mqd;
4833
4834 r = cik_cp_compute_start(rdev);
4835 if (r)
4836 return r;
4837
4838 /* fix up chicken bits */
4839 tmp = RREG32(CP_CPF_DEBUG);
4840 tmp |= (1 << 23);
4841 WREG32(CP_CPF_DEBUG, tmp);
4842
4843 /* init the pipes */
4844 mutex_lock(&rdev->srbm_mutex);
4845
4846 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
4847
4848 cik_srbm_select(rdev, 0, 0, 0, 0);
4849
4850 /* write the EOP addr */
4851 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
4852 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
4853
4854 /* set the VMID assigned */
4855 WREG32(CP_HPD_EOP_VMID, 0);
4856
4857 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4858 tmp = RREG32(CP_HPD_EOP_CONTROL);
4859 tmp &= ~EOP_SIZE_MASK;
4860 tmp |= order_base_2(MEC_HPD_SIZE / 8);
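/* MEC_HPD_SIZE / 8 = 256 and order_base_2(256) = 8, so the hardware
 * sees 2^(8+1) = 512 dwords = 2048 bytes, matching the buffer size
 */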
4861 WREG32(CP_HPD_EOP_CONTROL, tmp);
4862
4863 mutex_unlock(&rdev->srbm_mutex);
4864
4865 /* init the queues. Just two for now. */
4866 for (i = 0; i < 2; i++) {
4867 if (i == 0)
4868 idx = CAYMAN_RING_TYPE_CP1_INDEX;
4869 else
4870 idx = CAYMAN_RING_TYPE_CP2_INDEX;
4871
4872 if (rdev->ring[idx].mqd_obj == NULL) {
4873 r = radeon_bo_create(rdev,
4874 sizeof(struct bonaire_mqd),
4875 PAGE_SIZE, true,
4876 RADEON_GEM_DOMAIN_GTT, 0, NULL,
4877 NULL, &rdev->ring[idx].mqd_obj);
4878 if (r) {
4879 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
4880 return r;
4881 }
4882 }
4883
4884 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
4885 if (unlikely(r != 0)) {
4886 cik_cp_compute_fini(rdev);
4887 return r;
4888 }
4889 r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
4890 &mqd_gpu_addr);
4891 if (r) {
4892 dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
4893 cik_cp_compute_fini(rdev);
4894 return r;
4895 }
4896 r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
4897 if (r) {
4898 dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
4899 cik_cp_compute_fini(rdev);
4900 return r;
4901 }
4902
4903 /* init the mqd struct */
4904 memset(buf, 0, sizeof(struct bonaire_mqd));
4905
4906 mqd = (struct bonaire_mqd *)buf;
4907 mqd->header = 0xC0310800;
4908 mqd->static_thread_mgmt01[0] = 0xffffffff;
4909 mqd->static_thread_mgmt01[1] = 0xffffffff;
4910 mqd->static_thread_mgmt23[0] = 0xffffffff;
4911 mqd->static_thread_mgmt23[1] = 0xffffffff;
4912
4913 mutex_lock(&rdev->srbm_mutex);
4914 cik_srbm_select(rdev, rdev->ring[idx].me,
4915 rdev->ring[idx].pipe,
4916 rdev->ring[idx].queue, 0);
4917
4918 /* disable wptr polling */
4919 tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
4920 tmp &= ~WPTR_POLL_EN;
4921 WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
4922
4923 /* enable doorbell? */
4924 mqd->queue_state.cp_hqd_pq_doorbell_control =
4925 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
4926 if (use_doorbell)
4927 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
4928 else
4929 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
4930 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
4931 mqd->queue_state.cp_hqd_pq_doorbell_control);
4932
4933 /* disable the queue if it's active */
4934 mqd->queue_state.cp_hqd_dequeue_request = 0;
4935 mqd->queue_state.cp_hqd_pq_rptr = 0;
4936 mqd->queue_state.cp_hqd_pq_wptr = 0;
4937 if (RREG32(CP_HQD_ACTIVE) & 1) {
4938 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
4939 for (j = 0; j < rdev->usec_timeout; j++) {
4940 if (!(RREG32(CP_HQD_ACTIVE) & 1))
4941 break;
4942 udelay(1);
4943 }
4944 WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
4945 WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
4946 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
4947 }
4948
4949 /* set the pointer to the MQD */
4950 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
4951 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
4952 WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
4953 WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
4954 /* set MQD vmid to 0 */
4955 mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
4956 mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
4957 WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
4958
4959 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4960 hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
4961 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
4962 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4963 WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
4964 WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
4965
4966 /* set up the HQD, this is similar to CP_RB0_CNTL */
4967 mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
4968 mqd->queue_state.cp_hqd_pq_control &=
4969 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
4970
4971 mqd->queue_state.cp_hqd_pq_control |=
4972 order_base_2(rdev->ring[idx].ring_size / 8);
4973 mqd->queue_state.cp_hqd_pq_control |=
4974 (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
4975 #ifdef __BIG_ENDIAN
4976 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
4977 #endif
4978 mqd->queue_state.cp_hqd_pq_control &=
4979 ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
4980 mqd->queue_state.cp_hqd_pq_control |=
4981 PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
4982 WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
4983
4984 /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
4985 if (i == 0)
4986 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
4987 else
4988 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
4989 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
4990 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4991 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
4992 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
4993 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
4994
4995 /* set the wb address whether it's enabled or not */
4996 if (i == 0)
4997 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
4998 else
4999 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
5000 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
5001 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
5002 upper_32_bits(wb_gpu_addr) & 0xffff;
5003 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
5004 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
5005 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
5006 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
5007
5008 /* enable the doorbell if requested */
5009 if (use_doorbell) {
5010 mqd->queue_state.cp_hqd_pq_doorbell_control =
5011 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
5012 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
5013 mqd->queue_state.cp_hqd_pq_doorbell_control |=
5014 DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
5015 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
5016 mqd->queue_state.cp_hqd_pq_doorbell_control &=
5017 ~(DOORBELL_SOURCE | DOORBELL_HIT);
5018
5019 } else {
5020 mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
5021 }
5022 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
5023 mqd->queue_state.cp_hqd_pq_doorbell_control);
5024
5025 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
5026 rdev->ring[idx].wptr = 0;
5027 mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
5028 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
5029 mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR);
5030
5031 /* set the vmid for the queue */
5032 mqd->queue_state.cp_hqd_vmid = 0;
5033 WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
5034
5035 /* activate the queue */
5036 mqd->queue_state.cp_hqd_active = 1;
5037 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
5038
5039 cik_srbm_select(rdev, 0, 0, 0, 0);
5040 mutex_unlock(&rdev->srbm_mutex);
5041
5042 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
5043 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
5044
5045 rdev->ring[idx].ready = true;
5046 r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
5047 if (r)
5048 rdev->ring[idx].ready = false;
5049 }
5050
5051 return 0;
5052 }
5053
5054 static void cik_cp_enable(struct radeon_device *rdev, bool enable)
5055 {
5056 cik_cp_gfx_enable(rdev, enable);
5057 cik_cp_compute_enable(rdev, enable);
5058 }
5059
5060 static int cik_cp_load_microcode(struct radeon_device *rdev)
5061 {
5062 int r;
5063
5064 r = cik_cp_gfx_load_microcode(rdev);
5065 if (r)
5066 return r;
5067 r = cik_cp_compute_load_microcode(rdev);
5068 if (r)
5069 return r;
5070
5071 return 0;
5072 }
5073
5074 static void cik_cp_fini(struct radeon_device *rdev)
5075 {
5076 cik_cp_gfx_fini(rdev);
5077 cik_cp_compute_fini(rdev);
5078 }
5079
5080 static int cik_cp_resume(struct radeon_device *rdev)
5081 {
5082 int r;
5083
5084 cik_enable_gui_idle_interrupt(rdev, false);
5085
5086 r = cik_cp_load_microcode(rdev);
5087 if (r)
5088 return r;
5089
5090 r = cik_cp_gfx_resume(rdev);
5091 if (r)
5092 return r;
5093 r = cik_cp_compute_resume(rdev);
5094 if (r)
5095 return r;
5096
5097 cik_enable_gui_idle_interrupt(rdev, true);
5098
5099 return 0;
5100 }
5101
5102 static void cik_print_gpu_status_regs(struct radeon_device *rdev)
5103 {
5104 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
5105 RREG32(GRBM_STATUS));
5106 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
5107 RREG32(GRBM_STATUS2));
5108 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
5109 RREG32(GRBM_STATUS_SE0));
5110 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
5111 RREG32(GRBM_STATUS_SE1));
5112 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
5113 RREG32(GRBM_STATUS_SE2));
5114 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
5115 RREG32(GRBM_STATUS_SE3));
5116 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
5117 RREG32(SRBM_STATUS));
5118 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
5119 RREG32(SRBM_STATUS2));
5120 dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
5121 RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
5122 dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
5123 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
5124 dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
5125 dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
5126 RREG32(CP_STALLED_STAT1));
5127 dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
5128 RREG32(CP_STALLED_STAT2));
5129 dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
5130 RREG32(CP_STALLED_STAT3));
5131 dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
5132 RREG32(CP_CPF_BUSY_STAT));
5133 dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
5134 RREG32(CP_CPF_STALLED_STAT1));
5135 dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
5136 dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
5137 dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
5138 RREG32(CP_CPC_STALLED_STAT1));
5139 dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
5140 }
5141
5142 /**
5143 * cik_gpu_check_soft_reset - check which blocks are busy
5144 *
5145 * @rdev: radeon_device pointer
5146 *
5147 * Check which blocks are busy and return the relevant reset
5148 * mask to be used by cik_gpu_soft_reset().
5149 * Returns a mask of the blocks to be reset.
5150 */
5151 u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
5152 {
5153 u32 reset_mask = 0;
5154 u32 tmp;
5155
5156 /* GRBM_STATUS */
5157 tmp = RREG32(GRBM_STATUS);
5158 if (tmp & (PA_BUSY | SC_BUSY |
5159 BCI_BUSY | SX_BUSY |
5160 TA_BUSY | VGT_BUSY |
5161 DB_BUSY | CB_BUSY |
5162 GDS_BUSY | SPI_BUSY |
5163 IA_BUSY | IA_BUSY_NO_DMA))
5164 reset_mask |= RADEON_RESET_GFX;
5165
5166 if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
5167 reset_mask |= RADEON_RESET_CP;
5168
5169 /* GRBM_STATUS2 */
5170 tmp = RREG32(GRBM_STATUS2);
5171 if (tmp & RLC_BUSY)
5172 reset_mask |= RADEON_RESET_RLC;
5173
5174 /* SDMA0_STATUS_REG */
5175 tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
5176 if (!(tmp & SDMA_IDLE))
5177 reset_mask |= RADEON_RESET_DMA;
5178
5179 /* SDMA1_STATUS_REG */
5180 tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
5181 if (!(tmp & SDMA_IDLE))
5182 reset_mask |= RADEON_RESET_DMA1;
5183
5184 /* SRBM_STATUS2 */
5185 tmp = RREG32(SRBM_STATUS2);
5186 if (tmp & SDMA_BUSY)
5187 reset_mask |= RADEON_RESET_DMA;
5188
5189 if (tmp & SDMA1_BUSY)
5190 reset_mask |= RADEON_RESET_DMA1;
5191
5192 /* SRBM_STATUS */
5193 tmp = RREG32(SRBM_STATUS);
5194
5195 if (tmp & IH_BUSY)
5196 reset_mask |= RADEON_RESET_IH;
5197
5198 if (tmp & SEM_BUSY)
5199 reset_mask |= RADEON_RESET_SEM;
5200
5201 if (tmp & GRBM_RQ_PENDING)
5202 reset_mask |= RADEON_RESET_GRBM;
5203
5204 if (tmp & VMC_BUSY)
5205 reset_mask |= RADEON_RESET_VMC;
5206
5207 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
5208 MCC_BUSY | MCD_BUSY))
5209 reset_mask |= RADEON_RESET_MC;
5210
5211 if (evergreen_is_display_hung(rdev))
5212 reset_mask |= RADEON_RESET_DISPLAY;
5213
5214 /* Skip MC reset as it's most likely not hung, just busy */
5215 if (reset_mask & RADEON_RESET_MC) {
5216 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
5217 reset_mask &= ~RADEON_RESET_MC;
5218 }
5219
5220 return reset_mask;
5221 }
5222
5223 /**
5224 * cik_gpu_soft_reset - soft reset GPU
5225 *
5226 * @rdev: radeon_device pointer
5227 * @reset_mask: mask of which blocks to reset
5228 *
5229 * Soft reset the blocks specified in @reset_mask.
5230 */
5231 static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
5232 {
5233 struct evergreen_mc_save save;
5234 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
5235 u32 tmp;
5236
5237 if (reset_mask == 0)
5238 return;
5239
5240 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
5241
5242 cik_print_gpu_status_regs(rdev);
5243 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5244 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
5245 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5246 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
5247
5248 /* disable CG/PG */
5249 cik_fini_pg(rdev);
5250 cik_fini_cg(rdev);
5251
5252 /* stop the rlc */
5253 cik_rlc_stop(rdev);
5254
5255 /* Disable GFX parsing/prefetching */
5256 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
5257
5258 /* Disable MEC parsing/prefetching */
5259 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
5260
5261 if (reset_mask & RADEON_RESET_DMA) {
5262 /* sdma0 */
5263 tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
5264 tmp |= SDMA_HALT;
5265 WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
5266 }
5267 if (reset_mask & RADEON_RESET_DMA1) {
5268 /* sdma1 */
5269 tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
5270 tmp |= SDMA_HALT;
5271 WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
5272 }
5273
5274 evergreen_mc_stop(rdev, &save);
5275 if (evergreen_mc_wait_for_idle(rdev)) {
5276 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5277 }
5278
5279 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
5280 grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
5281
5282 if (reset_mask & RADEON_RESET_CP) {
5283 grbm_soft_reset |= SOFT_RESET_CP;
5284
5285 srbm_soft_reset |= SOFT_RESET_GRBM;
5286 }
5287
5288 if (reset_mask & RADEON_RESET_DMA)
5289 srbm_soft_reset |= SOFT_RESET_SDMA;
5290
5291 if (reset_mask & RADEON_RESET_DMA1)
5292 srbm_soft_reset |= SOFT_RESET_SDMA1;
5293
5294 if (reset_mask & RADEON_RESET_DISPLAY)
5295 srbm_soft_reset |= SOFT_RESET_DC;
5296
5297 if (reset_mask & RADEON_RESET_RLC)
5298 grbm_soft_reset |= SOFT_RESET_RLC;
5299
5300 if (reset_mask & RADEON_RESET_SEM)
5301 srbm_soft_reset |= SOFT_RESET_SEM;
5302
5303 if (reset_mask & RADEON_RESET_IH)
5304 srbm_soft_reset |= SOFT_RESET_IH;
5305
5306 if (reset_mask & RADEON_RESET_GRBM)
5307 srbm_soft_reset |= SOFT_RESET_GRBM;
5308
5309 if (reset_mask & RADEON_RESET_VMC)
5310 srbm_soft_reset |= SOFT_RESET_VMC;
5311
5312 if (!(rdev->flags & RADEON_IS_IGP)) {
5313 if (reset_mask & RADEON_RESET_MC)
5314 srbm_soft_reset |= SOFT_RESET_MC;
5315 }
5316
5317 if (grbm_soft_reset) {
5318 tmp = RREG32(GRBM_SOFT_RESET);
5319 tmp |= grbm_soft_reset;
5320 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
5321 WREG32(GRBM_SOFT_RESET, tmp);
5322 tmp = RREG32(GRBM_SOFT_RESET);
5323
5324 udelay(50);
5325
5326 tmp &= ~grbm_soft_reset;
5327 WREG32(GRBM_SOFT_RESET, tmp);
5328 tmp = RREG32(GRBM_SOFT_RESET);
5329 }
5330
5331 if (srbm_soft_reset) {
5332 tmp = RREG32(SRBM_SOFT_RESET);
5333 tmp |= srbm_soft_reset;
5334 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
5335 WREG32(SRBM_SOFT_RESET, tmp);
5336 tmp = RREG32(SRBM_SOFT_RESET);
5337
5338 udelay(50);
5339
5340 tmp &= ~srbm_soft_reset;
5341 WREG32(SRBM_SOFT_RESET, tmp);
5342 tmp = RREG32(SRBM_SOFT_RESET);
5343 }
5344
5345 /* Wait a little for things to settle down */
5346 udelay(50);
5347
5348 evergreen_mc_resume(rdev, &save);
5349 udelay(50);
5350
5351 cik_print_gpu_status_regs(rdev);
5352 }
5353
5354 struct kv_reset_save_regs {
5355 u32 gmcon_reng_execute;
5356 u32 gmcon_misc;
5357 u32 gmcon_misc3;
5358 };
5359
5360 static void kv_save_regs_for_reset(struct radeon_device *rdev,
5361 struct kv_reset_save_regs *save)
5362 {
5363 save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
5364 save->gmcon_misc = RREG32(GMCON_MISC);
5365 save->gmcon_misc3 = RREG32(GMCON_MISC3);
5366
5367 WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
5368 WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
5369 STCTRL_STUTTER_EN));
5370 }
5371
5372 static void kv_restore_regs_for_reset(struct radeon_device *rdev,
5373 struct kv_reset_save_regs *save)
5374 {
5375 int i;
5376
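	/*
	 * Presumably each GMCON_PGFSM_CONFIG write below selects a
	 * power-gating state machine register chain and the surrounding
	 * GMCON_PGFSM_WRITE accesses shift data through it; the exact
	 * semantics aren't documented here, so the sequence is simply
	 * replayed verbatim.
	 */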
5377 WREG32(GMCON_PGFSM_WRITE, 0);
5378 WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
5379
5380 for (i = 0; i < 5; i++)
5381 WREG32(GMCON_PGFSM_WRITE, 0);
5382
5383 WREG32(GMCON_PGFSM_WRITE, 0);
5384 WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
5385
5386 for (i = 0; i < 5; i++)
5387 WREG32(GMCON_PGFSM_WRITE, 0);
5388
5389 WREG32(GMCON_PGFSM_WRITE, 0x210000);
5390 WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
5391
5392 for (i = 0; i < 5; i++)
5393 WREG32(GMCON_PGFSM_WRITE, 0);
5394
5395 WREG32(GMCON_PGFSM_WRITE, 0x21003);
5396 WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
5397
5398 for (i = 0; i < 5; i++)
5399 WREG32(GMCON_PGFSM_WRITE, 0);
5400
5401 WREG32(GMCON_PGFSM_WRITE, 0x2b00);
5402 WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
5403
5404 for (i = 0; i < 5; i++)
5405 WREG32(GMCON_PGFSM_WRITE, 0);
5406
5407 WREG32(GMCON_PGFSM_WRITE, 0);
5408 WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
5409
5410 for (i = 0; i < 5; i++)
5411 WREG32(GMCON_PGFSM_WRITE, 0);
5412
5413 WREG32(GMCON_PGFSM_WRITE, 0x420000);
5414 WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
5415
5416 for (i = 0; i < 5; i++)
5417 WREG32(GMCON_PGFSM_WRITE, 0);
5418
5419 WREG32(GMCON_PGFSM_WRITE, 0x120202);
5420 WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
5421
5422 for (i = 0; i < 5; i++)
5423 WREG32(GMCON_PGFSM_WRITE, 0);
5424
5425 WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
5426 WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
5427
5428 for (i = 0; i < 5; i++)
5429 WREG32(GMCON_PGFSM_WRITE, 0);
5430
5431 WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
5432 WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
5433
5434 for (i = 0; i < 5; i++)
5435 WREG32(GMCON_PGFSM_WRITE, 0);
5436
5437 WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
5438 WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
5439
5440 WREG32(GMCON_MISC3, save->gmcon_misc3);
5441 WREG32(GMCON_MISC, save->gmcon_misc);
5442 WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
5443 }
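/*
 * The same sequence, shown data-driven purely for readability (an
 * illustrative sketch, not driver code; the constants are copied from
 * the writes above). Each entry is a { GMCON_PGFSM_WRITE value,
 * GMCON_PGFSM_CONFIG value } pair, and every pair except the last is
 * followed by five zero writes to GMCON_PGFSM_WRITE:
 *
 *	static const u32 pgfsm_seq[][2] = {
 *		{ 0x000000, 0x200010ff }, { 0x000000, 0x300010ff },
 *		{ 0x210000, 0xa00010ff }, { 0x021003, 0xb00010ff },
 *		{ 0x002b00, 0xc00010ff }, { 0x000000, 0xd00010ff },
 *		{ 0x420000, 0x100010ff }, { 0x120202, 0x500010ff },
 *		{ 0x3e3e36, 0x600010ff }, { 0x373f3e, 0x700010ff },
 *		{ 0x3e1332, 0xe00010ff },
 *	};
 */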
5444
5445 static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
5446 {
5447 struct evergreen_mc_save save;
5448 struct kv_reset_save_regs kv_save = { 0 };
5449 u32 tmp, i;
5450
5451 dev_info(rdev->dev, "GPU pci config reset\n");
5452
5453 /* disable dpm? */
5454
5455 /* disable cg/pg */
5456 cik_fini_pg(rdev);
5457 cik_fini_cg(rdev);
5458
5459 /* Disable GFX parsing/prefetching */
5460 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
5461
5462 /* Disable MEC parsing/prefetching */
5463 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
5464
5465 /* sdma0 */
5466 tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
5467 tmp |= SDMA_HALT;
5468 WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
5469 /* sdma1 */
5470 tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
5471 tmp |= SDMA_HALT;
5472 WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
5473 /* XXX other engines? */
5474
5475 /* halt the rlc, disable cp internal ints */
5476 cik_rlc_stop(rdev);
5477
5478 udelay(50);
5479
5480 /* disable mem access */
5481 evergreen_mc_stop(rdev, &save);
5482 if (evergreen_mc_wait_for_idle(rdev)) {
5483 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5484 }
5485
5486 if (rdev->flags & RADEON_IS_IGP)
5487 kv_save_regs_for_reset(rdev, &kv_save);
5488
5489 /* disable BM */
5490 pci_clear_master(rdev->pdev);
5491 /* reset */
5492 radeon_pci_config_reset(rdev);
5493
5494 udelay(100);
5495
5496 /* wait for asic to come out of reset */
5497 for (i = 0; i < rdev->usec_timeout; i++) {
5498 if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
5499 break;
5500 udelay(1);
5501 }
5502
5503 /* does asic init need to be run first??? */
5504 if (rdev->flags & RADEON_IS_IGP)
5505 kv_restore_regs_for_reset(rdev, &kv_save);
5506 }
5507
5508 /**
5509 * cik_asic_reset - soft reset GPU
5510 *
5511 * @rdev: radeon_device pointer
5512 *
5513 * Look up which blocks are hung and attempt
5514 * to reset them.
5515 * Returns 0 for success.
5516 */
5517 int cik_asic_reset(struct radeon_device *rdev)
5518 {
5519 u32 reset_mask;
5520
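	/*
	 * Reset strategy, in escalating order: flag the engine as hung
	 * in the BIOS scratch registers, soft reset only the blocks the
	 * check reported as busy, re-check, and fall back to a full PCI
	 * config reset only if blocks are still hung and the
	 * radeon_hard_reset module parameter allows it.
	 */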
5521 reset_mask = cik_gpu_check_soft_reset(rdev);
5522
5523 if (reset_mask)
5524 r600_set_bios_scratch_engine_hung(rdev, true);
5525
5526 /* try soft reset */
5527 cik_gpu_soft_reset(rdev, reset_mask);
5528
5529 reset_mask = cik_gpu_check_soft_reset(rdev);
5530
5531 /* try pci config reset */
5532 if (reset_mask && radeon_hard_reset)
5533 cik_gpu_pci_config_reset(rdev);
5534
5535 reset_mask = cik_gpu_check_soft_reset(rdev);
5536
5537 if (!reset_mask)
5538 r600_set_bios_scratch_engine_hung(rdev, false);
5539
5540 return 0;
5541 }
5542
5543 /**
5544 * cik_gfx_is_lockup - check if the 3D engine is locked up
5545 *
5546 * @rdev: radeon_device pointer
5547 * @ring: radeon_ring structure holding ring information
5548 *
5549 * Check if the 3D engine is locked up (CIK).
5550 * Returns true if the engine is locked, false if not.
5551 */
5552 bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
5553 {
5554 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
5555
5556 if (!(reset_mask & (RADEON_RESET_GFX |
5557 RADEON_RESET_COMPUTE |
5558 RADEON_RESET_CP))) {
5559 radeon_ring_lockup_update(rdev, ring);
5560 return false;
5561 }
5562 return radeon_ring_test_lockup(rdev, ring);
5563 }
5564
5565 /* MC */
5566 /**
5567 * cik_mc_program - program the GPU memory controller
5568 *
5569 * @rdev: radeon_device pointer
5570 *
5571 * Set the location of vram, gart, and AGP in the GPU's
5572 * physical address space (CIK).
5573 */
5574 static void cik_mc_program(struct radeon_device *rdev)
5575 {
5576 struct evergreen_mc_save save;
5577 u32 tmp;
5578 int i, j;
5579
5580 /* Initialize HDP */
5581 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
5582 WREG32((0x2c14 + j), 0x00000000);
5583 WREG32((0x2c18 + j), 0x00000000);
5584 WREG32((0x2c1c + j), 0x00000000);
5585 WREG32((0x2c20 + j), 0x00000000);
5586 WREG32((0x2c24 + j), 0x00000000);
5587 }
5588 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
5589
5590 evergreen_mc_stop(rdev, &save);
5591 if (radeon_mc_wait_for_idle(rdev)) {
5592 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5593 }
5594 	/* Lock out access through the VGA aperture */
5595 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
5596 /* Update configuration */
5597 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
5598 rdev->mc.vram_start >> 12);
5599 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
5600 rdev->mc.vram_end >> 12);
5601 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
5602 rdev->vram_scratch.gpu_addr >> 12);
5603 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
5604 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
5605 WREG32(MC_VM_FB_LOCATION, tmp);
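	/*
	 * Worked example: with vram_start = 0 and vram_end = 0xFFFFFFFF
	 * (a 4 GB framebuffer), the packed value is 0x00FF0000: the
	 * upper 16 bits hold vram_end >> 24 and the lower 16 bits hold
	 * vram_start >> 24.
	 */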
5606 /* XXX double check these! */
5607 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
5608 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
5609 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
5610 WREG32(MC_VM_AGP_BASE, 0);
5611 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
5612 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
5613 if (radeon_mc_wait_for_idle(rdev)) {
5614 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5615 }
5616 evergreen_mc_resume(rdev, &save);
5617 /* we need to own VRAM, so turn off the VGA renderer here
5618 * to stop it overwriting our objects */
5619 rv515_vga_render_disable(rdev);
5620 }
5621
5622 /**
5623 * cik_mc_init - initialize the memory controller driver params
5624 *
5625 * @rdev: radeon_device pointer
5626 *
5627 * Look up the amount of vram, vram width, and decide how to place
5628 * vram and gart within the GPU's physical address space (CIK).
5629 * Returns 0 for success.
5630 */
5631 static int cik_mc_init(struct radeon_device *rdev)
5632 {
5633 u32 tmp;
5634 int chansize, numchan;
5635
5636 	/* Get VRAM information */
5637 rdev->mc.vram_is_ddr = true;
5638 tmp = RREG32(MC_ARB_RAMCFG);
5639 if (tmp & CHANSIZE_MASK) {
5640 chansize = 64;
5641 } else {
5642 chansize = 32;
5643 }
5644 tmp = RREG32(MC_SHARED_CHMAP);
5645 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
5646 case 0:
5647 default:
5648 numchan = 1;
5649 break;
5650 case 1:
5651 numchan = 2;
5652 break;
5653 case 2:
5654 numchan = 4;
5655 break;
5656 case 3:
5657 numchan = 8;
5658 break;
5659 case 4:
5660 numchan = 3;
5661 break;
5662 case 5:
5663 numchan = 6;
5664 break;
5665 case 6:
5666 numchan = 10;
5667 break;
5668 case 7:
5669 numchan = 12;
5670 break;
5671 case 8:
5672 numchan = 16;
5673 break;
5674 }
5675 rdev->mc.vram_width = numchan * chansize;
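	/* e.g. 4 channels x 64 bits per channel = a 256-bit interface */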
5676 	/* Could the aperture size report 0? */
5677 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
5678 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
5679 	/* CONFIG_MEMSIZE reports the size in MB, as on SI */
5680 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
5681 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
5682 rdev->mc.visible_vram_size = rdev->mc.aper_size;
5683 si_vram_gtt_location(rdev, &rdev->mc);
5684 radeon_update_bandwidth_info(rdev);
5685
5686 return 0;
5687 }
5688
5689 /*
5690 * GART
5691 * VMID 0 is the physical GPU addresses as used by the kernel.
5692 * VMIDs 1-15 are used for userspace clients and are handled
5693 * by the radeon vm/hsa code.
5694 */
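/*
 * The per-VMID page table base registers live in two banks: contexts
 * 0-7 start at VM_CONTEXT0_PAGE_TABLE_BASE_ADDR and contexts 8-15 at
 * VM_CONTEXT8_PAGE_TABLE_BASE_ADDR, which is why the GART/VM code in
 * this file keeps branching on vm_id < 8. A hypothetical helper (an
 * illustrative sketch, not used by the driver) would be:
 *
 *	static inline u32 cik_vm_pt_base_reg(unsigned int vm_id)
 *	{
 *		if (vm_id < 8)
 *			return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2);
 *		return VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2);
 *	}
 */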
5695 /**
5696 * cik_pcie_gart_tlb_flush - gart tlb flush callback
5697 *
5698 * @rdev: radeon_device pointer
5699 *
5700 * Flush the TLB for the VMID 0 page table (CIK).
5701 */
5702 void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
5703 {
5704 /* flush hdp cache */
5705 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
5706
5707 	/* bits 0-15 are the VM contexts 0-15 */
5708 WREG32(VM_INVALIDATE_REQUEST, 0x1);
5709 }
5710
5711 static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
5712 {
5713 int i;
5714 uint32_t sh_mem_bases, sh_mem_config;
5715
5716 sh_mem_bases = 0x6000 | 0x6000 << 16;
5717 sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
5718 sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
5719
5720 mutex_lock(&rdev->srbm_mutex);
5721 for (i = 8; i < 16; i++) {
5722 cik_srbm_select(rdev, 0, 0, 0, i);
5723 /* CP and shaders */
5724 WREG32(SH_MEM_CONFIG, sh_mem_config);
5725 WREG32(SH_MEM_APE1_BASE, 1);
5726 WREG32(SH_MEM_APE1_LIMIT, 0);
5727 WREG32(SH_MEM_BASES, sh_mem_bases);
5728 }
5729 cik_srbm_select(rdev, 0, 0, 0, 0);
5730 mutex_unlock(&rdev->srbm_mutex);
5731 }
5732
5733 /**
5734 * cik_pcie_gart_enable - gart enable
5735 *
5736 * @rdev: radeon_device pointer
5737 *
5738 * This sets up the TLBs, programs the page tables for VMID0,
5739 * sets up the hw for VMIDs 1-15 which are allocated on
5740 * demand, and sets up the global locations for the LDS, GDS,
5741 * and GPUVM for FSA64 clients (CIK).
5742 * Returns 0 for success, errors for failure.
5743 */
5744 static int cik_pcie_gart_enable(struct radeon_device *rdev)
5745 {
5746 int r, i;
5747
5748 if (rdev->gart.robj == NULL) {
5749 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
5750 return -EINVAL;
5751 }
5752 r = radeon_gart_table_vram_pin(rdev);
5753 if (r)
5754 return r;
5755 /* Setup TLB control */
5756 WREG32(MC_VM_MX_L1_TLB_CNTL,
5757 (0xA << 7) |
5758 ENABLE_L1_TLB |
5759 ENABLE_L1_FRAGMENT_PROCESSING |
5760 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
5761 ENABLE_ADVANCED_DRIVER_MODEL |
5762 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
5763 /* Setup L2 cache */
5764 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
5765 ENABLE_L2_FRAGMENT_PROCESSING |
5766 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
5767 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
5768 EFFECTIVE_L2_QUEUE_SIZE(7) |
5769 CONTEXT1_IDENTITY_ACCESS_MODE(1));
5770 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
5771 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
5772 BANK_SELECT(4) |
5773 L2_CACHE_BIGK_FRAGMENT_SIZE(4));
5774 /* setup context0 */
5775 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
5776 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
5777 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
5778 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
5779 (u32)(rdev->dummy_page.addr >> 12));
5780 WREG32(VM_CONTEXT0_CNTL2, 0);
5781 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
5782 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
5783
5784 WREG32(0x15D4, 0);
5785 WREG32(0x15D8, 0);
5786 WREG32(0x15DC, 0);
5787
5788 	/* restore contexts 1-15 */
5789 /* set vm size, must be a multiple of 4 */
5790 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
5791 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
5792 for (i = 1; i < 16; i++) {
5793 if (i < 8)
5794 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
5795 rdev->vm_manager.saved_table_addr[i]);
5796 else
5797 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
5798 rdev->vm_manager.saved_table_addr[i]);
5799 }
5800
5801 	/* enable contexts 1-15 */
5802 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
5803 (u32)(rdev->dummy_page.addr >> 12));
5804 WREG32(VM_CONTEXT1_CNTL2, 4);
5805 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
5806 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
5807 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5808 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
5809 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5810 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
5811 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
5812 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
5813 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
5814 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
5815 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
5816 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
5817 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5818 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
5819
5820 if (rdev->family == CHIP_KAVERI) {
5821 u32 tmp = RREG32(CHUB_CONTROL);
5822 tmp &= ~BYPASS_VM;
5823 WREG32(CHUB_CONTROL, tmp);
5824 }
5825
5826 /* XXX SH_MEM regs */
5827 /* where to put LDS, scratch, GPUVM in FSA64 space */
5828 mutex_lock(&rdev->srbm_mutex);
5829 for (i = 0; i < 16; i++) {
5830 cik_srbm_select(rdev, 0, 0, 0, i);
5831 /* CP and shaders */
5832 WREG32(SH_MEM_CONFIG, 0);
5833 WREG32(SH_MEM_APE1_BASE, 1);
5834 WREG32(SH_MEM_APE1_LIMIT, 0);
5835 WREG32(SH_MEM_BASES, 0);
5836 /* SDMA GFX */
5837 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
5838 WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
5839 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
5840 WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
5841 /* XXX SDMA RLC - todo */
5842 }
5843 cik_srbm_select(rdev, 0, 0, 0, 0);
5844 mutex_unlock(&rdev->srbm_mutex);
5845
5846 cik_pcie_init_compute_vmid(rdev);
5847
5848 cik_pcie_gart_tlb_flush(rdev);
5849 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
5850 (unsigned)(rdev->mc.gtt_size >> 20),
5851 (unsigned long long)rdev->gart.table_addr);
5852 rdev->gart.ready = true;
5853 return 0;
5854 }
5855
5856 /**
5857 * cik_pcie_gart_disable - gart disable
5858 *
5859 * @rdev: radeon_device pointer
5860 *
5861 * This disables all VM page tables (CIK).
5862 */
5863 static void cik_pcie_gart_disable(struct radeon_device *rdev)
5864 {
5865 unsigned i;
5866
5867 for (i = 1; i < 16; ++i) {
5868 uint32_t reg;
5869 if (i < 8)
5870 reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
5871 else
5872 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
5873 rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
5874 }
5875
5876 /* Disable all tables */
5877 WREG32(VM_CONTEXT0_CNTL, 0);
5878 WREG32(VM_CONTEXT1_CNTL, 0);
5879 /* Setup TLB control */
5880 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
5881 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
5882 /* Setup L2 cache */
5883 WREG32(VM_L2_CNTL,
5884 ENABLE_L2_FRAGMENT_PROCESSING |
5885 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
5886 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
5887 EFFECTIVE_L2_QUEUE_SIZE(7) |
5888 CONTEXT1_IDENTITY_ACCESS_MODE(1));
5889 WREG32(VM_L2_CNTL2, 0);
5890 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
5891 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
5892 radeon_gart_table_vram_unpin(rdev);
5893 }
5894
5895 /**
5896 * cik_pcie_gart_fini - vm fini callback
5897 *
5898 * @rdev: radeon_device pointer
5899 *
5900 * Tears down the driver GART/VM setup (CIK).
5901 */
5902 static void cik_pcie_gart_fini(struct radeon_device *rdev)
5903 {
5904 cik_pcie_gart_disable(rdev);
5905 radeon_gart_table_vram_free(rdev);
5906 radeon_gart_fini(rdev);
5907 }
5908
5909 /* vm parser */
5910 /**
5911 * cik_ib_parse - vm ib_parse callback
5912 *
5913 * @rdev: radeon_device pointer
5914 * @ib: indirect buffer pointer
5915 *
5916 * CIK uses hw IB checking, so this is a nop (CIK).
5917 */
5918 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
5919 {
5920 return 0;
5921 }
5922
5923 /*
5924 * vm
5925 * VMID 0 is the physical GPU addresses as used by the kernel.
5926 * VMIDs 1-15 are used for userspace clients and are handled
5927 * by the radeon vm/hsa code.
5928 */
5929 /**
5930 * cik_vm_init - cik vm init callback
5931 *
5932 * @rdev: radeon_device pointer
5933 *
5934 * Initializes cik-specific vm parameters (number of VMs, base of vram for
5935 * VMIDs 1-15) (CIK).
5936 * Returns 0 for success.
5937 */
5938 int cik_vm_init(struct radeon_device *rdev)
5939 {
5940 /*
5941 * number of VMs
5942 * VMID 0 is reserved for System
5943 * radeon graphics/compute will use VMIDs 1-7
5944 * amdkfd will use VMIDs 8-15
5945 */
5946 rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
5947 /* base offset of vram pages */
5948 if (rdev->flags & RADEON_IS_IGP) {
5949 u64 tmp = RREG32(MC_VM_FB_OFFSET);
5950 tmp <<= 22;
5951 rdev->vm_manager.vram_base_offset = tmp;
5952 } else
5953 rdev->vm_manager.vram_base_offset = 0;
5954
5955 return 0;
5956 }
5957
5958 /**
5959 * cik_vm_fini - cik vm fini callback
5960 *
5961 * @rdev: radeon_device pointer
5962 *
5963 * Tear down any asic specific VM setup (CIK).
5964 */
5965 void cik_vm_fini(struct radeon_device *rdev)
5966 {
5967 }
5968
5969 /**
5970 * cik_vm_decode_fault - print human readable fault info
5971 *
5972 * @rdev: radeon_device pointer
5973 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
5974 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
* @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
5975 *
5976 * Print human readable fault information (CIK).
5977 */
5978 static void cik_vm_decode_fault(struct radeon_device *rdev,
5979 u32 status, u32 addr, u32 mc_client)
5980 {
5981 u32 mc_id;
5982 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
5983 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
5984 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
5985 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
5986
5987 if (rdev->family == CHIP_HAWAII)
5988 mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5989 else
5990 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5991
5992 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
5993 protections, vmid, addr,
5994 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
5995 block, mc_client, mc_id);
5996 }
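/*
 * Hypothetical example of the output format above (values invented
 * purely for illustration):
 *
 *	VM fault (0x04, vmid 2) at page 1026, read from 'TC2 ' (0x54433220) (160)
 */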
5997
5998 /**
5999 * cik_vm_flush - cik vm flush using the CP
6000 *
6001 * @rdev: radeon_device pointer
* @ring: radeon_ring pointer
* @vm_id: VMID to flush
* @pd_addr: page directory base address
6002 *
6003 * Update the page table base and flush the VM TLB
6004 * using the CP (CIK).
6005 */
6006 void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
6007 unsigned vm_id, uint64_t pd_addr)
6008 {
6009 int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
6010
6011 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6012 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
6013 WRITE_DATA_DST_SEL(0)));
6014 if (vm_id < 8) {
6015 radeon_ring_write(ring,
6016 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
6017 } else {
6018 radeon_ring_write(ring,
6019 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
6020 }
6021 radeon_ring_write(ring, 0);
6022 radeon_ring_write(ring, pd_addr >> 12);
6023
6024 /* update SH_MEM_* regs */
6025 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6026 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
6027 WRITE_DATA_DST_SEL(0)));
6028 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
6029 radeon_ring_write(ring, 0);
6030 radeon_ring_write(ring, VMID(vm_id));
6031
6032 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
6033 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
6034 WRITE_DATA_DST_SEL(0)));
6035 radeon_ring_write(ring, SH_MEM_BASES >> 2);
6036 radeon_ring_write(ring, 0);
6037
6038 radeon_ring_write(ring, 0); /* SH_MEM_BASES */
6039 radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
6040 radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
6041 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
6042
6043 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6044 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
6045 WRITE_DATA_DST_SEL(0)));
6046 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
6047 radeon_ring_write(ring, 0);
6048 radeon_ring_write(ring, VMID(0));
6049
6050 /* HDP flush */
6051 cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
6052
6053 	/* bits 0-15 are the VM contexts 0-15 */
6054 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6055 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
6056 WRITE_DATA_DST_SEL(0)));
6057 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
6058 radeon_ring_write(ring, 0);
6059 radeon_ring_write(ring, 1 << vm_id);
6060
6061 /* wait for the invalidate to complete */
6062 radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6063 radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
6064 WAIT_REG_MEM_FUNCTION(0) | /* always */
6065 WAIT_REG_MEM_ENGINE(0))); /* me */
6066 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
6067 radeon_ring_write(ring, 0);
6068 radeon_ring_write(ring, 0); /* ref */
6069 radeon_ring_write(ring, 0); /* mask */
6070 radeon_ring_write(ring, 0x20); /* poll interval */
6071
6072 /* compute doesn't have PFP */
6073 if (usepfp) {
6074 /* sync PFP to ME, otherwise we might get invalid PFP reads */
6075 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
6076 radeon_ring_write(ring, 0x0);
6077 }
6078 }
6079
6080 /*
6081 * RLC
6082 * The RLC is a multi-purpose microengine that handles a
6083 * variety of functions, the most important of which is
6084 * the interrupt controller.
6085 */
6086 static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
6087 bool enable)
6088 {
6089 u32 tmp = RREG32(CP_INT_CNTL_RING0);
6090
6091 if (enable)
6092 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6093 else
6094 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6095 WREG32(CP_INT_CNTL_RING0, tmp);
6096 }
6097
6098 static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
6099 {
6100 u32 tmp;
6101
6102 tmp = RREG32(RLC_LB_CNTL);
6103 if (enable)
6104 tmp |= LOAD_BALANCE_ENABLE;
6105 else
6106 tmp &= ~LOAD_BALANCE_ENABLE;
6107 WREG32(RLC_LB_CNTL, tmp);
6108 }
6109
6110 static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
6111 {
6112 u32 i, j, k;
6113 u32 mask;
6114
6115 mutex_lock(&rdev->grbm_idx_mutex);
6116 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
6117 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
6118 cik_select_se_sh(rdev, i, j);
6119 for (k = 0; k < rdev->usec_timeout; k++) {
6120 if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
6121 break;
6122 udelay(1);
6123 }
6124 }
6125 }
6126 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6127 mutex_unlock(&rdev->grbm_idx_mutex);
6128
6129 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
6130 for (k = 0; k < rdev->usec_timeout; k++) {
6131 if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
6132 break;
6133 udelay(1);
6134 }
6135 }
6136
6137 static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
6138 {
6139 u32 tmp;
6140
6141 tmp = RREG32(RLC_CNTL);
6142 if (tmp != rlc)
6143 WREG32(RLC_CNTL, rlc);
6144 }
6145
6146 static u32 cik_halt_rlc(struct radeon_device *rdev)
6147 {
6148 u32 data, orig;
6149
6150 orig = data = RREG32(RLC_CNTL);
6151
6152 if (data & RLC_ENABLE) {
6153 u32 i;
6154
6155 data &= ~RLC_ENABLE;
6156 WREG32(RLC_CNTL, data);
6157
6158 for (i = 0; i < rdev->usec_timeout; i++) {
6159 if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
6160 break;
6161 udelay(1);
6162 }
6163
6164 cik_wait_for_rlc_serdes(rdev);
6165 }
6166
6167 return orig;
6168 }
6169
6170 void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
6171 {
6172 u32 tmp, i, mask;
6173
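	/*
	 * Request safe mode, then wait first for the GFX power/clock
	 * status bits to assert and then for the RLC to acknowledge by
	 * clearing the REQ bit.
	 */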
6174 tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
6175 WREG32(RLC_GPR_REG2, tmp);
6176
6177 mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
6178 for (i = 0; i < rdev->usec_timeout; i++) {
6179 if ((RREG32(RLC_GPM_STAT) & mask) == mask)
6180 break;
6181 udelay(1);
6182 }
6183
6184 for (i = 0; i < rdev->usec_timeout; i++) {
6185 if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
6186 break;
6187 udelay(1);
6188 }
6189 }
6190
6191 void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
6192 {
6193 u32 tmp;
6194
6195 tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
6196 WREG32(RLC_GPR_REG2, tmp);
6197 }
6198
6199 /**
6200 * cik_rlc_stop - stop the RLC ME
6201 *
6202 * @rdev: radeon_device pointer
6203 *
6204 * Halt the RLC ME (MicroEngine) (CIK).
6205 */
6206 static void cik_rlc_stop(struct radeon_device *rdev)
6207 {
6208 WREG32(RLC_CNTL, 0);
6209
6210 cik_enable_gui_idle_interrupt(rdev, false);
6211
6212 cik_wait_for_rlc_serdes(rdev);
6213 }
6214
6215 /**
6216 * cik_rlc_start - start the RLC ME
6217 *
6218 * @rdev: radeon_device pointer
6219 *
6220 * Unhalt the RLC ME (MicroEngine) (CIK).
6221 */
6222 static void cik_rlc_start(struct radeon_device *rdev)
6223 {
6224 WREG32(RLC_CNTL, RLC_ENABLE);
6225
6226 cik_enable_gui_idle_interrupt(rdev, true);
6227
6228 udelay(50);
6229 }
6230
6231 /**
6232 * cik_rlc_resume - setup the RLC hw
6233 *
6234 * @rdev: radeon_device pointer
6235 *
6236 * Initialize the RLC registers, load the ucode,
6237 * and start the RLC (CIK).
6238 * Returns 0 for success, -EINVAL if the ucode is not available.
6239 */
6240 static int cik_rlc_resume(struct radeon_device *rdev)
6241 {
6242 u32 i, size, tmp;
6243
6244 if (!rdev->rlc_fw)
6245 return -EINVAL;
6246
6247 cik_rlc_stop(rdev);
6248
6249 /* disable CG */
6250 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
6251 WREG32(RLC_CGCG_CGLS_CTRL, tmp);
6252
6253 si_rlc_reset(rdev);
6254
6255 cik_init_pg(rdev);
6256
6257 cik_init_cg(rdev);
6258
6259 WREG32(RLC_LB_CNTR_INIT, 0);
6260 WREG32(RLC_LB_CNTR_MAX, 0x00008000);
6261
6262 mutex_lock(&rdev->grbm_idx_mutex);
6263 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6264 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
6265 WREG32(RLC_LB_PARAMS, 0x00600408);
6266 WREG32(RLC_LB_CNTL, 0x80000004);
6267 mutex_unlock(&rdev->grbm_idx_mutex);
6268
6269 WREG32(RLC_MC_CNTL, 0);
6270 WREG32(RLC_UCODE_CNTL, 0);
6271
6272 if (rdev->new_fw) {
6273 const struct rlc_firmware_header_v1_0 *hdr =
6274 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
6275 const __le32 *fw_data = (const __le32 *)
6276 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6277
6278 radeon_ucode_print_rlc_hdr(&hdr->header);
6279
6280 size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
6281 WREG32(RLC_GPM_UCODE_ADDR, 0);
6282 for (i = 0; i < size; i++)
6283 WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
6284 WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
6285 } else {
6286 const __be32 *fw_data;
6287
6288 switch (rdev->family) {
6289 case CHIP_BONAIRE:
6290 case CHIP_HAWAII:
6291 default:
6292 size = BONAIRE_RLC_UCODE_SIZE;
6293 break;
6294 case CHIP_KAVERI:
6295 size = KV_RLC_UCODE_SIZE;
6296 break;
6297 case CHIP_KABINI:
6298 size = KB_RLC_UCODE_SIZE;
6299 break;
6300 case CHIP_MULLINS:
6301 size = ML_RLC_UCODE_SIZE;
6302 break;
6303 }
6304
6305 fw_data = (const __be32 *)rdev->rlc_fw->data;
6306 WREG32(RLC_GPM_UCODE_ADDR, 0);
6307 for (i = 0; i < size; i++)
6308 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
6309 WREG32(RLC_GPM_UCODE_ADDR, 0);
6310 }
6311
6312 /* XXX - find out what chips support lbpw */
6313 cik_enable_lbpw(rdev, false);
6314
6315 if (rdev->family == CHIP_BONAIRE)
6316 WREG32(RLC_DRIVER_DMA_STATUS, 0);
6317
6318 cik_rlc_start(rdev);
6319
6320 return 0;
6321 }
6322
6323 static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
6324 {
6325 u32 data, orig, tmp, tmp2;
6326
6327 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
6328
6329 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
6330 cik_enable_gui_idle_interrupt(rdev, true);
6331
6332 tmp = cik_halt_rlc(rdev);
6333
6334 mutex_lock(&rdev->grbm_idx_mutex);
6335 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6336 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
6337 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
6338 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
6339 WREG32(RLC_SERDES_WR_CTRL, tmp2);
6340 mutex_unlock(&rdev->grbm_idx_mutex);
6341
6342 cik_update_rlc(rdev, tmp);
6343
6344 data |= CGCG_EN | CGLS_EN;
6345 } else {
6346 cik_enable_gui_idle_interrupt(rdev, false);
6347
6348 RREG32(CB_CGTT_SCLK_CTRL);
6349 RREG32(CB_CGTT_SCLK_CTRL);
6350 RREG32(CB_CGTT_SCLK_CTRL);
6351 RREG32(CB_CGTT_SCLK_CTRL);
6352
6353 data &= ~(CGCG_EN | CGLS_EN);
6354 }
6355
6356 if (orig != data)
6357 WREG32(RLC_CGCG_CGLS_CTRL, data);
6358
6359 }
6360
6361 static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
6362 {
6363 u32 data, orig, tmp = 0;
6364
6365 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
6366 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
6367 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
6368 orig = data = RREG32(CP_MEM_SLP_CNTL);
6369 data |= CP_MEM_LS_EN;
6370 if (orig != data)
6371 WREG32(CP_MEM_SLP_CNTL, data);
6372 }
6373 }
6374
6375 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
6376 data |= 0x00000001;
6377 data &= 0xfffffffd;
6378 if (orig != data)
6379 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
6380
6381 tmp = cik_halt_rlc(rdev);
6382
6383 mutex_lock(&rdev->grbm_idx_mutex);
6384 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6385 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
6386 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
6387 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
6388 WREG32(RLC_SERDES_WR_CTRL, data);
6389 mutex_unlock(&rdev->grbm_idx_mutex);
6390
6391 cik_update_rlc(rdev, tmp);
6392
6393 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
6394 orig = data = RREG32(CGTS_SM_CTRL_REG);
6395 data &= ~SM_MODE_MASK;
6396 data |= SM_MODE(0x2);
6397 data |= SM_MODE_ENABLE;
6398 data &= ~CGTS_OVERRIDE;
6399 if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
6400 (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
6401 data &= ~CGTS_LS_OVERRIDE;
6402 data &= ~ON_MONITOR_ADD_MASK;
6403 data |= ON_MONITOR_ADD_EN;
6404 data |= ON_MONITOR_ADD(0x96);
6405 if (orig != data)
6406 WREG32(CGTS_SM_CTRL_REG, data);
6407 }
6408 } else {
6409 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
6410 data |= 0x00000003;
6411 if (orig != data)
6412 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
6413
6414 data = RREG32(RLC_MEM_SLP_CNTL);
6415 if (data & RLC_MEM_LS_EN) {
6416 data &= ~RLC_MEM_LS_EN;
6417 WREG32(RLC_MEM_SLP_CNTL, data);
6418 }
6419
6420 data = RREG32(CP_MEM_SLP_CNTL);
6421 if (data & CP_MEM_LS_EN) {
6422 data &= ~CP_MEM_LS_EN;
6423 WREG32(CP_MEM_SLP_CNTL, data);
6424 }
6425
6426 orig = data = RREG32(CGTS_SM_CTRL_REG);
6427 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
6428 if (orig != data)
6429 WREG32(CGTS_SM_CTRL_REG, data);
6430
6431 tmp = cik_halt_rlc(rdev);
6432
6433 mutex_lock(&rdev->grbm_idx_mutex);
6434 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6435 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
6436 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
6437 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
6438 WREG32(RLC_SERDES_WR_CTRL, data);
6439 mutex_unlock(&rdev->grbm_idx_mutex);
6440
6441 cik_update_rlc(rdev, tmp);
6442 }
6443 }
6444
6445 static const u32 mc_cg_registers[] =
6446 {
6447 MC_HUB_MISC_HUB_CG,
6448 MC_HUB_MISC_SIP_CG,
6449 MC_HUB_MISC_VM_CG,
6450 MC_XPB_CLK_GAT,
6451 ATC_MISC_CG,
6452 MC_CITF_MISC_WR_CG,
6453 MC_CITF_MISC_RD_CG,
6454 MC_CITF_MISC_VM_CG,
6455 VM_L2_CG,
6456 };
6457
6458 static void cik_enable_mc_ls(struct radeon_device *rdev,
6459 bool enable)
6460 {
6461 int i;
6462 u32 orig, data;
6463
6464 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
6465 orig = data = RREG32(mc_cg_registers[i]);
6466 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
6467 data |= MC_LS_ENABLE;
6468 else
6469 data &= ~MC_LS_ENABLE;
6470 if (data != orig)
6471 WREG32(mc_cg_registers[i], data);
6472 }
6473 }
6474
6475 static void cik_enable_mc_mgcg(struct radeon_device *rdev,
6476 bool enable)
6477 {
6478 int i;
6479 u32 orig, data;
6480
6481 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
6482 orig = data = RREG32(mc_cg_registers[i]);
6483 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
6484 data |= MC_CG_ENABLE;
6485 else
6486 data &= ~MC_CG_ENABLE;
6487 if (data != orig)
6488 WREG32(mc_cg_registers[i], data);
6489 }
6490 }
6491
6492 static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
6493 bool enable)
6494 {
6495 u32 orig, data;
6496
6497 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
6498 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
6499 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
6500 } else {
6501 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
6502 data |= 0xff000000;
6503 if (data != orig)
6504 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
6505
6506 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
6507 data |= 0xff000000;
6508 if (data != orig)
6509 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
6510 }
6511 }
6512
6513 static void cik_enable_sdma_mgls(struct radeon_device *rdev,
6514 bool enable)
6515 {
6516 u32 orig, data;
6517
6518 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
6519 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
6520 data |= 0x100;
6521 if (orig != data)
6522 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
6523
6524 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
6525 data |= 0x100;
6526 if (orig != data)
6527 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
6528 } else {
6529 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
6530 data &= ~0x100;
6531 if (orig != data)
6532 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
6533
6534 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
6535 data &= ~0x100;
6536 if (orig != data)
6537 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
6538 }
6539 }
6540
6541 static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
6542 bool enable)
6543 {
6544 u32 orig, data;
6545
6546 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
6547 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
6548 data = 0xfff;
6549 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
6550
6551 orig = data = RREG32(UVD_CGC_CTRL);
6552 data |= DCM;
6553 if (orig != data)
6554 WREG32(UVD_CGC_CTRL, data);
6555 } else {
6556 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
6557 data &= ~0xfff;
6558 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
6559
6560 orig = data = RREG32(UVD_CGC_CTRL);
6561 data &= ~DCM;
6562 if (orig != data)
6563 WREG32(UVD_CGC_CTRL, data);
6564 }
6565 }
6566
6567 static void cik_enable_bif_mgls(struct radeon_device *rdev,
6568 bool enable)
6569 {
6570 u32 orig, data;
6571
6572 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
6573
6574 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
6575 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
6576 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
6577 else
6578 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
6579 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
6580
6581 if (orig != data)
6582 WREG32_PCIE_PORT(PCIE_CNTL2, data);
6583 }
6584
6585 static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
6586 bool enable)
6587 {
6588 u32 orig, data;
6589
6590 orig = data = RREG32(HDP_HOST_PATH_CNTL);
6591
6592 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
6593 data &= ~CLOCK_GATING_DIS;
6594 else
6595 data |= CLOCK_GATING_DIS;
6596
6597 if (orig != data)
6598 WREG32(HDP_HOST_PATH_CNTL, data);
6599 }
6600
6601 static void cik_enable_hdp_ls(struct radeon_device *rdev,
6602 bool enable)
6603 {
6604 u32 orig, data;
6605
6606 orig = data = RREG32(HDP_MEM_POWER_LS);
6607
6608 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
6609 data |= HDP_LS_ENABLE;
6610 else
6611 data &= ~HDP_LS_ENABLE;
6612
6613 if (orig != data)
6614 WREG32(HDP_MEM_POWER_LS, data);
6615 }
6616
6617 void cik_update_cg(struct radeon_device *rdev,
6618 u32 block, bool enable)
6619 {
6620
6621 if (block & RADEON_CG_BLOCK_GFX) {
6622 cik_enable_gui_idle_interrupt(rdev, false);
6623 /* order matters! */
6624 if (enable) {
6625 cik_enable_mgcg(rdev, true);
6626 cik_enable_cgcg(rdev, true);
6627 } else {
6628 cik_enable_cgcg(rdev, false);
6629 cik_enable_mgcg(rdev, false);
6630 }
6631 cik_enable_gui_idle_interrupt(rdev, true);
6632 }
6633
6634 if (block & RADEON_CG_BLOCK_MC) {
6635 if (!(rdev->flags & RADEON_IS_IGP)) {
6636 cik_enable_mc_mgcg(rdev, enable);
6637 cik_enable_mc_ls(rdev, enable);
6638 }
6639 }
6640
6641 if (block & RADEON_CG_BLOCK_SDMA) {
6642 cik_enable_sdma_mgcg(rdev, enable);
6643 cik_enable_sdma_mgls(rdev, enable);
6644 }
6645
6646 if (block & RADEON_CG_BLOCK_BIF) {
6647 cik_enable_bif_mgls(rdev, enable);
6648 }
6649
6650 if (block & RADEON_CG_BLOCK_UVD) {
6651 if (rdev->has_uvd)
6652 cik_enable_uvd_mgcg(rdev, enable);
6653 }
6654
6655 if (block & RADEON_CG_BLOCK_HDP) {
6656 cik_enable_hdp_mgcg(rdev, enable);
6657 cik_enable_hdp_ls(rdev, enable);
6658 }
6659
6660 if (block & RADEON_CG_BLOCK_VCE) {
6661 vce_v2_0_enable_mgcg(rdev, enable);
6662 }
6663 }
6664
6665 static void cik_init_cg(struct radeon_device *rdev)
6666 {
6667
6668 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
6669
6670 if (rdev->has_uvd)
6671 si_init_uvd_internal_cg(rdev);
6672
6673 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
6674 RADEON_CG_BLOCK_SDMA |
6675 RADEON_CG_BLOCK_BIF |
6676 RADEON_CG_BLOCK_UVD |
6677 RADEON_CG_BLOCK_HDP), true);
6678 }
6679
6680 static void cik_fini_cg(struct radeon_device *rdev)
6681 {
6682 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
6683 RADEON_CG_BLOCK_SDMA |
6684 RADEON_CG_BLOCK_BIF |
6685 RADEON_CG_BLOCK_UVD |
6686 RADEON_CG_BLOCK_HDP), false);
6687
6688 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
6689 }
6690
6691 static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
6692 bool enable)
6693 {
6694 u32 data, orig;
6695
6696 orig = data = RREG32(RLC_PG_CNTL);
6697 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
6698 data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
6699 else
6700 data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
6701 if (orig != data)
6702 WREG32(RLC_PG_CNTL, data);
6703 }
6704
6705 static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
6706 bool enable)
6707 {
6708 u32 data, orig;
6709
6710 orig = data = RREG32(RLC_PG_CNTL);
6711 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
6712 data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
6713 else
6714 data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
6715 if (orig != data)
6716 WREG32(RLC_PG_CNTL, data);
6717 }
6718
6719 static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
6720 {
6721 u32 data, orig;
6722
6723 orig = data = RREG32(RLC_PG_CNTL);
6724 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
6725 data &= ~DISABLE_CP_PG;
6726 else
6727 data |= DISABLE_CP_PG;
6728 if (orig != data)
6729 WREG32(RLC_PG_CNTL, data);
6730 }
6731
6732 static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
6733 {
6734 u32 data, orig;
6735
6736 orig = data = RREG32(RLC_PG_CNTL);
6737 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
6738 data &= ~DISABLE_GDS_PG;
6739 else
6740 data |= DISABLE_GDS_PG;
6741 if (orig != data)
6742 WREG32(RLC_PG_CNTL, data);
6743 }
6744
6745 #define CP_ME_TABLE_SIZE 96
6746 #define CP_ME_TABLE_OFFSET 2048
6747 #define CP_MEC_TABLE_OFFSET 4096
6748
6749 void cik_init_cp_pg_table(struct radeon_device *rdev)
6750 {
6751 volatile u32 *dst_ptr;
6752 int me, i, max_me = 4;
6753 u32 bo_offset = 0;
6754 u32 table_offset, table_size;
6755
6756 if (rdev->family == CHIP_KAVERI)
6757 max_me = 5;
6758
6759 if (rdev->rlc.cp_table_ptr == NULL)
6760 return;
6761
6762 /* write the cp table buffer */
6763 dst_ptr = rdev->rlc.cp_table_ptr;
6764 for (me = 0; me < max_me; me++) {
6765 if (rdev->new_fw) {
6766 const __le32 *fw_data;
6767 const struct gfx_firmware_header_v1_0 *hdr;
6768
6769 if (me == 0) {
6770 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
6771 fw_data = (const __le32 *)
6772 (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6773 table_offset = le32_to_cpu(hdr->jt_offset);
6774 table_size = le32_to_cpu(hdr->jt_size);
6775 } else if (me == 1) {
6776 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
6777 fw_data = (const __le32 *)
6778 (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6779 table_offset = le32_to_cpu(hdr->jt_offset);
6780 table_size = le32_to_cpu(hdr->jt_size);
6781 } else if (me == 2) {
6782 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
6783 fw_data = (const __le32 *)
6784 (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6785 table_offset = le32_to_cpu(hdr->jt_offset);
6786 table_size = le32_to_cpu(hdr->jt_size);
6787 } else if (me == 3) {
6788 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
6789 fw_data = (const __le32 *)
6790 (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6791 table_offset = le32_to_cpu(hdr->jt_offset);
6792 table_size = le32_to_cpu(hdr->jt_size);
6793 } else {
6794 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
6795 fw_data = (const __le32 *)
6796 (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6797 table_offset = le32_to_cpu(hdr->jt_offset);
6798 table_size = le32_to_cpu(hdr->jt_size);
6799 }
6800
6801 			for (i = 0; i < table_size; i++) {
6802 dst_ptr[bo_offset + i] =
6803 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
6804 }
6805 bo_offset += table_size;
6806 } else {
6807 const __be32 *fw_data;
6808 table_size = CP_ME_TABLE_SIZE;
6809
6810 if (me == 0) {
6811 fw_data = (const __be32 *)rdev->ce_fw->data;
6812 table_offset = CP_ME_TABLE_OFFSET;
6813 } else if (me == 1) {
6814 fw_data = (const __be32 *)rdev->pfp_fw->data;
6815 table_offset = CP_ME_TABLE_OFFSET;
6816 } else if (me == 2) {
6817 fw_data = (const __be32 *)rdev->me_fw->data;
6818 table_offset = CP_ME_TABLE_OFFSET;
6819 } else {
6820 fw_data = (const __be32 *)rdev->mec_fw->data;
6821 table_offset = CP_MEC_TABLE_OFFSET;
6822 }
6823
6824 			for (i = 0; i < table_size; i++) {
6825 dst_ptr[bo_offset + i] =
6826 cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
6827 }
6828 bo_offset += table_size;
6829 }
6830 }
6831 }
6832
6833 static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
6834 bool enable)
6835 {
6836 u32 data, orig;
6837
6838 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
6839 orig = data = RREG32(RLC_PG_CNTL);
6840 data |= GFX_PG_ENABLE;
6841 if (orig != data)
6842 WREG32(RLC_PG_CNTL, data);
6843
6844 orig = data = RREG32(RLC_AUTO_PG_CTRL);
6845 data |= AUTO_PG_EN;
6846 if (orig != data)
6847 WREG32(RLC_AUTO_PG_CTRL, data);
6848 } else {
6849 orig = data = RREG32(RLC_PG_CNTL);
6850 data &= ~GFX_PG_ENABLE;
6851 if (orig != data)
6852 WREG32(RLC_PG_CNTL, data);
6853
6854 orig = data = RREG32(RLC_AUTO_PG_CTRL);
6855 data &= ~AUTO_PG_EN;
6856 if (orig != data)
6857 WREG32(RLC_AUTO_PG_CTRL, data);
6858
6859 data = RREG32(DB_RENDER_CONTROL);
6860 }
6861 }
6862
6863 static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
6864 {
6865 u32 mask = 0, tmp, tmp1;
6866 int i;
6867
6868 mutex_lock(&rdev->grbm_idx_mutex);
6869 cik_select_se_sh(rdev, se, sh);
6870 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
6871 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
6872 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6873 mutex_unlock(&rdev->grbm_idx_mutex);
6874
6875 tmp &= 0xffff0000;
6876
6877 tmp |= tmp1;
6878 tmp >>= 16;
6879
6880 	for (i = 0; i < rdev->config.cik.max_cu_per_sh; i++) {
6881 mask <<= 1;
6882 mask |= 1;
6883 }
6884
6885 return (~tmp) & mask;
6886 }
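/*
 * Worked example: with max_cu_per_sh = 8 the mask is 0xff; if the
 * combined CC/GC_USER shader array config marks CUs 0 and 1 inactive
 * (tmp = 0x03 after the shift), the returned active-CU bitmap is 0xfc.
 */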
6887
6888 static void cik_init_ao_cu_mask(struct radeon_device *rdev)
6889 {
6890 u32 i, j, k, active_cu_number = 0;
6891 u32 mask, counter, cu_bitmap;
6892 u32 tmp = 0;
6893
6894 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
6895 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
6896 mask = 1;
6897 cu_bitmap = 0;
6898 counter = 0;
6899 			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
6900 				if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
6901 					if (counter < 2)
6902 						cu_bitmap |= mask;
6903 					counter++;
6904 }
6905 mask <<= 1;
6906 }
6907
6908 active_cu_number += counter;
6909 tmp |= (cu_bitmap << (i * 16 + j * 8));
6910 }
6911 }
6912
6913 WREG32(RLC_PG_AO_CU_MASK, tmp);
6914
6915 tmp = RREG32(RLC_MAX_PG_CU);
6916 tmp &= ~MAX_PU_CU_MASK;
6917 tmp |= MAX_PU_CU(active_cu_number);
6918 WREG32(RLC_MAX_PG_CU, tmp);
6919 }
6920
6921 static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
6922 bool enable)
6923 {
6924 u32 data, orig;
6925
6926 orig = data = RREG32(RLC_PG_CNTL);
6927 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
6928 data |= STATIC_PER_CU_PG_ENABLE;
6929 else
6930 data &= ~STATIC_PER_CU_PG_ENABLE;
6931 if (orig != data)
6932 WREG32(RLC_PG_CNTL, data);
6933 }
6934
6935 static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
6936 bool enable)
6937 {
6938 u32 data, orig;
6939
6940 orig = data = RREG32(RLC_PG_CNTL);
6941 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
6942 data |= DYN_PER_CU_PG_ENABLE;
6943 else
6944 data &= ~DYN_PER_CU_PG_ENABLE;
6945 if (orig != data)
6946 WREG32(RLC_PG_CNTL, data);
6947 }
6948
6949 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
6950 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
6951
6952 static void cik_init_gfx_cgpg(struct radeon_device *rdev)
6953 {
6954 u32 data, orig;
6955 u32 i;
6956
6957 if (rdev->rlc.cs_data) {
6958 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
6959 WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
6960 WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
6961 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
6962 } else {
6963 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
6964 for (i = 0; i < 3; i++)
6965 WREG32(RLC_GPM_SCRATCH_DATA, 0);
6966 }
6967 if (rdev->rlc.reg_list) {
6968 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
6969 for (i = 0; i < rdev->rlc.reg_list_size; i++)
6970 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
6971 }
6972
6973 orig = data = RREG32(RLC_PG_CNTL);
6974 data |= GFX_PG_SRC;
6975 if (orig != data)
6976 WREG32(RLC_PG_CNTL, data);
6977
6978 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
6979 WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
6980
6981 data = RREG32(CP_RB_WPTR_POLL_CNTL);
6982 data &= ~IDLE_POLL_COUNT_MASK;
6983 data |= IDLE_POLL_COUNT(0x60);
6984 WREG32(CP_RB_WPTR_POLL_CNTL, data);
6985
6986 data = 0x10101010;
6987 WREG32(RLC_PG_DELAY, data);
6988
6989 data = RREG32(RLC_PG_DELAY_2);
6990 data &= ~0xff;
6991 data |= 0x3;
6992 WREG32(RLC_PG_DELAY_2, data);
6993
6994 data = RREG32(RLC_AUTO_PG_CTRL);
6995 data &= ~GRBM_REG_SGIT_MASK;
6996 data |= GRBM_REG_SGIT(0x700);
6997 WREG32(RLC_AUTO_PG_CTRL, data);
6998
6999 }
7000
7001 static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
7002 {
7003 cik_enable_gfx_cgpg(rdev, enable);
7004 cik_enable_gfx_static_mgpg(rdev, enable);
7005 cik_enable_gfx_dynamic_mgpg(rdev, enable);
7006 }
7007
7008 u32 cik_get_csb_size(struct radeon_device *rdev)
7009 {
7010 u32 count = 0;
7011 const struct cs_section_def *sect = NULL;
7012 const struct cs_extent_def *ext = NULL;
7013
7014 if (rdev->rlc.cs_data == NULL)
7015 return 0;
7016
7017 /* begin clear state */
7018 count += 2;
7019 /* context control state */
7020 count += 3;
7021
7022 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
7023 for (ext = sect->section; ext->extent != NULL; ++ext) {
7024 if (sect->id == SECT_CONTEXT)
7025 count += 2 + ext->reg_count;
7026 else
7027 return 0;
7028 }
7029 }
7030 /* pa_sc_raster_config/pa_sc_raster_config1 */
7031 count += 4;
7032 /* end clear state */
7033 count += 2;
7034 /* clear state */
7035 count += 2;
7036
7037 return count;
7038 }
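/*
 * Equivalently:
 *
 *	size = 2 (begin clear state) + 3 (context control)
 *	     + sum over SECT_CONTEXT extents of (2 + reg_count)
 *	     + 4 (raster config) + 2 (end clear state) + 2 (clear state)
 */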
7039
7040 void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
7041 {
7042 u32 count = 0, i;
7043 const struct cs_section_def *sect = NULL;
7044 const struct cs_extent_def *ext = NULL;
7045
7046 if (rdev->rlc.cs_data == NULL)
7047 return;
7048 if (buffer == NULL)
7049 return;
7050
7051 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
7052 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
7053
7054 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
7055 buffer[count++] = cpu_to_le32(0x80000000);
7056 buffer[count++] = cpu_to_le32(0x80000000);
7057
7058 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
7059 for (ext = sect->section; ext->extent != NULL; ++ext) {
7060 if (sect->id == SECT_CONTEXT) {
7061 buffer[count++] =
7062 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
7063 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
7064 for (i = 0; i < ext->reg_count; i++)
7065 buffer[count++] = cpu_to_le32(ext->extent[i]);
7066 } else {
7067 return;
7068 }
7069 }
7070 }
7071
7072 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
7073 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
7074 switch (rdev->family) {
7075 case CHIP_BONAIRE:
7076 buffer[count++] = cpu_to_le32(0x16000012);
7077 buffer[count++] = cpu_to_le32(0x00000000);
7078 break;
7079 case CHIP_KAVERI:
7080 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
7081 buffer[count++] = cpu_to_le32(0x00000000);
7082 break;
7083 case CHIP_KABINI:
7084 case CHIP_MULLINS:
7085 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
7086 buffer[count++] = cpu_to_le32(0x00000000);
7087 break;
7088 case CHIP_HAWAII:
7089 buffer[count++] = cpu_to_le32(0x3a00161a);
7090 buffer[count++] = cpu_to_le32(0x0000002e);
7091 break;
7092 default:
7093 buffer[count++] = cpu_to_le32(0x00000000);
7094 buffer[count++] = cpu_to_le32(0x00000000);
7095 break;
7096 }
7097
7098 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
7099 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
7100
7101 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
7102 buffer[count++] = cpu_to_le32(0);
7103 }
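/* Note that every dword emitted above is accounted for in
 * cik_get_csb_size(); the two functions must be kept in sync. */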
7104
7105 static void cik_init_pg(struct radeon_device *rdev)
7106 {
7107 if (rdev->pg_flags) {
7108 cik_enable_sck_slowdown_on_pu(rdev, true);
7109 cik_enable_sck_slowdown_on_pd(rdev, true);
7110 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
7111 cik_init_gfx_cgpg(rdev);
7112 cik_enable_cp_pg(rdev, true);
7113 cik_enable_gds_pg(rdev, true);
7114 }
7115 cik_init_ao_cu_mask(rdev);
7116 cik_update_gfx_pg(rdev, true);
7117 }
7118 }
7119
7120 static void cik_fini_pg(struct radeon_device *rdev)
7121 {
7122 if (rdev->pg_flags) {
7123 cik_update_gfx_pg(rdev, false);
7124 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
7125 cik_enable_cp_pg(rdev, false);
7126 cik_enable_gds_pg(rdev, false);
7127 }
7128 }
7129 }
7130
7131 /*
7132 * Interrupts
7133 * Starting with r6xx, interrupts are handled via a ring buffer.
7134 * Ring buffers are areas of GPU accessible memory that the GPU
7135 * writes interrupt vectors into and the host reads vectors out of.
7136 * There is a rptr (read pointer) that determines where the
7137 * host is currently reading, and a wptr (write pointer)
7138 * which determines where the GPU has written. When the
7139 * pointers are equal, the ring is idle. When the GPU
7140 * writes vectors to the ring buffer, it increments the
7141 * wptr. When there is an interrupt, the host then starts
7142 * fetching commands and processing them until the pointers are
7143 * equal again at which point it updates the rptr.
7144 */
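/*
 * Conceptually the host side drains the ring like the sketch below,
 * an illustrative fragment only (handle_vector is a hypothetical
 * stand-in; the real work is done in cik_irq_process()):
 *
 *	while (rptr != wptr) {
 *		handle_vector(&ih_ring[rptr]);
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;  (vectors are 16 bytes)
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */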
7145
7146 /**
7147 * cik_enable_interrupts - Enable the interrupt ring buffer
7148 *
7149 * @rdev: radeon_device pointer
7150 *
7151 * Enable the interrupt ring buffer (CIK).
7152 */
7153 static void cik_enable_interrupts(struct radeon_device *rdev)
7154 {
7155 u32 ih_cntl = RREG32(IH_CNTL);
7156 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
7157
7158 ih_cntl |= ENABLE_INTR;
7159 ih_rb_cntl |= IH_RB_ENABLE;
7160 WREG32(IH_CNTL, ih_cntl);
7161 WREG32(IH_RB_CNTL, ih_rb_cntl);
7162 rdev->ih.enabled = true;
7163 }
7164
7165 /**
7166 * cik_disable_interrupts - Disable the interrupt ring buffer
7167 *
7168 * @rdev: radeon_device pointer
7169 *
7170 * Disable the interrupt ring buffer (CIK).
7171 */
7172 static void cik_disable_interrupts(struct radeon_device *rdev)
7173 {
7174 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
7175 u32 ih_cntl = RREG32(IH_CNTL);
7176
7177 ih_rb_cntl &= ~IH_RB_ENABLE;
7178 ih_cntl &= ~ENABLE_INTR;
7179 WREG32(IH_RB_CNTL, ih_rb_cntl);
7180 WREG32(IH_CNTL, ih_cntl);
7181 /* set rptr, wptr to 0 */
7182 WREG32(IH_RB_RPTR, 0);
7183 WREG32(IH_RB_WPTR, 0);
7184 rdev->ih.enabled = false;
7185 rdev->ih.rptr = 0;
7186 }
7187
7188 /**
7189 * cik_disable_interrupt_state - Disable all interrupt sources
7190 *
7191 * @rdev: radeon_device pointer
7192 *
7193 * Clear all interrupt enable bits used by the driver (CIK).
7194 */
7195 static void cik_disable_interrupt_state(struct radeon_device *rdev)
7196 {
7197 u32 tmp;
7198
7199 /* gfx ring */
7200 tmp = RREG32(CP_INT_CNTL_RING0) &
7201 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
7202 WREG32(CP_INT_CNTL_RING0, tmp);
7203 /* sdma */
7204 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
7205 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
7206 tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
7207 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
7208 /* compute queues */
7209 WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
7210 WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
7211 WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
7212 WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
7213 WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
7214 WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
7215 WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
7216 WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
7217 /* grbm */
7218 WREG32(GRBM_INT_CNTL, 0);
7219 /* vline/vblank, etc. */
7220 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
7221 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
7222 if (rdev->num_crtc >= 4) {
7223 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
7224 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
7225 }
7226 if (rdev->num_crtc >= 6) {
7227 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
7228 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
7229 }
7230 /* pflip */
7231 if (rdev->num_crtc >= 2) {
7232 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
7233 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
7234 }
7235 if (rdev->num_crtc >= 4) {
7236 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
7237 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
7238 }
7239 if (rdev->num_crtc >= 6) {
7240 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
7241 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
7242 }
7243
7244 /* dac hotplug */
7245 WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
7246
7247 /* digital hotplug */
7248 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
7249 WREG32(DC_HPD1_INT_CONTROL, tmp);
7250 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
7251 WREG32(DC_HPD2_INT_CONTROL, tmp);
7252 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
7253 WREG32(DC_HPD3_INT_CONTROL, tmp);
7254 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
7255 WREG32(DC_HPD4_INT_CONTROL, tmp);
7256 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
7257 WREG32(DC_HPD5_INT_CONTROL, tmp);
7258 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
7259 WREG32(DC_HPD6_INT_CONTROL, tmp);
7261 }
7262
7263 /**
7264 * cik_irq_init - init and enable the interrupt ring
7265 *
7266 * @rdev: radeon_device pointer
7267 *
7268 * Allocate a ring buffer for the interrupt controller,
7269 * enable the RLC, disable interrupts, enable the IH
7270 * ring buffer and enable it (CIK).
7271 * Called at device load and resume.
7272 * Returns 0 for success, errors for failure.
7273 */
7274 static int cik_irq_init(struct radeon_device *rdev)
7275 {
7276 int ret = 0;
7277 int rb_bufsz;
7278 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
7279
7280 /* allocate ring */
7281 ret = r600_ih_ring_alloc(rdev);
7282 if (ret)
7283 return ret;
7284
7285 /* disable irqs */
7286 cik_disable_interrupts(rdev);
7287
7288 /* init rlc */
7289 ret = cik_rlc_resume(rdev);
7290 if (ret) {
7291 r600_ih_ring_fini(rdev);
7292 return ret;
7293 }
7294
7295 /* setup interrupt control */
7296 /* XXX this should actually be a bus address, not an MC address. same on older asics */
7297 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
7298 interrupt_cntl = RREG32(INTERRUPT_CNTL);
7299 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
7300 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
7301 */
7302 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
7303 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
7304 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
7305 WREG32(INTERRUPT_CNTL, interrupt_cntl);
7306
7307 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
7308 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
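/* the RB size field is log2 of the ring size in dwords, placed at
 * bit 1 of IH_RB_CNTL, hence the (rb_bufsz << 1) below */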
7309
7310 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
7311 IH_WPTR_OVERFLOW_CLEAR |
7312 (rb_bufsz << 1));
7313
7314 if (rdev->wb.enabled)
7315 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
7316
7317 /* set the writeback address whether it's enabled or not */
7318 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
7319 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
7320
7321 WREG32(IH_RB_CNTL, ih_rb_cntl);
7322
7323 /* set rptr, wptr to 0 */
7324 WREG32(IH_RB_RPTR, 0);
7325 WREG32(IH_RB_WPTR, 0);
7326
7327 /* Default settings for IH_CNTL (disabled at first) */
7328 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
7329 /* RPTR_REARM only works if msi's are enabled */
7330 if (rdev->msi_enabled)
7331 ih_cntl |= RPTR_REARM;
7332 WREG32(IH_CNTL, ih_cntl);
7333
7334 /* force the active interrupt state to all disabled */
7335 cik_disable_interrupt_state(rdev);
7336
7337 pci_set_master(rdev->pdev);
7338
7339 /* enable irqs */
7340 cik_enable_interrupts(rdev);
7341
7342 return ret;
7343 }
7344
7345 /**
7346 * cik_irq_set - enable/disable interrupt sources
7347 *
7348 * @rdev: radeon_device pointer
7349 *
7350 * Enable interrupt sources on the GPU (vblanks, hpd,
7351 * etc.) (CIK).
7352 * Returns 0 for success, errors for failure.
7353 */
7354 int cik_irq_set(struct radeon_device *rdev)
7355 {
7356 u32 cp_int_cntl;
7357 u32 cp_m1p0;
7358 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
7359 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
7360 u32 grbm_int_cntl = 0;
7361 u32 dma_cntl, dma_cntl1;
7362 u32 thermal_int;
7363
7364 if (!rdev->irq.installed) {
7365 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
7366 return -EINVAL;
7367 }
7368 /* don't enable anything if the ih is disabled */
7369 if (!rdev->ih.enabled) {
7370 cik_disable_interrupts(rdev);
7371 /* force the active interrupt state to all disabled */
7372 cik_disable_interrupt_state(rdev);
7373 return 0;
7374 }
7375
7376 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
7377 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
7378 cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
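/* privileged reg/instruction faults stay enabled unconditionally so
 * that illegal command-stream accesses are always reported (handled
 * as src_ids 184/185 in cik_irq_process()) */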
7379
7380 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
7381 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
7382 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
7383 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
7384 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
7385 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
7386
7387 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
7388 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
7389
7390 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7391
7392 if (rdev->flags & RADEON_IS_IGP)
7393 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
7394 ~(THERM_INTH_MASK | THERM_INTL_MASK);
7395 else
7396 thermal_int = RREG32_SMC(CG_THERMAL_INT) &
7397 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
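/* IGPs and dGPUs expose the thermal interrupt enables through
 * different SMC registers, hence the split above and below */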
7398
7399 /* enable CP interrupts on all rings */
7400 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
7401 DRM_DEBUG("cik_irq_set: sw int gfx\n");
7402 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
7403 }
7404 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
7405 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7406 DRM_DEBUG("cik_irq_set: sw int cp1\n");
7407 if (ring->me == 1) {
7408 switch (ring->pipe) {
7409 case 0:
7410 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
7411 break;
7412 default:
7413 DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
7414 break;
7415 }
7416 } else {
7417 DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
7418 }
7419 }
7420 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
7421 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7422 DRM_DEBUG("cik_irq_set: sw int cp2\n");
7423 if (ring->me == 1) {
7424 switch (ring->pipe) {
7425 case 0:
7426 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
7427 break;
7428 default:
7429 DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
7430 break;
7431 }
7432 } else {
7433 DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
7434 }
7435 }
7436
7437 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
7438 DRM_DEBUG("cik_irq_set: sw int dma\n");
7439 dma_cntl |= TRAP_ENABLE;
7440 }
7441
7442 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
7443 DRM_DEBUG("cik_irq_set: sw int dma1\n");
7444 dma_cntl1 |= TRAP_ENABLE;
7445 }
7446
7447 if (rdev->irq.crtc_vblank_int[0] ||
7448 atomic_read(&rdev->irq.pflip[0])) {
7449 DRM_DEBUG("cik_irq_set: vblank 0\n");
7450 crtc1 |= VBLANK_INTERRUPT_MASK;
7451 }
7452 if (rdev->irq.crtc_vblank_int[1] ||
7453 atomic_read(&rdev->irq.pflip[1])) {
7454 DRM_DEBUG("cik_irq_set: vblank 1\n");
7455 crtc2 |= VBLANK_INTERRUPT_MASK;
7456 }
7457 if (rdev->irq.crtc_vblank_int[2] ||
7458 atomic_read(&rdev->irq.pflip[2])) {
7459 DRM_DEBUG("cik_irq_set: vblank 2\n");
7460 crtc3 |= VBLANK_INTERRUPT_MASK;
7461 }
7462 if (rdev->irq.crtc_vblank_int[3] ||
7463 atomic_read(&rdev->irq.pflip[3])) {
7464 DRM_DEBUG("cik_irq_set: vblank 3\n");
7465 crtc4 |= VBLANK_INTERRUPT_MASK;
7466 }
7467 if (rdev->irq.crtc_vblank_int[4] ||
7468 atomic_read(&rdev->irq.pflip[4])) {
7469 DRM_DEBUG("cik_irq_set: vblank 4\n");
7470 crtc5 |= VBLANK_INTERRUPT_MASK;
7471 }
7472 if (rdev->irq.crtc_vblank_int[5] ||
7473 atomic_read(&rdev->irq.pflip[5])) {
7474 DRM_DEBUG("cik_irq_set: vblank 5\n");
7475 crtc6 |= VBLANK_INTERRUPT_MASK;
7476 }
7477 if (rdev->irq.hpd[0]) {
7478 DRM_DEBUG("cik_irq_set: hpd 1\n");
7479 hpd1 |= DC_HPDx_INT_EN;
7480 }
7481 if (rdev->irq.hpd[1]) {
7482 DRM_DEBUG("cik_irq_set: hpd 2\n");
7483 hpd2 |= DC_HPDx_INT_EN;
7484 }
7485 if (rdev->irq.hpd[2]) {
7486 DRM_DEBUG("cik_irq_set: hpd 3\n");
7487 hpd3 |= DC_HPDx_INT_EN;
7488 }
7489 if (rdev->irq.hpd[3]) {
7490 DRM_DEBUG("cik_irq_set: hpd 4\n");
7491 hpd4 |= DC_HPDx_INT_EN;
7492 }
7493 if (rdev->irq.hpd[4]) {
7494 DRM_DEBUG("cik_irq_set: hpd 5\n");
7495 hpd5 |= DC_HPDx_INT_EN;
7496 }
7497 if (rdev->irq.hpd[5]) {
7498 DRM_DEBUG("cik_irq_set: hpd 6\n");
7499 hpd6 |= DC_HPDx_INT_EN;
7500 }
7501
7502 if (rdev->irq.dpm_thermal) {
7503 DRM_DEBUG("dpm thermal\n");
7504 if (rdev->flags & RADEON_IS_IGP)
7505 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
7506 else
7507 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
7508 }
7509
7510 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
7511
7512 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
7513 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
7514
7515 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
7516
7517 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
7518
7519 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
7520 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
7521 if (rdev->num_crtc >= 4) {
7522 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
7523 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
7524 }
7525 if (rdev->num_crtc >= 6) {
7526 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
7527 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
7528 }
7529
7530 if (rdev->num_crtc >= 2) {
7531 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
7532 GRPH_PFLIP_INT_MASK);
7533 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
7534 GRPH_PFLIP_INT_MASK);
7535 }
7536 if (rdev->num_crtc >= 4) {
7537 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
7538 GRPH_PFLIP_INT_MASK);
7539 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
7540 GRPH_PFLIP_INT_MASK);
7541 }
7542 if (rdev->num_crtc >= 6) {
7543 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
7544 GRPH_PFLIP_INT_MASK);
7545 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
7546 GRPH_PFLIP_INT_MASK);
7547 }
7548
7549 WREG32(DC_HPD1_INT_CONTROL, hpd1);
7550 WREG32(DC_HPD2_INT_CONTROL, hpd2);
7551 WREG32(DC_HPD3_INT_CONTROL, hpd3);
7552 WREG32(DC_HPD4_INT_CONTROL, hpd4);
7553 WREG32(DC_HPD5_INT_CONTROL, hpd5);
7554 WREG32(DC_HPD6_INT_CONTROL, hpd6);
7555
7556 if (rdev->flags & RADEON_IS_IGP)
7557 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
7558 else
7559 WREG32_SMC(CG_THERMAL_INT, thermal_int);
7560
7561 return 0;
7562 }
7563
7564 /**
7565 * cik_irq_ack - ack interrupt sources
7566 *
7567 * @rdev: radeon_device pointer
7568 *
7569 * Ack interrupt sources on the GPU (vblanks, hpd,
7570 * etc.) (CIK). Certain interrupt sources are sw
7571 * generated and do not require an explicit ack.
7572 */
7573 static inline void cik_irq_ack(struct radeon_device *rdev)
7574 {
7575 u32 tmp;
7576
7577 rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
7578 rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
7579 rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
7580 rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
7581 rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
7582 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
7583 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
7584
7585 rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
7586 EVERGREEN_CRTC0_REGISTER_OFFSET);
7587 rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
7588 EVERGREEN_CRTC1_REGISTER_OFFSET);
7589 if (rdev->num_crtc >= 4) {
7590 rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
7591 EVERGREEN_CRTC2_REGISTER_OFFSET);
7592 rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
7593 EVERGREEN_CRTC3_REGISTER_OFFSET);
7594 }
7595 if (rdev->num_crtc >= 6) {
7596 rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
7597 EVERGREEN_CRTC4_REGISTER_OFFSET);
7598 rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
7599 EVERGREEN_CRTC5_REGISTER_OFFSET);
7600 }
7601
7602 if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
7603 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
7604 GRPH_PFLIP_INT_CLEAR);
7605 if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
7606 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
7607 GRPH_PFLIP_INT_CLEAR);
7608 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
7609 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
7610 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
7611 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
7612 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
7613 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
7614 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
7615 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
7616
7617 if (rdev->num_crtc >= 4) {
7618 if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
7619 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
7620 GRPH_PFLIP_INT_CLEAR);
7621 if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
7622 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
7623 GRPH_PFLIP_INT_CLEAR);
7624 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
7625 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
7626 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
7627 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
7628 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
7629 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
7630 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
7631 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
7632 }
7633
7634 if (rdev->num_crtc >= 6) {
7635 if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
7636 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
7637 GRPH_PFLIP_INT_CLEAR);
7638 if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
7639 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
7640 GRPH_PFLIP_INT_CLEAR);
7641 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
7642 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
7643 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
7644 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
7645 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
7646 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
7647 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
7648 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
7649 }
7650
7651 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
7652 tmp = RREG32(DC_HPD1_INT_CONTROL);
7653 tmp |= DC_HPDx_INT_ACK;
7654 WREG32(DC_HPD1_INT_CONTROL, tmp);
7655 }
7656 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
7657 tmp = RREG32(DC_HPD2_INT_CONTROL);
7658 tmp |= DC_HPDx_INT_ACK;
7659 WREG32(DC_HPD2_INT_CONTROL, tmp);
7660 }
7661 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
7662 tmp = RREG32(DC_HPD3_INT_CONTROL);
7663 tmp |= DC_HPDx_INT_ACK;
7664 WREG32(DC_HPD3_INT_CONTROL, tmp);
7665 }
7666 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
7667 tmp = RREG32(DC_HPD4_INT_CONTROL);
7668 tmp |= DC_HPDx_INT_ACK;
7669 WREG32(DC_HPD4_INT_CONTROL, tmp);
7670 }
7671 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
7672 tmp = RREG32(DC_HPD5_INT_CONTROL);
7673 tmp |= DC_HPDx_INT_ACK;
7674 WREG32(DC_HPD5_INT_CONTROL, tmp);
7675 }
7676 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
7677 tmp = RREG32(DC_HPD6_INT_CONTROL);
7678 tmp |= DC_HPDx_INT_ACK;
7679 WREG32(DC_HPD6_INT_CONTROL, tmp);
7680 }
7681 }
7682
7683 /**
7684 * cik_irq_disable - disable interrupts
7685 *
7686 * @rdev: radeon_device pointer
7687 *
7688 * Disable interrupts on the hw (CIK).
7689 */
7690 static void cik_irq_disable(struct radeon_device *rdev)
7691 {
7692 cik_disable_interrupts(rdev);
7693 /* Wait and acknowledge irq */
7694 mdelay(1);
7695 cik_irq_ack(rdev);
7696 cik_disable_interrupt_state(rdev);
7697 }
7698
7699 /**
7700 * cik_irq_suspend - disable interrupts for suspend
7701 *
7702 * @rdev: radeon_device pointer
7703 *
7704 * Disable interrupts and stop the RLC (CIK).
7705 * Used for suspend.
7706 */
7707 static void cik_irq_suspend(struct radeon_device *rdev)
7708 {
7709 cik_irq_disable(rdev);
7710 cik_rlc_stop(rdev);
7711 }
7712
7713 /**
7714 * cik_irq_fini - tear down interrupt support
7715 *
7716 * @rdev: radeon_device pointer
7717 *
7718 * Disable interrupts on the hw and free the IH ring
7719 * buffer (CIK).
7720 * Used for driver unload.
7721 */
7722 static void cik_irq_fini(struct radeon_device *rdev)
7723 {
7724 cik_irq_suspend(rdev);
7725 r600_ih_ring_fini(rdev);
7726 }
7727
7728 /**
7729 * cik_get_ih_wptr - get the IH ring buffer wptr
7730 *
7731 * @rdev: radeon_device pointer
7732 *
7733 * Get the IH ring buffer wptr from either the register
7734 * or the writeback memory buffer (CIK). Also check for
7735 * ring buffer overflow and deal with it.
7736 * Used by cik_irq_process().
7737 * Returns the value of the wptr.
7738 */
7739 static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
7740 {
7741 u32 wptr, tmp;
7742
7743 if (rdev->wb.enabled)
7744 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
7745 else
7746 wptr = RREG32(IH_RB_WPTR);
7747
7748 if (wptr & RB_OVERFLOW) {
7749 wptr &= ~RB_OVERFLOW;
7750 /* When a ring buffer overflow happens, start parsing interrupts
7751 * from the last vector not overwritten (wptr + 16, one 16-byte IV
7752 * entry past wptr). Hopefully this should allow us to catch up.
7753 */
7754 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
7755 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
7756 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
7757 tmp = RREG32(IH_RB_CNTL);
7758 tmp |= IH_WPTR_OVERFLOW_CLEAR;
7759 WREG32(IH_RB_CNTL, tmp);
7760 }
7761 return (wptr & rdev->ih.ptr_mask);
7762 }
7763
7764 /* CIK IV Ring
7765 * Each IV ring entry is 128 bits:
7766 * [7:0] - interrupt source id
7767 * [31:8] - reserved
7768 * [59:32] - interrupt source data
7769 * [63:60] - reserved
7770 * [71:64] - RINGID
7771 * CP:
7772 * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
7773 * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
7774 * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
7775 * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
7776 * PIPE_ID - ME0 0=3D
7777 * - ME1&2 compute dispatcher (4 pipes each)
7778 * SDMA:
7779 * INSTANCE_ID [1:0], QUEUE_ID[1:0]
7780 * INSTANCE_ID - 0 = sdma0, 1 = sdma1
7781 * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
7782 * [79:72] - VMID
7783 * [95:80] - PASID
7784 * [127:96] - reserved
7785 */
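/* In terms of the three dwords read per entry in cik_irq_process():
 *	dw0[7:0]  = src_id
 *	dw1[27:0] = src_data
 *	dw2[7:0]  = ring_id (ME_ID/PIPE_ID/QUEUE_ID, or
 *		    INSTANCE_ID/QUEUE_ID for SDMA)
 */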
7786 /**
7787 * cik_irq_process - interrupt handler
7788 *
7789 * @rdev: radeon_device pointer
7790 *
7791 * Interrupt handler (CIK). Walk the IH ring,
7792 * ack interrupts and schedule work to handle
7793 * interrupt events.
7794 * Returns irq process return code.
7795 */
7796 int cik_irq_process(struct radeon_device *rdev)
7797 {
7798 struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7799 struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7800 u32 wptr;
7801 u32 rptr;
7802 u32 src_id, src_data, ring_id;
7803 u8 me_id, pipe_id, queue_id;
7804 u32 ring_index;
7805 bool queue_hotplug = false;
7806 bool queue_reset = false;
7807 u32 addr, status, mc_client;
7808 bool queue_thermal = false;
7809
7810 if (!rdev->ih.enabled || rdev->shutdown)
7811 return IRQ_NONE;
7812
7813 wptr = cik_get_ih_wptr(rdev);
7814
7815 restart_ih:
7816 /* is somebody else already processing irqs? */
7817 if (atomic_xchg(&rdev->ih.lock, 1))
7818 return IRQ_NONE;
7819
7820 rptr = rdev->ih.rptr;
7821 DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
7822
7823 /* Order reading of wptr vs. reading of IH ring data */
7824 rmb();
7825
7826 /* display interrupts */
7827 cik_irq_ack(rdev);
7828
7829 while (rptr != wptr) {
7830 /* wptr/rptr are in bytes! */
7831 ring_index = rptr / 4;
7832
7833 radeon_kfd_interrupt(rdev,
7834 (const void *) &rdev->ih.ring[ring_index]);
7835
7836 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
7837 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
7838 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
7839
7840 switch (src_id) {
7841 case 1: /* D1 vblank/vline */
7842 switch (src_data) {
7843 case 0: /* D1 vblank */
7844 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
7845 if (rdev->irq.crtc_vblank_int[0]) {
7846 drm_handle_vblank(rdev->ddev, 0);
7847 rdev->pm.vblank_sync = true;
7848 wake_up(&rdev->irq.vblank_queue);
7849 }
7850 if (atomic_read(&rdev->irq.pflip[0]))
7851 radeon_crtc_handle_vblank(rdev, 0);
7852 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7853 DRM_DEBUG("IH: D1 vblank\n");
7854 }
7855 break;
7856 case 1: /* D1 vline */
7857 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
7858 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
7859 DRM_DEBUG("IH: D1 vline\n");
7860 }
7861 break;
7862 default:
7863 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7864 break;
7865 }
7866 break;
7867 case 2: /* D2 vblank/vline */
7868 switch (src_data) {
7869 case 0: /* D2 vblank */
7870 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
7871 if (rdev->irq.crtc_vblank_int[1]) {
7872 drm_handle_vblank(rdev->ddev, 1);
7873 rdev->pm.vblank_sync = true;
7874 wake_up(&rdev->irq.vblank_queue);
7875 }
7876 if (atomic_read(&rdev->irq.pflip[1]))
7877 radeon_crtc_handle_vblank(rdev, 1);
7878 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
7879 DRM_DEBUG("IH: D2 vblank\n");
7880 }
7881 break;
7882 case 1: /* D2 vline */
7883 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
7884 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
7885 DRM_DEBUG("IH: D2 vline\n");
7886 }
7887 break;
7888 default:
7889 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7890 break;
7891 }
7892 break;
7893 case 3: /* D3 vblank/vline */
7894 switch (src_data) {
7895 case 0: /* D3 vblank */
7896 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
7897 if (rdev->irq.crtc_vblank_int[2]) {
7898 drm_handle_vblank(rdev->ddev, 2);
7899 rdev->pm.vblank_sync = true;
7900 wake_up(&rdev->irq.vblank_queue);
7901 }
7902 if (atomic_read(&rdev->irq.pflip[2]))
7903 radeon_crtc_handle_vblank(rdev, 2);
7904 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
7905 DRM_DEBUG("IH: D3 vblank\n");
7906 }
7907 break;
7908 case 1: /* D3 vline */
7909 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
7910 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
7911 DRM_DEBUG("IH: D3 vline\n");
7912 }
7913 break;
7914 default:
7915 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7916 break;
7917 }
7918 break;
7919 case 4: /* D4 vblank/vline */
7920 switch (src_data) {
7921 case 0: /* D4 vblank */
7922 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
7923 if (rdev->irq.crtc_vblank_int[3]) {
7924 drm_handle_vblank(rdev->ddev, 3);
7925 rdev->pm.vblank_sync = true;
7926 wake_up(&rdev->irq.vblank_queue);
7927 }
7928 if (atomic_read(&rdev->irq.pflip[3]))
7929 radeon_crtc_handle_vblank(rdev, 3);
7930 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
7931 DRM_DEBUG("IH: D4 vblank\n");
7932 }
7933 break;
7934 case 1: /* D4 vline */
7935 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
7936 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
7937 DRM_DEBUG("IH: D4 vline\n");
7938 }
7939 break;
7940 default:
7941 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7942 break;
7943 }
7944 break;
7945 case 5: /* D5 vblank/vline */
7946 switch (src_data) {
7947 case 0: /* D5 vblank */
7948 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
7949 if (rdev->irq.crtc_vblank_int[4]) {
7950 drm_handle_vblank(rdev->ddev, 4);
7951 rdev->pm.vblank_sync = true;
7952 wake_up(&rdev->irq.vblank_queue);
7953 }
7954 if (atomic_read(&rdev->irq.pflip[4]))
7955 radeon_crtc_handle_vblank(rdev, 4);
7956 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
7957 DRM_DEBUG("IH: D5 vblank\n");
7958 }
7959 break;
7960 case 1: /* D5 vline */
7961 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
7962 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
7963 DRM_DEBUG("IH: D5 vline\n");
7964 }
7965 break;
7966 default:
7967 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7968 break;
7969 }
7970 break;
7971 case 6: /* D6 vblank/vline */
7972 switch (src_data) {
7973 case 0: /* D6 vblank */
7974 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
7975 if (rdev->irq.crtc_vblank_int[5]) {
7976 drm_handle_vblank(rdev->ddev, 5);
7977 rdev->pm.vblank_sync = true;
7978 wake_up(&rdev->irq.vblank_queue);
7979 }
7980 if (atomic_read(&rdev->irq.pflip[5]))
7981 radeon_crtc_handle_vblank(rdev, 5);
7982 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
7983 DRM_DEBUG("IH: D6 vblank\n");
7984 }
7985 break;
7986 case 1: /* D6 vline */
7987 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
7988 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
7989 DRM_DEBUG("IH: D6 vline\n");
7990 }
7991 break;
7992 default:
7993 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7994 break;
7995 }
7996 break;
7997 case 8: /* D1 page flip */
7998 case 10: /* D2 page flip */
7999 case 12: /* D3 page flip */
8000 case 14: /* D4 page flip */
8001 case 16: /* D5 page flip */
8002 case 18: /* D6 page flip */
8003 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
8004 if (radeon_use_pflipirq > 0)
8005 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
8006 break;
8007 case 42: /* HPD hotplug */
8008 switch (src_data) {
8009 case 0:
8010 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
8011 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
8012 queue_hotplug = true;
8013 DRM_DEBUG("IH: HPD1\n");
8014 }
8015 break;
8016 case 1:
8017 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
8018 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
8019 queue_hotplug = true;
8020 DRM_DEBUG("IH: HPD2\n");
8021 }
8022 break;
8023 case 2:
8024 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
8025 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
8026 queue_hotplug = true;
8027 DRM_DEBUG("IH: HPD3\n");
8028 }
8029 break;
8030 case 3:
8031 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
8032 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
8033 queue_hotplug = true;
8034 DRM_DEBUG("IH: HPD4\n");
8035 }
8036 break;
8037 case 4:
8038 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
8039 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
8040 queue_hotplug = true;
8041 DRM_DEBUG("IH: HPD5\n");
8042 }
8043 break;
8044 case 5:
8045 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
8046 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
8047 queue_hotplug = true;
8048 DRM_DEBUG("IH: HPD6\n");
8049 }
8050 break;
8051 default:
8052 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
8053 break;
8054 }
8055 break;
8056 case 124: /* UVD */
8057 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
8058 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
8059 break;
8060 case 146:
8061 case 147:
8062 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
8063 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
8064 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
8065 /* reset addr and status */
8066 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
8067 if (addr == 0x0 && status == 0x0)
8068 break;
8069 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
8070 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
8071 addr);
8072 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
8073 status);
8074 cik_vm_decode_fault(rdev, status, addr, mc_client);
8075 break;
8076 case 167: /* VCE */
8077 DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
8078 switch (src_data) {
8079 case 0:
8080 radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX);
8081 break;
8082 case 1:
8083 radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX);
8084 break;
8085 default:
8086 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
8087 break;
8088 }
8089 break;
8090 case 176: /* GFX RB CP_INT */
8091 case 177: /* GFX IB CP_INT */
8092 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
8093 break;
8094 case 181: /* CP EOP event */
8095 DRM_DEBUG("IH: CP EOP\n");
8096 /* XXX check the bitfield order! */
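/* assumed layout, matching the masks below:
 * ring_id[6:5] = ME_ID, [4:3] = PIPE_ID, [2:0] = QUEUE_ID */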
8097 me_id = (ring_id & 0x60) >> 5;
8098 pipe_id = (ring_id & 0x18) >> 3;
8099 queue_id = (ring_id & 0x7) >> 0;
8100 switch (me_id) {
8101 case 0:
8102 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
8103 break;
8104 case 1:
8105 case 2:
8106 if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
8107 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
8108 if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
8109 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
8110 break;
8111 }
8112 break;
8113 case 184: /* CP Privileged reg access */
8114 DRM_ERROR("Illegal register access in command stream\n");
8115 /* XXX check the bitfield order! */
8116 me_id = (ring_id & 0x60) >> 5;
8117 pipe_id = (ring_id & 0x18) >> 3;
8118 queue_id = (ring_id & 0x7) >> 0;
8119 switch (me_id) {
8120 case 0:
8121 /* This results in a full GPU reset, but all we need to do is soft
8122 * reset the CP for gfx
8123 */
8124 queue_reset = true;
8125 break;
8126 case 1:
8127 /* XXX compute */
8128 queue_reset = true;
8129 break;
8130 case 2:
8131 /* XXX compute */
8132 queue_reset = true;
8133 break;
8134 }
8135 break;
8136 case 185: /* CP Privileged inst */
8137 DRM_ERROR("Illegal instruction in command stream\n");
8138 /* XXX check the bitfield order! */
8139 me_id = (ring_id & 0x60) >> 5;
8140 pipe_id = (ring_id & 0x18) >> 3;
8141 queue_id = (ring_id & 0x7) >> 0;
8142 switch (me_id) {
8143 case 0:
8144 /* This results in a full GPU reset, but all we need to do is soft
8145 * reset the CP for gfx
8146 */
8147 queue_reset = true;
8148 break;
8149 case 1:
8150 /* XXX compute */
8151 queue_reset = true;
8152 break;
8153 case 2:
8154 /* XXX compute */
8155 queue_reset = true;
8156 break;
8157 }
8158 break;
8159 case 224: /* SDMA trap event */
8160 /* XXX check the bitfield order! */
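/* assumed layout, matching the masks below:
 * ring_id[1:0] = INSTANCE_ID, [3:2] = QUEUE_ID */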
8161 me_id = (ring_id & 0x3) >> 0;
8162 queue_id = (ring_id & 0xc) >> 2;
8163 DRM_DEBUG("IH: SDMA trap\n");
8164 switch (me_id) {
8165 case 0:
8166 switch (queue_id) {
8167 case 0:
8168 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
8169 break;
8170 case 1:
8171 /* XXX compute */
8172 break;
8173 case 2:
8174 /* XXX compute */
8175 break;
8176 }
8177 break;
8178 case 1:
8179 switch (queue_id) {
8180 case 0:
8181 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
8182 break;
8183 case 1:
8184 /* XXX compute */
8185 break;
8186 case 2:
8187 /* XXX compute */
8188 break;
8189 }
8190 break;
8191 }
8192 break;
8193 case 230: /* thermal low to high */
8194 DRM_DEBUG("IH: thermal low to high\n");
8195 rdev->pm.dpm.thermal.high_to_low = false;
8196 queue_thermal = true;
8197 break;
8198 case 231: /* thermal high to low */
8199 DRM_DEBUG("IH: thermal high to low\n");
8200 rdev->pm.dpm.thermal.high_to_low = true;
8201 queue_thermal = true;
8202 break;
8203 case 233: /* GUI IDLE */
8204 DRM_DEBUG("IH: GUI idle\n");
8205 break;
8206 case 241: /* SDMA Privileged inst */
8207 case 247: /* SDMA Privileged inst */
8208 DRM_ERROR("Illegal instruction in SDMA command stream\n");
8209 /* XXX check the bitfield order! */
8210 me_id = (ring_id & 0x3) >> 0;
8211 queue_id = (ring_id & 0xc) >> 2;
8212 switch (me_id) {
8213 case 0:
8214 switch (queue_id) {
8215 case 0:
8216 queue_reset = true;
8217 break;
8218 case 1:
8219 /* XXX compute */
8220 queue_reset = true;
8221 break;
8222 case 2:
8223 /* XXX compute */
8224 queue_reset = true;
8225 break;
8226 }
8227 break;
8228 case 1:
8229 switch (queue_id) {
8230 case 0:
8231 queue_reset = true;
8232 break;
8233 case 1:
8234 /* XXX compute */
8235 queue_reset = true;
8236 break;
8237 case 2:
8238 /* XXX compute */
8239 queue_reset = true;
8240 break;
8241 }
8242 break;
8243 }
8244 break;
8245 default:
8246 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
8247 break;
8248 }
8249
8250 /* wptr/rptr are in bytes! */
8251 rptr += 16;
8252 rptr &= rdev->ih.ptr_mask;
8253 WREG32(IH_RB_RPTR, rptr);
8254 }
8255 if (queue_hotplug)
8256 schedule_work(&rdev->hotplug_work);
8257 if (queue_reset) {
8258 rdev->needs_reset = true;
8259 wake_up_all(&rdev->fence_queue);
8260 }
8261 if (queue_thermal)
8262 schedule_work(&rdev->pm.dpm.thermal.work);
8263 rdev->ih.rptr = rptr;
8264 atomic_set(&rdev->ih.lock, 0);
8265
8266 /* make sure wptr hasn't changed while processing */
8267 wptr = cik_get_ih_wptr(rdev);
8268 if (wptr != rptr)
8269 goto restart_ih;
8270
8271 return IRQ_HANDLED;
8272 }
8273
8274 /*
8275 * startup/shutdown callbacks
8276 */
8277 /**
8278 * cik_startup - program the asic to a functional state
8279 *
8280 * @rdev: radeon_device pointer
8281 *
8282 * Programs the asic to a functional state (CIK).
8283 * Called by cik_init() and cik_resume().
8284 * Returns 0 for success, error for failure.
8285 */
8286 static int cik_startup(struct radeon_device *rdev)
8287 {
8288 struct radeon_ring *ring;
8289 u32 nop;
8290 int r;
8291
8292 /* enable pcie gen2/3 link */
8293 cik_pcie_gen3_enable(rdev);
8294 /* enable aspm */
8295 cik_program_aspm(rdev);
8296
8297 /* scratch needs to be initialized before MC */
8298 r = r600_vram_scratch_init(rdev);
8299 if (r)
8300 return r;
8301
8302 cik_mc_program(rdev);
8303
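/* IGPs have no MC firmware; with DPM already enabled the MC ucode
 * is presumably still loaded from the previous init (assumption
 * inferred from the condition below) */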
8304 if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
8305 r = ci_mc_load_microcode(rdev);
8306 if (r) {
8307 DRM_ERROR("Failed to load MC firmware!\n");
8308 return r;
8309 }
8310 }
8311
8312 r = cik_pcie_gart_enable(rdev);
8313 if (r)
8314 return r;
8315 cik_gpu_init(rdev);
8316
8317 /* allocate rlc buffers */
8318 if (rdev->flags & RADEON_IS_IGP) {
8319 if (rdev->family == CHIP_KAVERI) {
8320 rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
8321 rdev->rlc.reg_list_size =
8322 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
8323 } else {
8324 rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
8325 rdev->rlc.reg_list_size =
8326 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
8327 }
8328 }
8329 rdev->rlc.cs_data = ci_cs_data;
8330 rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
8331 r = sumo_rlc_init(rdev);
8332 if (r) {
8333 DRM_ERROR("Failed to init rlc BOs!\n");
8334 return r;
8335 }
8336
8337 /* allocate wb buffer */
8338 r = radeon_wb_init(rdev);
8339 if (r)
8340 return r;
8341
8342 /* allocate mec buffers */
8343 r = cik_mec_init(rdev);
8344 if (r) {
8345 DRM_ERROR("Failed to init MEC BOs!\n");
8346 return r;
8347 }
8348
8349 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
8350 if (r) {
8351 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
8352 return r;
8353 }
8354
8355 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
8356 if (r) {
8357 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
8358 return r;
8359 }
8360
8361 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
8362 if (r) {
8363 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
8364 return r;
8365 }
8366
8367 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
8368 if (r) {
8369 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
8370 return r;
8371 }
8372
8373 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
8374 if (r) {
8375 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
8376 return r;
8377 }
8378
8379 r = radeon_uvd_resume(rdev);
8380 if (!r) {
8381 r = uvd_v4_2_resume(rdev);
8382 if (!r) {
8383 r = radeon_fence_driver_start_ring(rdev,
8384 R600_RING_TYPE_UVD_INDEX);
8385 if (r)
8386 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
8387 }
8388 }
8389 if (r)
8390 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
8391
8392 r = radeon_vce_resume(rdev);
8393 if (!r) {
8394 r = vce_v2_0_resume(rdev);
8395 if (!r)
8396 r = radeon_fence_driver_start_ring(rdev,
8397 TN_RING_TYPE_VCE1_INDEX);
8398 if (!r)
8399 r = radeon_fence_driver_start_ring(rdev,
8400 TN_RING_TYPE_VCE2_INDEX);
8401 }
8402 if (r) {
8403 dev_err(rdev->dev, "VCE init error (%d).\n", r);
8404 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
8405 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
8406 }
8407
8408 /* Enable IRQ */
8409 if (!rdev->irq.installed) {
8410 r = radeon_irq_kms_init(rdev);
8411 if (r)
8412 return r;
8413 }
8414
8415 r = cik_irq_init(rdev);
8416 if (r) {
8417 DRM_ERROR("radeon: IH init failed (%d).\n", r);
8418 radeon_irq_kms_fini(rdev);
8419 return r;
8420 }
8421 cik_irq_set(rdev);
8422
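/* older Hawaii firmware apparently cannot handle type-3 NOP packets
 * on the gfx ring, so fall back to type-2 there (assumption inferred
 * from the new_fw check below) */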
8423 if (rdev->family == CHIP_HAWAII) {
8424 if (rdev->new_fw)
8425 nop = PACKET3(PACKET3_NOP, 0x3FFF);
8426 else
8427 nop = RADEON_CP_PACKET2;
8428 } else {
8429 nop = PACKET3(PACKET3_NOP, 0x3FFF);
8430 }
8431
8432 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
8433 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
8434 nop);
8435 if (r)
8436 return r;
8437
8438 /* set up the compute queues */
8439 /* type-2 packets are deprecated on MEC, use type-3 instead */
8440 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
8441 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
8442 nop);
8443 if (r)
8444 return r;
8445 ring->me = 1; /* first MEC */
8446 ring->pipe = 0; /* first pipe */
8447 ring->queue = 0; /* first queue */
8448 ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
8449
8450 /* type-2 packets are deprecated on MEC, use type-3 instead */
8451 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
8452 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
8453 nop);
8454 if (r)
8455 return r;
8456 /* dGPUs only have 1 MEC */
8457 ring->me = 1; /* first MEC */
8458 ring->pipe = 0; /* first pipe */
8459 ring->queue = 1; /* second queue */
8460 ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
8461
8462 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
8463 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
8464 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
8465 if (r)
8466 return r;
8467
8468 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
8469 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
8470 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
8471 if (r)
8472 return r;
8473
8474 r = cik_cp_resume(rdev);
8475 if (r)
8476 return r;
8477
8478 r = cik_sdma_resume(rdev);
8479 if (r)
8480 return r;
8481
8482 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
8483 if (ring->ring_size) {
8484 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
8485 RADEON_CP_PACKET2);
8486 if (!r)
8487 r = uvd_v1_0_init(rdev);
8488 if (r)
8489 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
8490 }
8491
8492 r = -ENOENT;
8493
8494 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
8495 if (ring->ring_size)
8496 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
8497 VCE_CMD_NO_OP);
8498
8499 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
8500 if (ring->ring_size)
8501 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
8502 VCE_CMD_NO_OP);
8503
8504 if (!r)
8505 r = vce_v1_0_init(rdev);
8506 else if (r != -ENOENT)
8507 DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
8508
8509 r = radeon_ib_pool_init(rdev);
8510 if (r) {
8511 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
8512 return r;
8513 }
8514
8515 r = radeon_vm_manager_init(rdev);
8516 if (r) {
8517 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
8518 return r;
8519 }
8520
8521 r = radeon_audio_init(rdev);
8522 if (r)
8523 return r;
8524
8525 r = radeon_kfd_resume(rdev);
8526 if (r)
8527 return r;
8528
8529 return 0;
8530 }
8531
8532 /**
8533 * cik_resume - resume the asic to a functional state
8534 *
8535 * @rdev: radeon_device pointer
8536 *
8537 * Programs the asic to a functional state (CIK).
8538 * Called at resume.
8539 * Returns 0 for success, error for failure.
8540 */
8541 int cik_resume(struct radeon_device *rdev)
8542 {
8543 int r;
8544
8545 /* post card */
8546 atom_asic_init(rdev->mode_info.atom_context);
8547
8548 /* init golden registers */
8549 cik_init_golden_registers(rdev);
8550
8551 if (rdev->pm.pm_method == PM_METHOD_DPM)
8552 radeon_pm_resume(rdev);
8553
8554 rdev->accel_working = true;
8555 r = cik_startup(rdev);
8556 if (r) {
8557 DRM_ERROR("cik startup failed on resume\n");
8558 rdev->accel_working = false;
8559 return r;
8560 }
8561
8562 return r;
8564 }
8565
8566 /**
8567 * cik_suspend - suspend the asic
8568 *
8569 * @rdev: radeon_device pointer
8570 *
8571 * Bring the chip into a state suitable for suspend (CIK).
8572 * Called at suspend.
8573 * Returns 0 for success.
8574 */
8575 int cik_suspend(struct radeon_device *rdev)
8576 {
8577 radeon_kfd_suspend(rdev);
8578 radeon_pm_suspend(rdev);
8579 radeon_audio_fini(rdev);
8580 radeon_vm_manager_fini(rdev);
8581 cik_cp_enable(rdev, false);
8582 cik_sdma_enable(rdev, false);
8583 uvd_v1_0_fini(rdev);
8584 radeon_uvd_suspend(rdev);
8585 radeon_vce_suspend(rdev);
8586 cik_fini_pg(rdev);
8587 cik_fini_cg(rdev);
8588 cik_irq_suspend(rdev);
8589 radeon_wb_disable(rdev);
8590 cik_pcie_gart_disable(rdev);
8591 return 0;
8592 }
8593
8594 /* The plan is to move initialization into that function and use
8595 * helper functions so that radeon_device_init does pretty much
8596 * nothing more than call asic specific functions. This should
8597 * also allow us to remove a bunch of callback functions
8598 * like vram_info.
8599 */
8600 /**
8601 * cik_init - asic specific driver and hw init
8602 *
8603 * @rdev: radeon_device pointer
8604 *
8605 * Setup asic specific driver variables and program the hw
8606 * to a functional state (CIK).
8607 * Called at driver startup.
8608 * Returns 0 for success, errors for failure.
8609 */
8610 int cik_init(struct radeon_device *rdev)
8611 {
8612 struct radeon_ring *ring;
8613 int r;
8614
8615 /* Read BIOS */
8616 if (!radeon_get_bios(rdev)) {
8617 if (ASIC_IS_AVIVO(rdev))
8618 return -EINVAL;
8619 }
8620 /* Must be an ATOMBIOS */
8621 if (!rdev->is_atom_bios) {
8622 dev_err(rdev->dev, "Expecting atombios for CIK GPU\n");
8623 return -EINVAL;
8624 }
8625 r = radeon_atombios_init(rdev);
8626 if (r)
8627 return r;
8628
8629 /* Post card if necessary */
8630 if (!radeon_card_posted(rdev)) {
8631 if (!rdev->bios) {
8632 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
8633 return -EINVAL;
8634 }
8635 DRM_INFO("GPU not posted. posting now...\n");
8636 atom_asic_init(rdev->mode_info.atom_context);
8637 }
8638 /* init golden registers */
8639 cik_init_golden_registers(rdev);
8640 /* Initialize scratch registers */
8641 cik_scratch_init(rdev);
8642 /* Initialize surface registers */
8643 radeon_surface_init(rdev);
8644 /* Initialize clocks */
8645 radeon_get_clock_info(rdev->ddev);
8646
8647 /* Fence driver */
8648 r = radeon_fence_driver_init(rdev);
8649 if (r)
8650 return r;
8651
8652 /* initialize memory controller */
8653 r = cik_mc_init(rdev);
8654 if (r)
8655 return r;
8656 /* Memory manager */
8657 r = radeon_bo_init(rdev);
8658 if (r)
8659 return r;
8660
8661 if (rdev->flags & RADEON_IS_IGP) {
8662 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
8663 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
8664 r = cik_init_microcode(rdev);
8665 if (r) {
8666 DRM_ERROR("Failed to load firmware!\n");
8667 return r;
8668 }
8669 }
8670 } else {
8671 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
8672 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
8673 !rdev->mc_fw) {
8674 r = cik_init_microcode(rdev);
8675 if (r) {
8676 DRM_ERROR("Failed to load firmware!\n");
8677 return r;
8678 }
8679 }
8680 }
8681
8682 /* Initialize power management */
8683 radeon_pm_init(rdev);
8684
8685 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
8686 ring->ring_obj = NULL;
8687 r600_ring_init(rdev, ring, 1024 * 1024);
8688
8689 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
8690 ring->ring_obj = NULL;
8691 r600_ring_init(rdev, ring, 1024 * 1024);
8692 r = radeon_doorbell_get(rdev, &ring->doorbell_index);
8693 if (r)
8694 return r;
8695
8696 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
8697 ring->ring_obj = NULL;
8698 r600_ring_init(rdev, ring, 1024 * 1024);
8699 r = radeon_doorbell_get(rdev, &ring->doorbell_index);
8700 if (r)
8701 return r;
8702
8703 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
8704 ring->ring_obj = NULL;
8705 r600_ring_init(rdev, ring, 256 * 1024);
8706
8707 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
8708 ring->ring_obj = NULL;
8709 r600_ring_init(rdev, ring, 256 * 1024);
8710
8711 r = radeon_uvd_init(rdev);
8712 if (!r) {
8713 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
8714 ring->ring_obj = NULL;
8715 r600_ring_init(rdev, ring, 4096);
8716 }
8717
8718 r = radeon_vce_init(rdev);
8719 if (!r) {
8720 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
8721 ring->ring_obj = NULL;
8722 r600_ring_init(rdev, ring, 4096);
8723
8724 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
8725 ring->ring_obj = NULL;
8726 r600_ring_init(rdev, ring, 4096);
8727 }
8728
8729 rdev->ih.ring_obj = NULL;
8730 r600_ih_ring_init(rdev, 64 * 1024);
8731
8732 r = r600_pcie_gart_init(rdev);
8733 if (r)
8734 return r;
8735
8736 rdev->accel_working = true;
8737 r = cik_startup(rdev);
8738 if (r) {
8739 dev_err(rdev->dev, "disabling GPU acceleration\n");
8740 cik_cp_fini(rdev);
8741 cik_sdma_fini(rdev);
8742 cik_irq_fini(rdev);
8743 sumo_rlc_fini(rdev);
8744 cik_mec_fini(rdev);
8745 radeon_wb_fini(rdev);
8746 radeon_ib_pool_fini(rdev);
8747 radeon_vm_manager_fini(rdev);
8748 radeon_irq_kms_fini(rdev);
8749 cik_pcie_gart_fini(rdev);
8750 rdev->accel_working = false;
8751 }
8752
8753 /* Don't start up if the MC ucode is missing.
8754 * The default clocks and voltages before the MC ucode
8755 * is loaded are not sufficient for advanced operations.
8756 */
8757 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
8758 DRM_ERROR("radeon: MC ucode required for NI+.\n");
8759 return -EINVAL;
8760 }
8761
8762 return 0;
8763 }
8764
8765 /**
8766 * cik_fini - asic specific driver and hw fini
8767 *
8768 * @rdev: radeon_device pointer
8769 *
8770 * Tear down the asic specific driver variables and program the hw
8771 * to an idle state (CIK).
8772 * Called at driver unload.
8773 */
8774 void cik_fini(struct radeon_device *rdev)
8775 {
8776 radeon_pm_fini(rdev);
8777 cik_cp_fini(rdev);
8778 cik_sdma_fini(rdev);
8779 cik_fini_pg(rdev);
8780 cik_fini_cg(rdev);
8781 cik_irq_fini(rdev);
8782 sumo_rlc_fini(rdev);
8783 cik_mec_fini(rdev);
8784 radeon_wb_fini(rdev);
8785 radeon_vm_manager_fini(rdev);
8786 radeon_ib_pool_fini(rdev);
8787 radeon_irq_kms_fini(rdev);
8788 uvd_v1_0_fini(rdev);
8789 radeon_uvd_fini(rdev);
8790 radeon_vce_fini(rdev);
8791 cik_pcie_gart_fini(rdev);
8792 r600_vram_scratch_fini(rdev);
8793 radeon_gem_fini(rdev);
8794 radeon_fence_driver_fini(rdev);
8795 radeon_bo_fini(rdev);
8796 radeon_atombios_fini(rdev);
8797 kfree(rdev->bios);
8798 rdev->bios = NULL;
8799 }
8800
8801 void dce8_program_fmt(struct drm_encoder *encoder)
8802 {
8803 struct drm_device *dev = encoder->dev;
8804 struct radeon_device *rdev = dev->dev_private;
8805 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
8806 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
8807 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
8808 int bpc = 0;
8809 u32 tmp = 0;
8810 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
8811
8812 if (connector) {
8813 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
8814 bpc = radeon_get_monitor_bpc(connector);
8815 dither = radeon_connector->dither;
8816 }
8817
8818 /* LVDS/eDP FMT is set up by atom */
8819 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
8820 return;
8821
8822 /* not needed for analog */
8823 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
8824 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
8825 return;
8826
8827 if (bpc == 0)
8828 return;
8829
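/* the FMT truncate/dither depth field encodes the target precision:
 * 0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc (matches the switch below) */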
8830 switch (bpc) {
8831 case 6:
8832 if (dither == RADEON_FMT_DITHER_ENABLE)
8833 /* XXX sort out optimal dither settings */
8834 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
8835 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
8836 else
8837 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
8838 break;
8839 case 8:
8840 if (dither == RADEON_FMT_DITHER_ENABLE)
8841 /* XXX sort out optimal dither settings */
8842 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
8843 FMT_RGB_RANDOM_ENABLE |
8844 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
8845 else
8846 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
8847 break;
8848 case 10:
8849 if (dither == RADEON_FMT_DITHER_ENABLE)
8850 /* XXX sort out optimal dither settings */
8851 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
8852 FMT_RGB_RANDOM_ENABLE |
8853 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
8854 else
8855 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
8856 break;
8857 default:
8858 /* not needed */
8859 break;
8860 }
8861
8862 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
8863 }
8864
8865 /* display watermark setup */
8866 /**
8867 * dce8_line_buffer_adjust - Set up the line buffer
8868 *
8869 * @rdev: radeon_device pointer
8870 * @radeon_crtc: the selected display controller
8871 * @mode: the current display mode on the selected display
8872 * controller
8873 *
8874 * Set up the line buffer allocation for
8875 * the selected display controller (CIK).
8876 * Returns the line buffer size in pixels.
8877 */
8878 static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
8879 struct radeon_crtc *radeon_crtc,
8880 struct drm_display_mode *mode)
8881 {
8882 u32 tmp, buffer_alloc, i;
8883 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
8884 /*
8885 * Line Buffer Setup
8886 * There are 6 line buffers, one for each display controller.
8887 * There are 3 partitions per LB. Select the number of partitions
8888 * to enable based on the display width. For display widths larger
8889 * than 4096, you need to use 2 display controllers and combine
8890 * them using the stereo blender.
8891 */
8892 if (radeon_crtc->base.enabled && mode) {
8893 if (mode->crtc_hdisplay < 1920) {
8894 tmp = 1;
8895 buffer_alloc = 2;
8896 } else if (mode->crtc_hdisplay < 2560) {
8897 tmp = 2;
8898 buffer_alloc = 2;
8899 } else if (mode->crtc_hdisplay < 4096) {
8900 tmp = 0;
8901 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
8902 } else {
8903 DRM_DEBUG_KMS("Mode too big for LB!\n");
8904 tmp = 0;
8905 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
8906 }
8907 } else {
8908 tmp = 1;
8909 buffer_alloc = 0;
8910 }
8911
8912 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
8913 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
8914
8915 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
8916 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
8917 for (i = 0; i < rdev->usec_timeout; i++) {
8918 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
8919 DMIF_BUFFERS_ALLOCATED_COMPLETED)
8920 break;
8921 udelay(1);
8922 }
8923
8924 if (radeon_crtc->base.enabled && mode) {
8925 switch (tmp) {
8926 case 0:
8927 default:
8928 return 4096 * 2;
8929 case 1:
8930 return 1920 * 2;
8931 case 2:
8932 return 2560 * 2;
8933 }
8934 }
8935
8936 /* controller not enabled, so no lb used */
8937 return 0;
8938 }
8939
8940 /**
8941 * cik_get_number_of_dram_channels - get the number of dram channels
8942 *
8943 * @rdev: radeon_device pointer
8944 *
8945 * Look up the number of video ram channels (CIK).
8946 * Used for display watermark bandwidth calculations
8947 * Returns the number of dram channels
8948 */
8949 static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
8950 {
8951 u32 tmp = RREG32(MC_SHARED_CHMAP);
8952
8953 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
8954 case 0:
8955 default:
8956 return 1;
8957 case 1:
8958 return 2;
8959 case 2:
8960 return 4;
8961 case 3:
8962 return 8;
8963 case 4:
8964 return 3;
8965 case 5:
8966 return 6;
8967 case 6:
8968 return 10;
8969 case 7:
8970 return 12;
8971 case 8:
8972 return 16;
8973 }
8974 }
8975
8976 struct dce8_wm_params {
8977 u32 dram_channels; /* number of dram channels */
8978 u32 yclk; /* bandwidth per dram data pin in kHz */
8979 u32 sclk; /* engine clock in kHz */
8980 u32 disp_clk; /* display clock in kHz */
8981 u32 src_width; /* viewport width */
8982 u32 active_time; /* active display time in ns */
8983 u32 blank_time; /* blank time in ns */
8984 bool interlaced; /* mode is interlaced */
8985 fixed20_12 vsc; /* vertical scale ratio */
8986 u32 num_heads; /* number of active crtcs */
8987 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
8988 u32 lb_size; /* line buffer allocated to pipe */
8989 u32 vtaps; /* vertical scaler taps */
8990 };
8991
8992 /**
8993 * dce8_dram_bandwidth - get the dram bandwidth
8994 *
8995 * @wm: watermark calculation data
8996 *
8997 * Calculate the raw dram bandwidth (CIK).
8998 * Used for display watermark bandwidth calculations
8999 * Returns the dram bandwidth in MBytes/s
9000 */
9001 static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
9002 {
9003 /* Calculate raw DRAM Bandwidth */
9004 fixed20_12 dram_efficiency; /* 0.7 */
9005 fixed20_12 yclk, dram_channels, bandwidth;
9006 fixed20_12 a;
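/* effectively: bandwidth (MB/s) = yclk (MHz) * dram_channels *
 * 4 bytes per channel * 0.7 efficiency, in 20.12 fixed point */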
9007
9008 a.full = dfixed_const(1000);
9009 yclk.full = dfixed_const(wm->yclk);
9010 yclk.full = dfixed_div(yclk, a);
9011 dram_channels.full = dfixed_const(wm->dram_channels * 4);
9012 a.full = dfixed_const(10);
9013 dram_efficiency.full = dfixed_const(7);
9014 dram_efficiency.full = dfixed_div(dram_efficiency, a);
9015 bandwidth.full = dfixed_mul(dram_channels, yclk);
9016 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
9017
9018 return dfixed_trunc(bandwidth);
9019 }
9020
9021 /**
9022 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
9023 *
9024 * @wm: watermark calculation data
9025 *
9026 * Calculate the dram bandwidth used for display (CIK).
9027 * Used for display watermark bandwidth calculations
9028 * Returns the dram bandwidth for display in MBytes/s
9029 */
9030 static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
9031 {
9032 /* Calculate DRAM Bandwidth and the part allocated to display. */
9033 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
9034 fixed20_12 yclk, dram_channels, bandwidth;
9035 fixed20_12 a;
9036
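	/* same formula as dce8_dram_bandwidth(), but using the worst-case
	 * 0.3 display allocation in place of the 0.7 dram efficiency
	 */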
9037 a.full = dfixed_const(1000);
9038 yclk.full = dfixed_const(wm->yclk);
9039 yclk.full = dfixed_div(yclk, a);
9040 dram_channels.full = dfixed_const(wm->dram_channels * 4);
9041 a.full = dfixed_const(10);
9042 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
9043 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
9044 bandwidth.full = dfixed_mul(dram_channels, yclk);
9045 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
9046
9047 return dfixed_trunc(bandwidth);
9048 }
9049
9050 /**
9051 * dce8_data_return_bandwidth - get the data return bandwidth
9052 *
9053 * @wm: watermark calculation data
9054 *
9055 * Calculate the data return bandwidth used for display (CIK).
9056 * Used for display watermark bandwidth calculations
9057 * Returns the data return bandwidth in MBytes/s
9058 */
9059 static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
9060 {
9061 /* Calculate the display Data return Bandwidth */
9062 fixed20_12 return_efficiency; /* 0.8 */
9063 fixed20_12 sclk, bandwidth;
9064 fixed20_12 a;
9065
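	/* return bandwidth (MB/s) = sclk(kHz)/1000 * 32 bytes * 0.8 efficiency */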
9066 a.full = dfixed_const(1000);
9067 sclk.full = dfixed_const(wm->sclk);
9068 sclk.full = dfixed_div(sclk, a);
9069 a.full = dfixed_const(10);
9070 return_efficiency.full = dfixed_const(8);
9071 return_efficiency.full = dfixed_div(return_efficiency, a);
9072 a.full = dfixed_const(32);
9073 bandwidth.full = dfixed_mul(a, sclk);
9074 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
9075
9076 return dfixed_trunc(bandwidth);
9077 }
9078
9079 /**
9080 * dce8_dmif_request_bandwidth - get the dmif bandwidth
9081 *
9082 * @wm: watermark calculation data
9083 *
9084 * Calculate the dmif bandwidth used for display (CIK).
9085 * Used for display watermark bandwidth calculations
9086 * Returns the dmif bandwidth in MBytes/s
9087 */
9088 static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
9089 {
9090 /* Calculate the DMIF Request Bandwidth */
9091 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
9092 fixed20_12 disp_clk, bandwidth;
9093 fixed20_12 a, b;
9094
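	/* dmif bandwidth (MB/s) = disp_clk(kHz)/1000 * 32 bytes * 0.8
	 * request efficiency
	 */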
9095 a.full = dfixed_const(1000);
9096 disp_clk.full = dfixed_const(wm->disp_clk);
9097 disp_clk.full = dfixed_div(disp_clk, a);
9098 a.full = dfixed_const(32);
9099 b.full = dfixed_mul(a, disp_clk);
9100
9101 a.full = dfixed_const(10);
9102 disp_clk_request_efficiency.full = dfixed_const(8);
9103 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
9104
9105 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
9106
9107 return dfixed_trunc(bandwidth);
9108 }
9109
9110 /**
9111 * dce8_available_bandwidth - get the min available bandwidth
9112 *
9113 * @wm: watermark calculation data
9114 *
9115 * Calculate the min available bandwidth used for display (CIK).
9116 * Used for display watermark bandwidth calculations
9117 * Returns the min available bandwidth in MBytes/s
9118 */
9119 static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
9120 {
9121 	/* Calculate the available bandwidth. The display can use this bandwidth temporarily, but not on average. */
9122 u32 dram_bandwidth = dce8_dram_bandwidth(wm);
9123 u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
9124 u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
9125
9126 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
9127 }
9128
9129 /**
9130 * dce8_average_bandwidth - get the average available bandwidth
9131 *
9132 * @wm: watermark calculation data
9133 *
9134 * Calculate the average available bandwidth used for display (CIK).
9135 * Used for display watermark bandwidth calculations
9136 * Returns the average available bandwidth in MBytes/s
9137 */
9138 static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
9139 {
9140 /* Calculate the display mode Average Bandwidth
9141 * DisplayMode should contain the source and destination dimensions,
9142 * timing, etc.
9143 */
9144 fixed20_12 bpp;
9145 fixed20_12 line_time;
9146 fixed20_12 src_width;
9147 fixed20_12 bandwidth;
9148 fixed20_12 a;
9149
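	/* average bandwidth (MB/s) = src_width * bytes_per_pixel * vsc /
	 * line_time(us), with line_time = (active + blank) ns / 1000
	 */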
9150 a.full = dfixed_const(1000);
9151 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
9152 line_time.full = dfixed_div(line_time, a);
9153 bpp.full = dfixed_const(wm->bytes_per_pixel);
9154 src_width.full = dfixed_const(wm->src_width);
9155 bandwidth.full = dfixed_mul(src_width, bpp);
9156 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
9157 bandwidth.full = dfixed_div(bandwidth, line_time);
9158
9159 return dfixed_trunc(bandwidth);
9160 }
9161
9162 /**
9163 * dce8_latency_watermark - get the latency watermark
9164 *
9165 * @wm: watermark calculation data
9166 *
9167 * Calculate the latency watermark (CIK).
9168 * Used for display watermark bandwidth calculations
9169 * Returns the latency watermark in ns
9170 */
9171 static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
9172 {
9173 /* First calculate the latency in ns */
9174 u32 mc_latency = 2000; /* 2000 ns. */
9175 u32 available_bandwidth = dce8_available_bandwidth(wm);
9176 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
9177 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
9178 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
9179 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
9180 (wm->num_heads * cursor_line_pair_return_time);
9181 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
9182 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
9183 u32 tmp, dmif_size = 12288;
9184 fixed20_12 a, b, c;
9185
9186 if (wm->num_heads == 0)
9187 return 0;
9188
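	/* decide how many source lines each destination line may consume:
	 * assume 4 when downscaling past 2:1, downscaling with 3+ vertical
	 * taps, using 5+ taps, or interlacing at 2:1 or more; otherwise 2
	 */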
9189 a.full = dfixed_const(2);
9190 b.full = dfixed_const(1);
9191 if ((wm->vsc.full > a.full) ||
9192 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
9193 (wm->vtaps >= 5) ||
9194 ((wm->vsc.full >= a.full) && wm->interlaced))
9195 max_src_lines_per_dst_line = 4;
9196 else
9197 max_src_lines_per_dst_line = 2;
9198
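	/* lb_fill_bw: line buffer fill rate (MB/s), limited by the per-head
	 * share of the available bandwidth, by the dmif buffer drain rate,
	 * and by disp_clk * bytes_per_pixel
	 */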
9199 a.full = dfixed_const(available_bandwidth);
9200 b.full = dfixed_const(wm->num_heads);
9201 a.full = dfixed_div(a, b);
9202
9203 b.full = dfixed_const(mc_latency + 512);
9204 c.full = dfixed_const(wm->disp_clk);
9205 b.full = dfixed_div(b, c);
9206
9207 c.full = dfixed_const(dmif_size);
9208 b.full = dfixed_div(c, b);
9209
9210 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
9211
9212 b.full = dfixed_const(1000);
9213 c.full = dfixed_const(wm->disp_clk);
9214 b.full = dfixed_div(c, b);
9215 c.full = dfixed_const(wm->bytes_per_pixel);
9216 b.full = dfixed_mul(b, c);
9217
9218 lb_fill_bw = min(tmp, dfixed_trunc(b));
9219
9220 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
9221 b.full = dfixed_const(1000);
9222 c.full = dfixed_const(lb_fill_bw);
9223 b.full = dfixed_div(c, b);
9224 a.full = dfixed_div(a, b);
9225 line_fill_time = dfixed_trunc(a);
9226
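	/* if the line buffer refills faster than a line is scanned out, the
	 * raw latency is the watermark; otherwise the shortfall adds to it
	 */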
9227 if (line_fill_time < wm->active_time)
9228 return latency;
9229 else
9230 return latency + (line_fill_time - wm->active_time);
9232 }
9233
9234 /**
9235 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
9236 * average and available dram bandwidth
9237 *
9238 * @wm: watermark calculation data
9239 *
9240 * Check if the display average bandwidth fits in the display
9241 * dram bandwidth (CIK).
9242 * Used for display watermark bandwidth calculations
9243 * Returns true if the display fits, false if not.
9244 */
9245 static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
9246 {
9247 if (dce8_average_bandwidth(wm) <=
9248 (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
9249 return true;
9250 else
9251 return false;
9252 }
9253
9254 /**
9255 * dce8_average_bandwidth_vs_available_bandwidth - check
9256 * average and available bandwidth
9257 *
9258 * @wm: watermark calculation data
9259 *
9260 * Check if the display average bandwidth fits in the display
9261 * available bandwidth (CIK).
9262 * Used for display watermark bandwidth calculations
9263 * Returns true if the display fits, false if not.
9264 */
9265 static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
9266 {
9267 if (dce8_average_bandwidth(wm) <=
9268 (dce8_available_bandwidth(wm) / wm->num_heads))
9269 return true;
9270 else
9271 return false;
9272 }
9273
9274 /**
9275 * dce8_check_latency_hiding - check latency hiding
9276 *
9277 * @wm: watermark calculation data
9278 *
9279 * Check latency hiding (CIK).
9280 * Used for display watermark bandwidth calculations
9281 * Returns true if the display fits, false if not.
9282 */
9283 static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
9284 {
9285 u32 lb_partitions = wm->lb_size / wm->src_width;
9286 u32 line_time = wm->active_time + wm->blank_time;
9287 u32 latency_tolerant_lines;
9288 u32 latency_hiding;
9289 fixed20_12 a;
9290
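	/* only one line of latency can be hidden when downscaling, or when
	 * the line buffer holds too few partitions for the scaler taps
	 */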
9291 a.full = dfixed_const(1);
9292 if (wm->vsc.full > a.full)
9293 latency_tolerant_lines = 1;
9294 else {
9295 if (lb_partitions <= (wm->vtaps + 1))
9296 latency_tolerant_lines = 1;
9297 else
9298 latency_tolerant_lines = 2;
9299 }
9300
9301 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
9302
9303 if (dce8_latency_watermark(wm) <= latency_hiding)
9304 return true;
9305 else
9306 return false;
9307 }
9308
9309 /**
9310 * dce8_program_watermarks - program display watermarks
9311 *
9312 * @rdev: radeon_device pointer
9313 * @radeon_crtc: the selected display controller
9314 * @lb_size: line buffer size
9315 * @num_heads: number of display controllers in use
9316 *
9317 * Calculate and program the display watermarks for the
9318 * selected display controller (CIK).
9319 */
9320 static void dce8_program_watermarks(struct radeon_device *rdev,
9321 struct radeon_crtc *radeon_crtc,
9322 u32 lb_size, u32 num_heads)
9323 {
9324 struct drm_display_mode *mode = &radeon_crtc->base.mode;
9325 struct dce8_wm_params wm_low, wm_high;
9326 u32 pixel_period;
9327 u32 line_time = 0;
9328 u32 latency_watermark_a = 0, latency_watermark_b = 0;
9329 u32 tmp, wm_mask;
9330
9331 if (radeon_crtc->base.enabled && num_heads && mode) {
9332 pixel_period = 1000000 / (u32)mode->clock;
9333 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
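		/* line_time is clamped to 16 bits so it fits the
		 * LATENCY_HIGH_WATERMARK field programmed below
		 */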
9334
9335 /* watermark for high clocks */
9336 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
9337 rdev->pm.dpm_enabled) {
9338 wm_high.yclk =
9339 radeon_dpm_get_mclk(rdev, false) * 10;
9340 wm_high.sclk =
9341 radeon_dpm_get_sclk(rdev, false) * 10;
9342 } else {
9343 wm_high.yclk = rdev->pm.current_mclk * 10;
9344 wm_high.sclk = rdev->pm.current_sclk * 10;
9345 }
9346
9347 wm_high.disp_clk = mode->clock;
9348 wm_high.src_width = mode->crtc_hdisplay;
9349 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
9350 wm_high.blank_time = line_time - wm_high.active_time;
9351 wm_high.interlaced = false;
9352 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9353 wm_high.interlaced = true;
9354 wm_high.vsc = radeon_crtc->vsc;
9355 wm_high.vtaps = 1;
9356 if (radeon_crtc->rmx_type != RMX_OFF)
9357 wm_high.vtaps = 2;
9358 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
9359 wm_high.lb_size = lb_size;
9360 wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
9361 wm_high.num_heads = num_heads;
9362
9363 /* set for high clocks */
9364 latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
9365
9366 /* possibly force display priority to high */
9367 /* should really do this at mode validation time... */
9368 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
9369 !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
9370 !dce8_check_latency_hiding(&wm_high) ||
9371 (rdev->disp_priority == 2)) {
9372 DRM_DEBUG_KMS("force priority to high\n");
9373 }
9374
9375 /* watermark for low clocks */
9376 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
9377 rdev->pm.dpm_enabled) {
9378 wm_low.yclk =
9379 radeon_dpm_get_mclk(rdev, true) * 10;
9380 wm_low.sclk =
9381 radeon_dpm_get_sclk(rdev, true) * 10;
9382 } else {
9383 wm_low.yclk = rdev->pm.current_mclk * 10;
9384 wm_low.sclk = rdev->pm.current_sclk * 10;
9385 }
9386
9387 wm_low.disp_clk = mode->clock;
9388 wm_low.src_width = mode->crtc_hdisplay;
9389 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
9390 wm_low.blank_time = line_time - wm_low.active_time;
9391 wm_low.interlaced = false;
9392 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9393 wm_low.interlaced = true;
9394 wm_low.vsc = radeon_crtc->vsc;
9395 wm_low.vtaps = 1;
9396 if (radeon_crtc->rmx_type != RMX_OFF)
9397 wm_low.vtaps = 2;
9398 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
9399 wm_low.lb_size = lb_size;
9400 wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
9401 wm_low.num_heads = num_heads;
9402
9403 /* set for low clocks */
9404 latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
9405
9406 /* possibly force display priority to high */
9407 /* should really do this at mode validation time... */
9408 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
9409 !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
9410 !dce8_check_latency_hiding(&wm_low) ||
9411 (rdev->disp_priority == 2)) {
9412 DRM_DEBUG_KMS("force priority to high\n");
9413 }
9414 }
9415
9416 /* select wm A */
9417 wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
9418 tmp = wm_mask;
9419 tmp &= ~LATENCY_WATERMARK_MASK(3);
9420 tmp |= LATENCY_WATERMARK_MASK(1);
9421 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
9422 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
9423 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
9424 LATENCY_HIGH_WATERMARK(line_time)));
9425 /* select wm B */
9426 tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
9427 tmp &= ~LATENCY_WATERMARK_MASK(3);
9428 tmp |= LATENCY_WATERMARK_MASK(2);
9429 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
9430 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
9431 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
9432 LATENCY_HIGH_WATERMARK(line_time)));
9433 /* restore original selection */
9434 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
9435
9436 /* save values for DPM */
9437 radeon_crtc->line_time = line_time;
9438 radeon_crtc->wm_high = latency_watermark_a;
9439 radeon_crtc->wm_low = latency_watermark_b;
9440 }
9441
9442 /**
9443 * dce8_bandwidth_update - program display watermarks
9444 *
9445 * @rdev: radeon_device pointer
9446 *
9447 * Calculate and program the display watermarks and line
9448 * buffer allocation (CIK).
9449 */
9450 void dce8_bandwidth_update(struct radeon_device *rdev)
9451 {
9452 struct drm_display_mode *mode = NULL;
9453 u32 num_heads = 0, lb_size;
9454 int i;
9455
9456 if (!rdev->mode_info.mode_config_initialized)
9457 return;
9458
9459 radeon_update_display_priority(rdev);
9460
9461 for (i = 0; i < rdev->num_crtc; i++) {
9462 if (rdev->mode_info.crtcs[i]->base.enabled)
9463 num_heads++;
9464 }
9465 for (i = 0; i < rdev->num_crtc; i++) {
9466 mode = &rdev->mode_info.crtcs[i]->base.mode;
9467 lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
9468 dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
9469 }
9470 }
9471
9472 /**
9473 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
9474 *
9475 * @rdev: radeon_device pointer
9476 *
9477  * Fetches a GPU clock counter snapshot (CIK).
9478 * Returns the 64 bit clock counter snapshot.
9479 */
9480 uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
9481 {
9482 uint64_t clock;
9483
9484 mutex_lock(&rdev->gpu_clock_mutex);
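	/* ask the RLC to latch the free-running counter so the two 32-bit
	 * halves below read back a consistent snapshot
	 */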
9485 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
9486 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
9487 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
9488 mutex_unlock(&rdev->gpu_clock_mutex);
9489 return clock;
9490 }
9491
9492 static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
9493 u32 cntl_reg, u32 status_reg)
9494 {
9495 int r, i;
9496 struct atom_clock_dividers dividers;
9497 uint32_t tmp;
9498
9499 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
9500 clock, false, &dividers);
9501 if (r)
9502 return r;
9503
9504 tmp = RREG32_SMC(cntl_reg);
9505 tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
9506 tmp |= dividers.post_divider;
9507 WREG32_SMC(cntl_reg, tmp);
9508
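	/* wait up to ~1s (100 * 10 ms) for the new divider to take effect */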
9509 for (i = 0; i < 100; i++) {
9510 if (RREG32_SMC(status_reg) & DCLK_STATUS)
9511 break;
9512 mdelay(10);
9513 }
9514 if (i == 100)
9515 return -ETIMEDOUT;
9516
9517 return 0;
9518 }
9519
9520 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
9521 {
9522 int r = 0;
9523
9524 r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
9525 if (r)
9526 return r;
9527
9528 r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
9529 return r;
9530 }
9531
9532 int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
9533 {
9534 int r, i;
9535 struct atom_clock_dividers dividers;
9536 u32 tmp;
9537
9538 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
9539 ecclk, false, &dividers);
9540 if (r)
9541 return r;
9542
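	/* make sure ECLK is stable before changing the divider; the same
	 * poll below waits for it to settle again afterwards
	 */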
9543 for (i = 0; i < 100; i++) {
9544 if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
9545 break;
9546 mdelay(10);
9547 }
9548 if (i == 100)
9549 return -ETIMEDOUT;
9550
9551 tmp = RREG32_SMC(CG_ECLK_CNTL);
9552 tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK);
9553 tmp |= dividers.post_divider;
9554 WREG32_SMC(CG_ECLK_CNTL, tmp);
9555
9556 for (i = 0; i < 100; i++) {
9557 if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
9558 break;
9559 mdelay(10);
9560 }
9561 if (i == 100)
9562 return -ETIMEDOUT;
9563
9564 return 0;
9565 }
9566
9567 static void cik_pcie_gen3_enable(struct radeon_device *rdev)
9568 {
9569 struct pci_dev *root = rdev->pdev->bus->self;
9570 int bridge_pos, gpu_pos;
9571 u32 speed_cntl, mask, current_data_rate;
9572 int ret, i;
9573 u16 tmp16;
9574
9575 if (pci_is_root_bus(rdev->pdev->bus))
9576 return;
9577
9578 if (radeon_pcie_gen2 == 0)
9579 return;
9580
9581 if (rdev->flags & RADEON_IS_IGP)
9582 return;
9583
9584 if (!(rdev->flags & RADEON_IS_PCIE))
9585 return;
9586
9587 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
9588 if (ret != 0)
9589 return;
9590
9591 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
9592 return;
9593
9594 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
9595 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
9596 LC_CURRENT_DATA_RATE_SHIFT;
9597 if (mask & DRM_PCIE_SPEED_80) {
9598 if (current_data_rate == 2) {
9599 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
9600 return;
9601 }
9602 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
9603 } else if (mask & DRM_PCIE_SPEED_50) {
9604 if (current_data_rate == 1) {
9605 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
9606 return;
9607 }
9608 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
9609 }
9610
9611 bridge_pos = pci_pcie_cap(root);
9612 if (!bridge_pos)
9613 return;
9614
9615 gpu_pos = pci_pcie_cap(rdev->pdev);
9616 if (!gpu_pos)
9617 return;
9618
9619 if (mask & DRM_PCIE_SPEED_80) {
9620 /* re-try equalization if gen3 is not already enabled */
9621 if (current_data_rate != 2) {
9622 u16 bridge_cfg, gpu_cfg;
9623 u16 bridge_cfg2, gpu_cfg2;
9624 u32 max_lw, current_lw, tmp;
9625
9626 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
9627 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
9628
9629 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
9630 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
9631
9632 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
9633 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
9634
9635 tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
9636 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
9637 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
9638
9639 if (current_lw < max_lw) {
9640 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
9641 if (tmp & LC_RENEGOTIATION_SUPPORT) {
9642 tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
9643 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
9644 tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
9645 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
9646 }
9647 }
9648
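			/* retry equalization up to 10 times, saving and
			 * restoring the bridge and GPU link control state
			 * around each quiesced redo-EQ cycle
			 */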
9649 for (i = 0; i < 10; i++) {
9650 /* check status */
9651 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
9652 if (tmp16 & PCI_EXP_DEVSTA_TRPND)
9653 break;
9654
9655 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
9656 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
9657
9658 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
9659 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
9660
9661 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
9662 tmp |= LC_SET_QUIESCE;
9663 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
9664
9665 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
9666 tmp |= LC_REDO_EQ;
9667 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
9668
9669 mdelay(100);
9670
9671 /* linkctl */
9672 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
9673 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
9674 tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
9675 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
9676
9677 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
9678 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
9679 tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
9680 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
9681
9682 /* linkctl2 */
9683 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
9684 tmp16 &= ~((1 << 4) | (7 << 9));
9685 tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
9686 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
9687
9688 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
9689 tmp16 &= ~((1 << 4) | (7 << 9));
9690 tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
9691 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
9692
9693 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
9694 tmp &= ~LC_SET_QUIESCE;
9695 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
9696 }
9697 }
9698 }
9699
9700 /* set the link speed */
9701 speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
9702 speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
9703 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
9704
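	/* program the target link speed (LNKCTL2[3:0]):
	 * 1 = 2.5GT/s, 2 = 5.0GT/s, 3 = 8.0GT/s
	 */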
9705 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
9706 tmp16 &= ~0xf;
9707 if (mask & DRM_PCIE_SPEED_80)
9708 tmp16 |= 3; /* gen3 */
9709 else if (mask & DRM_PCIE_SPEED_50)
9710 tmp16 |= 2; /* gen2 */
9711 else
9712 tmp16 |= 1; /* gen1 */
9713 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
9714
9715 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
9716 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
9717 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
9718
9719 for (i = 0; i < rdev->usec_timeout; i++) {
9720 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
9721 if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
9722 break;
9723 udelay(1);
9724 }
9725 }
9726
9727 static void cik_program_aspm(struct radeon_device *rdev)
9728 {
9729 u32 data, orig;
9730 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
9731 bool disable_clkreq = false;
9732
9733 if (radeon_aspm == 0)
9734 return;
9735
9736 /* XXX double check IGPs */
9737 if (rdev->flags & RADEON_IS_IGP)
9738 return;
9739
9740 if (!(rdev->flags & RADEON_IS_PCIE))
9741 return;
9742
9743 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
9744 data &= ~LC_XMIT_N_FTS_MASK;
9745 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
9746 if (orig != data)
9747 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
9748
9749 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
9750 data |= LC_GO_TO_RECOVERY;
9751 if (orig != data)
9752 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
9753
9754 orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
9755 data |= P_IGNORE_EDB_ERR;
9756 if (orig != data)
9757 WREG32_PCIE_PORT(PCIE_P_CNTL, data);
9758
9759 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
9760 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
9761 data |= LC_PMI_TO_L1_DIS;
9762 if (!disable_l0s)
9763 data |= LC_L0S_INACTIVITY(7);
9764
9765 if (!disable_l1) {
9766 data |= LC_L1_INACTIVITY(7);
9767 data &= ~LC_PMI_TO_L1_DIS;
9768 if (orig != data)
9769 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
9770
9771 if (!disable_plloff_in_l1) {
9772 bool clk_req_support;
9773
9774 orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
9775 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
9776 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
9777 if (orig != data)
9778 WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
9779
9780 orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
9781 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
9782 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
9783 if (orig != data)
9784 WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
9785
9786 orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
9787 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
9788 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
9789 if (orig != data)
9790 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
9791
9792 orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
9793 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
9794 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
9795 if (orig != data)
9796 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
9797
9798 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
9799 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
9800 data |= LC_DYN_LANES_PWR_STATE(3);
9801 if (orig != data)
9802 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
9803
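			/* CLKREQ# based power saving is only usable when the
			 * upstream bridge advertises clock power management
			 * in its link capabilities
			 */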
9804 if (!disable_clkreq &&
9805 !pci_is_root_bus(rdev->pdev->bus)) {
9806 struct pci_dev *root = rdev->pdev->bus->self;
9807 u32 lnkcap;
9808
9809 clk_req_support = false;
9810 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
9811 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
9812 clk_req_support = true;
9813 } else {
9814 clk_req_support = false;
9815 }
9816
9817 if (clk_req_support) {
9818 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
9819 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
9820 if (orig != data)
9821 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
9822
9823 orig = data = RREG32_SMC(THM_CLK_CNTL);
9824 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
9825 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
9826 if (orig != data)
9827 WREG32_SMC(THM_CLK_CNTL, data);
9828
9829 orig = data = RREG32_SMC(MISC_CLK_CTRL);
9830 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
9831 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
9832 if (orig != data)
9833 WREG32_SMC(MISC_CLK_CTRL, data);
9834
9835 orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
9836 data &= ~BCLK_AS_XCLK;
9837 if (orig != data)
9838 WREG32_SMC(CG_CLKPIN_CNTL, data);
9839
9840 orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
9841 data &= ~FORCE_BIF_REFCLK_EN;
9842 if (orig != data)
9843 WREG32_SMC(CG_CLKPIN_CNTL_2, data);
9844
9845 orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
9846 data &= ~MPLL_CLKOUT_SEL_MASK;
9847 data |= MPLL_CLKOUT_SEL(4);
9848 if (orig != data)
9849 WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
9850 }
9851 }
9852 } else {
9853 if (orig != data)
9854 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
9855 }
9856
9857 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
9858 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
9859 if (orig != data)
9860 WREG32_PCIE_PORT(PCIE_CNTL2, data);
9861
9862 if (!disable_l0s) {
9863 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
9864 		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
9865 data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
9866 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
9867 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
9868 data &= ~LC_L0S_INACTIVITY_MASK;
9869 if (orig != data)
9870 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
9871 }
9872 }
9873 }
9874 }