/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_cayman.h"
static u32 tn_rlc_save_restore_register_list[] =
{
	/* ... RLC save/restore register offsets elided ... */
};
static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
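
/* "Golden" register tables: each entry is a {register offset, mask, value}
 * triple.  radeon_program_register_sequence() applies the value under the
 * mask (a 0xffffffff mask simply overwrites the register).
 */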
static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};
static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};
static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
};
static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};
static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};
static void ni_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CAYMAN:
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers,
						 (const u32)ARRAY_SIZE(cayman_golden_registers));
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers2,
						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
		break;
	case CHIP_ARUBA:
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9903) ||
		    (rdev->pdev->device == 0x9904) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990A) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990D) ||
		    (rdev->pdev->device == 0x990E) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9913) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9918)) {
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers,
							 (const u32)ARRAY_SIZE(dvst_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		} else {
			radeon_program_register_sequence(rdev,
							 scrapper_golden_registers,
							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		}
		break;
	default:
		break;
	}
}
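
/* MC "IO debug" sequences: each row is an {MC_SEQ_IO_DEBUG index, value}
 * pair that ni_mc_load_microcode() writes through MC_SEQ_IO_DEBUG_INDEX /
 * MC_SEQ_IO_DEBUG_DATA before streaming in the MC ucode.
 */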
#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};
static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};
static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};
static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
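
/* Stream the MC (memory controller) microcode into the sequencer: halt the
 * engine, program the IO debug pairs above, write the ucode words through
 * MC_SEQ_SUP_PGM, restart the engine and wait for memory training to finish.
 * Only GDDR5 dGPUs need this; IGPs (ARUBA/TN) carry no MC ucode.
 */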
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
		WREG32(MC_SHARED_BLACKOUT_CNTL, 1);

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
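
/* Fetch the PFP, ME, RLC and (for dGPUs) MC/SMC firmware images for the
 * detected ASIC via request_firmware() and sanity-check their sizes; on any
 * failure every firmware reference acquired so far is released again.
 */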
int ni_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	size_t smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default:
		BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}
	}

	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->smc_fw->size != smc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}
int tn_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
	int actual_temp = (temp / 8) - 49;

	/* report the temperature in millidegrees Celsius */
	return actual_temp * 1000;
}
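
/* cayman_gpu_init() below derives the shader-engine/SIMD/render-backend
 * topology for the detected ASIC (by PCI device ID on ARUBA parts), programs
 * the tiling configuration, and sets the 3D engine's hardware defaults.
 */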
836 static void cayman_gpu_init(struct radeon_device
*rdev
)
838 u32 gb_addr_config
= 0;
839 u32 mc_shared_chmap
, mc_arb_ramcfg
;
840 u32 cgts_tcc_disable
;
843 u32 cgts_sm_ctrl_reg
;
844 u32 hdp_host_path_cntl
;
846 u32 disabled_rb_mask
;
849 switch (rdev
->family
) {
851 rdev
->config
.cayman
.max_shader_engines
= 2;
852 rdev
->config
.cayman
.max_pipes_per_simd
= 4;
853 rdev
->config
.cayman
.max_tile_pipes
= 8;
854 rdev
->config
.cayman
.max_simds_per_se
= 12;
855 rdev
->config
.cayman
.max_backends_per_se
= 4;
856 rdev
->config
.cayman
.max_texture_channel_caches
= 8;
857 rdev
->config
.cayman
.max_gprs
= 256;
858 rdev
->config
.cayman
.max_threads
= 256;
859 rdev
->config
.cayman
.max_gs_threads
= 32;
860 rdev
->config
.cayman
.max_stack_entries
= 512;
861 rdev
->config
.cayman
.sx_num_of_sets
= 8;
862 rdev
->config
.cayman
.sx_max_export_size
= 256;
863 rdev
->config
.cayman
.sx_max_export_pos_size
= 64;
864 rdev
->config
.cayman
.sx_max_export_smx_size
= 192;
865 rdev
->config
.cayman
.max_hw_contexts
= 8;
866 rdev
->config
.cayman
.sq_num_cf_insts
= 2;
868 rdev
->config
.cayman
.sc_prim_fifo_size
= 0x100;
869 rdev
->config
.cayman
.sc_hiz_tile_fifo_size
= 0x30;
870 rdev
->config
.cayman
.sc_earlyz_tile_fifo_size
= 0x130;
871 gb_addr_config
= CAYMAN_GB_ADDR_CONFIG_GOLDEN
;
875 rdev
->config
.cayman
.max_shader_engines
= 1;
876 rdev
->config
.cayman
.max_pipes_per_simd
= 4;
877 rdev
->config
.cayman
.max_tile_pipes
= 2;
878 if ((rdev
->pdev
->device
== 0x9900) ||
879 (rdev
->pdev
->device
== 0x9901) ||
880 (rdev
->pdev
->device
== 0x9905) ||
881 (rdev
->pdev
->device
== 0x9906) ||
882 (rdev
->pdev
->device
== 0x9907) ||
883 (rdev
->pdev
->device
== 0x9908) ||
884 (rdev
->pdev
->device
== 0x9909) ||
885 (rdev
->pdev
->device
== 0x990B) ||
886 (rdev
->pdev
->device
== 0x990C) ||
887 (rdev
->pdev
->device
== 0x990F) ||
888 (rdev
->pdev
->device
== 0x9910) ||
889 (rdev
->pdev
->device
== 0x9917) ||
890 (rdev
->pdev
->device
== 0x9999) ||
891 (rdev
->pdev
->device
== 0x999C)) {
892 rdev
->config
.cayman
.max_simds_per_se
= 6;
893 rdev
->config
.cayman
.max_backends_per_se
= 2;
894 } else if ((rdev
->pdev
->device
== 0x9903) ||
895 (rdev
->pdev
->device
== 0x9904) ||
896 (rdev
->pdev
->device
== 0x990A) ||
897 (rdev
->pdev
->device
== 0x990D) ||
898 (rdev
->pdev
->device
== 0x990E) ||
899 (rdev
->pdev
->device
== 0x9913) ||
900 (rdev
->pdev
->device
== 0x9918) ||
901 (rdev
->pdev
->device
== 0x999D)) {
902 rdev
->config
.cayman
.max_simds_per_se
= 4;
903 rdev
->config
.cayman
.max_backends_per_se
= 2;
904 } else if ((rdev
->pdev
->device
== 0x9919) ||
905 (rdev
->pdev
->device
== 0x9990) ||
906 (rdev
->pdev
->device
== 0x9991) ||
907 (rdev
->pdev
->device
== 0x9994) ||
908 (rdev
->pdev
->device
== 0x9995) ||
909 (rdev
->pdev
->device
== 0x9996) ||
910 (rdev
->pdev
->device
== 0x999A) ||
911 (rdev
->pdev
->device
== 0x99A0)) {
912 rdev
->config
.cayman
.max_simds_per_se
= 3;
913 rdev
->config
.cayman
.max_backends_per_se
= 1;
915 rdev
->config
.cayman
.max_simds_per_se
= 2;
916 rdev
->config
.cayman
.max_backends_per_se
= 1;
918 rdev
->config
.cayman
.max_texture_channel_caches
= 2;
919 rdev
->config
.cayman
.max_gprs
= 256;
920 rdev
->config
.cayman
.max_threads
= 256;
921 rdev
->config
.cayman
.max_gs_threads
= 32;
922 rdev
->config
.cayman
.max_stack_entries
= 512;
923 rdev
->config
.cayman
.sx_num_of_sets
= 8;
924 rdev
->config
.cayman
.sx_max_export_size
= 256;
925 rdev
->config
.cayman
.sx_max_export_pos_size
= 64;
926 rdev
->config
.cayman
.sx_max_export_smx_size
= 192;
927 rdev
->config
.cayman
.max_hw_contexts
= 8;
928 rdev
->config
.cayman
.sq_num_cf_insts
= 2;
930 rdev
->config
.cayman
.sc_prim_fifo_size
= 0x40;
931 rdev
->config
.cayman
.sc_hiz_tile_fifo_size
= 0x30;
932 rdev
->config
.cayman
.sc_earlyz_tile_fifo_size
= 0x130;
933 gb_addr_config
= ARUBA_GB_ADDR_CONFIG_GOLDEN
;
938 for (i
= 0, j
= 0; i
< 32; i
++, j
+= 0x18) {
939 WREG32((0x2c14 + j
), 0x00000000);
940 WREG32((0x2c18 + j
), 0x00000000);
941 WREG32((0x2c1c + j
), 0x00000000);
942 WREG32((0x2c20 + j
), 0x00000000);
943 WREG32((0x2c24 + j
), 0x00000000);
946 WREG32(GRBM_CNTL
, GRBM_READ_TIMEOUT(0xff));
948 evergreen_fix_pci_max_read_req_size(rdev
);
950 mc_shared_chmap
= RREG32(MC_SHARED_CHMAP
);
951 mc_arb_ramcfg
= RREG32(MC_ARB_RAMCFG
);
953 tmp
= (mc_arb_ramcfg
& NOOFCOLS_MASK
) >> NOOFCOLS_SHIFT
;
954 rdev
->config
.cayman
.mem_row_size_in_kb
= (4 * (1 << (8 + tmp
))) / 1024;
955 if (rdev
->config
.cayman
.mem_row_size_in_kb
> 4)
956 rdev
->config
.cayman
.mem_row_size_in_kb
= 4;
957 /* XXX use MC settings? */
958 rdev
->config
.cayman
.shader_engine_tile_size
= 32;
959 rdev
->config
.cayman
.num_gpus
= 1;
960 rdev
->config
.cayman
.multi_gpu_tile_size
= 64;
962 tmp
= (gb_addr_config
& NUM_PIPES_MASK
) >> NUM_PIPES_SHIFT
;
963 rdev
->config
.cayman
.num_tile_pipes
= (1 << tmp
);
964 tmp
= (gb_addr_config
& PIPE_INTERLEAVE_SIZE_MASK
) >> PIPE_INTERLEAVE_SIZE_SHIFT
;
965 rdev
->config
.cayman
.mem_max_burst_length_bytes
= (tmp
+ 1) * 256;
966 tmp
= (gb_addr_config
& NUM_SHADER_ENGINES_MASK
) >> NUM_SHADER_ENGINES_SHIFT
;
967 rdev
->config
.cayman
.num_shader_engines
= tmp
+ 1;
968 tmp
= (gb_addr_config
& NUM_GPUS_MASK
) >> NUM_GPUS_SHIFT
;
969 rdev
->config
.cayman
.num_gpus
= tmp
+ 1;
970 tmp
= (gb_addr_config
& MULTI_GPU_TILE_SIZE_MASK
) >> MULTI_GPU_TILE_SIZE_SHIFT
;
971 rdev
->config
.cayman
.multi_gpu_tile_size
= 1 << tmp
;
972 tmp
= (gb_addr_config
& ROW_SIZE_MASK
) >> ROW_SIZE_SHIFT
;
973 rdev
->config
.cayman
.mem_row_size_in_kb
= 1 << tmp
;
976 /* setup tiling info dword. gb_addr_config is not adequate since it does
977 * not have bank info, so create a custom tiling dword.
980 * bits 11:8 group_size
981 * bits 15:12 row_size
983 rdev
->config
.cayman
.tile_config
= 0;
984 switch (rdev
->config
.cayman
.num_tile_pipes
) {
987 rdev
->config
.cayman
.tile_config
|= (0 << 0);
990 rdev
->config
.cayman
.tile_config
|= (1 << 0);
993 rdev
->config
.cayman
.tile_config
|= (2 << 0);
996 rdev
->config
.cayman
.tile_config
|= (3 << 0);
1000 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
1001 if (rdev
->flags
& RADEON_IS_IGP
)
1002 rdev
->config
.cayman
.tile_config
|= 1 << 4;
1004 switch ((mc_arb_ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
) {
1005 case 0: /* four banks */
1006 rdev
->config
.cayman
.tile_config
|= 0 << 4;
1008 case 1: /* eight banks */
1009 rdev
->config
.cayman
.tile_config
|= 1 << 4;
1011 case 2: /* sixteen banks */
1013 rdev
->config
.cayman
.tile_config
|= 2 << 4;
1017 rdev
->config
.cayman
.tile_config
|=
1018 ((gb_addr_config
& PIPE_INTERLEAVE_SIZE_MASK
) >> PIPE_INTERLEAVE_SIZE_SHIFT
) << 8;
1019 rdev
->config
.cayman
.tile_config
|=
1020 ((gb_addr_config
& ROW_SIZE_MASK
) >> ROW_SIZE_SHIFT
) << 12;
1023 for (i
= (rdev
->config
.cayman
.max_shader_engines
- 1); i
>= 0; i
--) {
1024 u32 rb_disable_bitmap
;
1026 WREG32(GRBM_GFX_INDEX
, INSTANCE_BROADCAST_WRITES
| SE_INDEX(i
));
1027 WREG32(RLC_GFX_INDEX
, INSTANCE_BROADCAST_WRITES
| SE_INDEX(i
));
1028 rb_disable_bitmap
= (RREG32(CC_RB_BACKEND_DISABLE
) & 0x00ff0000) >> 16;
1030 tmp
|= rb_disable_bitmap
;
1032 /* enabled rb are just the one not disabled :) */
1033 disabled_rb_mask
= tmp
;
1035 for (i
= 0; i
< (rdev
->config
.cayman
.max_backends_per_se
* rdev
->config
.cayman
.max_shader_engines
); i
++)
1037 /* if all the backends are disabled, fix it up here */
1038 if ((disabled_rb_mask
& tmp
) == tmp
) {
1039 for (i
= 0; i
< (rdev
->config
.cayman
.max_backends_per_se
* rdev
->config
.cayman
.max_shader_engines
); i
++)
1040 disabled_rb_mask
&= ~(1 << i
);
1043 WREG32(GRBM_GFX_INDEX
, INSTANCE_BROADCAST_WRITES
| SE_BROADCAST_WRITES
);
1044 WREG32(RLC_GFX_INDEX
, INSTANCE_BROADCAST_WRITES
| SE_BROADCAST_WRITES
);
1046 WREG32(GB_ADDR_CONFIG
, gb_addr_config
);
1047 WREG32(DMIF_ADDR_CONFIG
, gb_addr_config
);
1048 if (ASIC_IS_DCE6(rdev
))
1049 WREG32(DMIF_ADDR_CALC
, gb_addr_config
);
1050 WREG32(HDP_ADDR_CONFIG
, gb_addr_config
);
1051 WREG32(DMA_TILING_CONFIG
+ DMA0_REGISTER_OFFSET
, gb_addr_config
);
1052 WREG32(DMA_TILING_CONFIG
+ DMA1_REGISTER_OFFSET
, gb_addr_config
);
1053 WREG32(UVD_UDEC_ADDR_CONFIG
, gb_addr_config
);
1054 WREG32(UVD_UDEC_DB_ADDR_CONFIG
, gb_addr_config
);
1055 WREG32(UVD_UDEC_DBW_ADDR_CONFIG
, gb_addr_config
);
1057 if ((rdev
->config
.cayman
.max_backends_per_se
== 1) &&
1058 (rdev
->flags
& RADEON_IS_IGP
)) {
1059 if ((disabled_rb_mask
& 3) == 1) {
1060 /* RB0 disabled, RB1 enabled */
1063 /* RB1 disabled, RB0 enabled */
1067 tmp
= gb_addr_config
& NUM_PIPES_MASK
;
1068 tmp
= r6xx_remap_render_backend(rdev
, tmp
,
1069 rdev
->config
.cayman
.max_backends_per_se
*
1070 rdev
->config
.cayman
.max_shader_engines
,
1071 CAYMAN_MAX_BACKENDS
, disabled_rb_mask
);
1073 WREG32(GB_BACKEND_MAP
, tmp
);
1075 cgts_tcc_disable
= 0xffff0000;
1076 for (i
= 0; i
< rdev
->config
.cayman
.max_texture_channel_caches
; i
++)
1077 cgts_tcc_disable
&= ~(1 << (16 + i
));
1078 WREG32(CGTS_TCC_DISABLE
, cgts_tcc_disable
);
1079 WREG32(CGTS_SYS_TCC_DISABLE
, cgts_tcc_disable
);
1080 WREG32(CGTS_USER_SYS_TCC_DISABLE
, cgts_tcc_disable
);
1081 WREG32(CGTS_USER_TCC_DISABLE
, cgts_tcc_disable
);
1083 /* reprogram the shader complex */
1084 cgts_sm_ctrl_reg
= RREG32(CGTS_SM_CTRL_REG
);
1085 for (i
= 0; i
< 16; i
++)
1086 WREG32(CGTS_SM_CTRL_REG
, OVERRIDE
);
1087 WREG32(CGTS_SM_CTRL_REG
, cgts_sm_ctrl_reg
);
1089 /* set HW defaults for 3D engine */
1090 WREG32(CP_MEQ_THRESHOLDS
, MEQ1_START(0x30) | MEQ2_START(0x60));
1092 sx_debug_1
= RREG32(SX_DEBUG_1
);
1093 sx_debug_1
|= ENABLE_NEW_SMX_ADDRESS
;
1094 WREG32(SX_DEBUG_1
, sx_debug_1
);
1096 smx_dc_ctl0
= RREG32(SMX_DC_CTL0
);
1097 smx_dc_ctl0
&= ~NUMBER_OF_SETS(0x1ff);
1098 smx_dc_ctl0
|= NUMBER_OF_SETS(rdev
->config
.cayman
.sx_num_of_sets
);
1099 WREG32(SMX_DC_CTL0
, smx_dc_ctl0
);
1101 WREG32(SPI_CONFIG_CNTL_1
, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE
);
1103 /* need to be explicitly zero-ed */
1104 WREG32(VGT_OFFCHIP_LDS_BASE
, 0);
1105 WREG32(SQ_LSTMP_RING_BASE
, 0);
1106 WREG32(SQ_HSTMP_RING_BASE
, 0);
1107 WREG32(SQ_ESTMP_RING_BASE
, 0);
1108 WREG32(SQ_GSTMP_RING_BASE
, 0);
1109 WREG32(SQ_VSTMP_RING_BASE
, 0);
1110 WREG32(SQ_PSTMP_RING_BASE
, 0);
1112 WREG32(TA_CNTL_AUX
, DISABLE_CUBE_ANISO
);
1114 WREG32(SX_EXPORT_BUFFER_SIZES
, (COLOR_BUFFER_SIZE((rdev
->config
.cayman
.sx_max_export_size
/ 4) - 1) |
1115 POSITION_BUFFER_SIZE((rdev
->config
.cayman
.sx_max_export_pos_size
/ 4) - 1) |
1116 SMX_BUFFER_SIZE((rdev
->config
.cayman
.sx_max_export_smx_size
/ 4) - 1)));
1118 WREG32(PA_SC_FIFO_SIZE
, (SC_PRIM_FIFO_SIZE(rdev
->config
.cayman
.sc_prim_fifo_size
) |
1119 SC_HIZ_TILE_FIFO_SIZE(rdev
->config
.cayman
.sc_hiz_tile_fifo_size
) |
1120 SC_EARLYZ_TILE_FIFO_SIZE(rdev
->config
.cayman
.sc_earlyz_tile_fifo_size
)));
1123 WREG32(VGT_NUM_INSTANCES
, 1);
1125 WREG32(CP_PERFMON_CNTL
, 0);
1127 WREG32(SQ_MS_FIFO_SIZES
, (CACHE_FIFO_SIZE(16 * rdev
->config
.cayman
.sq_num_cf_insts
) |
1128 FETCH_FIFO_HIWATER(0x4) |
1129 DONE_FIFO_HIWATER(0xe0) |
1130 ALU_UPDATE_FIFO_HIWATER(0x8)));
1132 WREG32(SQ_GPR_RESOURCE_MGMT_1
, NUM_CLAUSE_TEMP_GPRS(4));
1133 WREG32(SQ_CONFIG
, (VC_ENABLE
|
1138 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
, DYN_GPR_ENABLE
);
1140 WREG32(PA_SC_FORCE_EOV_MAX_CNTS
, (FORCE_EOV_MAX_CLK_CNT(4095) |
1141 FORCE_EOV_MAX_REZ_CNT(255)));
1143 WREG32(VGT_CACHE_INVALIDATION
, CACHE_INVALIDATION(VC_AND_TC
) |
1144 AUTO_INVLD_EN(ES_AND_GS_AUTO
));
1146 WREG32(VGT_GS_VERTEX_REUSE
, 16);
1147 WREG32(PA_SC_LINE_STIPPLE_STATE
, 0);
1149 WREG32(CB_PERF_CTR0_SEL_0
, 0);
1150 WREG32(CB_PERF_CTR0_SEL_1
, 0);
1151 WREG32(CB_PERF_CTR1_SEL_0
, 0);
1152 WREG32(CB_PERF_CTR1_SEL_1
, 0);
1153 WREG32(CB_PERF_CTR2_SEL_0
, 0);
1154 WREG32(CB_PERF_CTR2_SEL_1
, 0);
1155 WREG32(CB_PERF_CTR3_SEL_0
, 0);
1156 WREG32(CB_PERF_CTR3_SEL_1
, 0);
1158 tmp
= RREG32(HDP_MISC_CNTL
);
1159 tmp
|= HDP_FLUSH_INVALIDATE_CACHE
;
1160 WREG32(HDP_MISC_CNTL
, tmp
);
1162 hdp_host_path_cntl
= RREG32(HDP_HOST_PATH_CNTL
);
1163 WREG32(HDP_HOST_PATH_CNTL
, hdp_host_path_cntl
);
1165 WREG32(PA_CL_ENHANCE
, CLIP_VTX_REORDER_ENA
| NUM_CLIP_SEQ(3));
1169 /* set clockgating golden values on TN */
1170 if (rdev
->family
== CHIP_ARUBA
) {
1171 tmp
= RREG32_CG(CG_CGTT_LOCAL_0
);
1173 WREG32_CG(CG_CGTT_LOCAL_0
, tmp
);
1174 tmp
= RREG32_CG(CG_CGTT_LOCAL_1
);
1176 WREG32_CG(CG_CGTT_LOCAL_1
, tmp
);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
1192 static int cayman_pcie_gart_enable(struct radeon_device
*rdev
)
1196 if (rdev
->gart
.robj
== NULL
) {
1197 dev_err(rdev
->dev
, "No VRAM object for PCIE GART.\n");
1200 r
= radeon_gart_table_vram_pin(rdev
);
1203 radeon_gart_restore(rdev
);
1204 /* Setup TLB control */
1205 WREG32(MC_VM_MX_L1_TLB_CNTL
,
1208 ENABLE_L1_FRAGMENT_PROCESSING
|
1209 SYSTEM_ACCESS_MODE_NOT_IN_SYS
|
1210 ENABLE_ADVANCED_DRIVER_MODEL
|
1211 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU
);
1212 /* Setup L2 cache */
1213 WREG32(VM_L2_CNTL
, ENABLE_L2_CACHE
|
1214 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE
|
1215 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE
|
1216 EFFECTIVE_L2_QUEUE_SIZE(7) |
1217 CONTEXT1_IDENTITY_ACCESS_MODE(1));
1218 WREG32(VM_L2_CNTL2
, INVALIDATE_ALL_L1_TLBS
| INVALIDATE_L2_CACHE
);
1219 WREG32(VM_L2_CNTL3
, L2_CACHE_BIGK_ASSOCIATIVITY
|
1220 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
1221 /* setup context0 */
1222 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR
, rdev
->mc
.gtt_start
>> 12);
1223 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR
, rdev
->mc
.gtt_end
>> 12);
1224 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
, rdev
->gart
.table_addr
>> 12);
1225 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR
,
1226 (u32
)(rdev
->dummy_page
.addr
>> 12));
1227 WREG32(VM_CONTEXT0_CNTL2
, 0);
1228 WREG32(VM_CONTEXT0_CNTL
, ENABLE_CONTEXT
| PAGE_TABLE_DEPTH(0) |
1229 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
);
1235 /* empty context1-7 */
1236 /* Assign the pt base to something valid for now; the pts used for
1237 * the VMs are determined by the application and setup and assigned
1238 * on the fly in the vm part of radeon_gart.c
1240 for (i
= 1; i
< 8; i
++) {
1241 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR
+ (i
<< 2), 0);
1242 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR
+ (i
<< 2), rdev
->vm_manager
.max_pfn
);
1243 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
+ (i
<< 2),
1244 rdev
->gart
.table_addr
>> 12);
1247 /* enable context1-7 */
1248 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR
,
1249 (u32
)(rdev
->dummy_page
.addr
>> 12));
1250 WREG32(VM_CONTEXT1_CNTL2
, 4);
1251 WREG32(VM_CONTEXT1_CNTL
, ENABLE_CONTEXT
| PAGE_TABLE_DEPTH(1) |
1252 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT
|
1253 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
|
1254 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT
|
1255 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT
|
1256 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT
|
1257 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT
|
1258 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT
|
1259 VALID_PROTECTION_FAULT_ENABLE_DEFAULT
|
1260 READ_PROTECTION_FAULT_ENABLE_INTERRUPT
|
1261 READ_PROTECTION_FAULT_ENABLE_DEFAULT
|
1262 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT
|
1263 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT
);
1265 cayman_pcie_gart_tlb_flush(rdev
);
1266 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1267 (unsigned)(rdev
->mc
.gtt_size
>> 20),
1268 (unsigned long long)rdev
->gart
.table_addr
);
1269 rdev
->gart
.ready
= true;
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}
static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw |
			  (ib->vm ? (ib->vm->id << 24) : 0));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	/* PFP (prefetch parser) microcode, loaded one dword at a time */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* ME (micro engine) microcode */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
1429 static int cayman_cp_start(struct radeon_device
*rdev
)
1431 struct radeon_ring
*ring
= &rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
];
1434 r
= radeon_ring_lock(rdev
, ring
, 7);
1436 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r
);
1439 radeon_ring_write(ring
, PACKET3(PACKET3_ME_INITIALIZE
, 5));
1440 radeon_ring_write(ring
, 0x1);
1441 radeon_ring_write(ring
, 0x0);
1442 radeon_ring_write(ring
, rdev
->config
.cayman
.max_hw_contexts
- 1);
1443 radeon_ring_write(ring
, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1444 radeon_ring_write(ring
, 0);
1445 radeon_ring_write(ring
, 0);
1446 radeon_ring_unlock_commit(rdev
, ring
);
1448 cayman_cp_enable(rdev
, true);
1450 r
= radeon_ring_lock(rdev
, ring
, cayman_default_size
+ 19);
1452 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r
);
1456 /* setup clear context state */
1457 radeon_ring_write(ring
, PACKET3(PACKET3_PREAMBLE_CNTL
, 0));
1458 radeon_ring_write(ring
, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE
);
1460 for (i
= 0; i
< cayman_default_size
; i
++)
1461 radeon_ring_write(ring
, cayman_default_state
[i
]);
1463 radeon_ring_write(ring
, PACKET3(PACKET3_PREAMBLE_CNTL
, 0));
1464 radeon_ring_write(ring
, PACKET3_PREAMBLE_END_CLEAR_STATE
);
1466 /* set clear context state */
1467 radeon_ring_write(ring
, PACKET3(PACKET3_CLEAR_STATE
, 0));
1468 radeon_ring_write(ring
, 0);
1470 /* SQ_VTX_BASE_VTX_LOC */
1471 radeon_ring_write(ring
, 0xc0026f00);
1472 radeon_ring_write(ring
, 0x00000000);
1473 radeon_ring_write(ring
, 0x00000000);
1474 radeon_ring_write(ring
, 0x00000000);
1477 radeon_ring_write(ring
, 0xc0036f00);
1478 radeon_ring_write(ring
, 0x00000bc4);
1479 radeon_ring_write(ring
, 0xffffffff);
1480 radeon_ring_write(ring
, 0xffffffff);
1481 radeon_ring_write(ring
, 0xffffffff);
1483 radeon_ring_write(ring
, 0xc0026900);
1484 radeon_ring_write(ring
, 0x00000316);
1485 radeon_ring_write(ring
, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1486 radeon_ring_write(ring
, 0x00000010); /* */
1488 radeon_ring_unlock_commit(rdev
, ring
);
1490 /* XXX init other rings */
static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}
1503 static int cayman_cp_resume(struct radeon_device
*rdev
)
1505 static const int ridx
[] = {
1506 RADEON_RING_TYPE_GFX_INDEX
,
1507 CAYMAN_RING_TYPE_CP1_INDEX
,
1508 CAYMAN_RING_TYPE_CP2_INDEX
1510 static const unsigned cp_rb_cntl
[] = {
1515 static const unsigned cp_rb_rptr_addr
[] = {
1520 static const unsigned cp_rb_rptr_addr_hi
[] = {
1521 CP_RB0_RPTR_ADDR_HI
,
1522 CP_RB1_RPTR_ADDR_HI
,
1525 static const unsigned cp_rb_base
[] = {
1530 struct radeon_ring
*ring
;
1533 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1534 WREG32(GRBM_SOFT_RESET
, (SOFT_RESET_CP
|
1540 RREG32(GRBM_SOFT_RESET
);
1542 WREG32(GRBM_SOFT_RESET
, 0);
1543 RREG32(GRBM_SOFT_RESET
);
1545 WREG32(CP_SEM_WAIT_TIMER
, 0x0);
1546 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL
, 0x0);
1548 /* Set the write pointer delay */
1549 WREG32(CP_RB_WPTR_DELAY
, 0);
1551 WREG32(CP_DEBUG
, (1 << 27));
1553 /* set the wb address whether it's enabled or not */
1554 WREG32(SCRATCH_ADDR
, ((rdev
->wb
.gpu_addr
+ RADEON_WB_SCRATCH_OFFSET
) >> 8) & 0xFFFFFFFF);
1555 WREG32(SCRATCH_UMSK
, 0xff);
1557 for (i
= 0; i
< 3; ++i
) {
1561 /* Set ring buffer size */
1562 ring
= &rdev
->ring
[ridx
[i
]];
1563 rb_cntl
= drm_order(ring
->ring_size
/ 8);
1564 rb_cntl
|= drm_order(RADEON_GPU_PAGE_SIZE
/8) << 8;
1566 rb_cntl
|= BUF_SWAP_32BIT
;
1568 WREG32(cp_rb_cntl
[i
], rb_cntl
);
1570 /* set the wb address whether it's enabled or not */
1571 addr
= rdev
->wb
.gpu_addr
+ RADEON_WB_CP_RPTR_OFFSET
;
1572 WREG32(cp_rb_rptr_addr
[i
], addr
& 0xFFFFFFFC);
1573 WREG32(cp_rb_rptr_addr_hi
[i
], upper_32_bits(addr
) & 0xFF);
1576 /* set the rb base addr, this causes an internal reset of ALL rings */
1577 for (i
= 0; i
< 3; ++i
) {
1578 ring
= &rdev
->ring
[ridx
[i
]];
1579 WREG32(cp_rb_base
[i
], ring
->gpu_addr
>> 8);
1582 for (i
= 0; i
< 3; ++i
) {
1583 /* Initialize the ring buffer's read and write pointers */
1584 ring
= &rdev
->ring
[ridx
[i
]];
1585 WREG32_P(cp_rb_cntl
[i
], RB_RPTR_WR_ENA
, ~RB_RPTR_WR_ENA
);
1587 ring
->rptr
= ring
->wptr
= 0;
1588 WREG32(ring
->rptr_reg
, ring
->rptr
);
1589 WREG32(ring
->wptr_reg
, ring
->wptr
);
1592 WREG32_P(cp_rb_cntl
[i
], 0, ~RB_RPTR_WR_ENA
);
1595 /* start the rings */
1596 cayman_cp_start(rdev
);
1597 rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
].ready
= true;
1598 rdev
->ring
[CAYMAN_RING_TYPE_CP1_INDEX
].ready
= false;
1599 rdev
->ring
[CAYMAN_RING_TYPE_CP2_INDEX
].ready
= false;
1600 /* this only test cp0 */
1601 r
= radeon_ring_test(rdev
, RADEON_RING_TYPE_GFX_INDEX
, &rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
]);
1603 rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
].ready
= false;
1604 rdev
->ring
[CAYMAN_RING_TYPE_CP1_INDEX
].ready
= false;
1605 rdev
->ring
[CAYMAN_RING_TYPE_CP2_INDEX
].ready
= false;
/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */

/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
				struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	/* dma0 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

	/* dma1 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
1687 * cayman_dma_resume - setup and start the async dma engines
1689 * @rdev: radeon_device pointer
1691 * Set up the DMA ring buffers and enable them. (cayman-SI).
1692 * Returns 0 for success, error for failure.
1694 int cayman_dma_resume(struct radeon_device
*rdev
)
1696 struct radeon_ring
*ring
;
1697 u32 rb_cntl
, dma_cntl
, ib_cntl
;
1699 u32 reg_offset
, wb_offset
;
1703 WREG32(SRBM_SOFT_RESET
, SOFT_RESET_DMA
| SOFT_RESET_DMA1
);
1704 RREG32(SRBM_SOFT_RESET
);
1706 WREG32(SRBM_SOFT_RESET
, 0);
1708 for (i
= 0; i
< 2; i
++) {
1710 ring
= &rdev
->ring
[R600_RING_TYPE_DMA_INDEX
];
1711 reg_offset
= DMA0_REGISTER_OFFSET
;
1712 wb_offset
= R600_WB_DMA_RPTR_OFFSET
;
1714 ring
= &rdev
->ring
[CAYMAN_RING_TYPE_DMA1_INDEX
];
1715 reg_offset
= DMA1_REGISTER_OFFSET
;
1716 wb_offset
= CAYMAN_WB_DMA1_RPTR_OFFSET
;
1719 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL
+ reg_offset
, 0);
1720 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL
+ reg_offset
, 0);
1722 /* Set ring buffer size in dwords */
1723 rb_bufsz
= drm_order(ring
->ring_size
/ 4);
1724 rb_cntl
= rb_bufsz
<< 1;
1726 rb_cntl
|= DMA_RB_SWAP_ENABLE
| DMA_RPTR_WRITEBACK_SWAP_ENABLE
;
1728 WREG32(DMA_RB_CNTL
+ reg_offset
, rb_cntl
);
1730 /* Initialize the ring buffer's read and write pointers */
1731 WREG32(DMA_RB_RPTR
+ reg_offset
, 0);
1732 WREG32(DMA_RB_WPTR
+ reg_offset
, 0);
1734 /* set the wb address whether it's enabled or not */
1735 WREG32(DMA_RB_RPTR_ADDR_HI
+ reg_offset
,
1736 upper_32_bits(rdev
->wb
.gpu_addr
+ wb_offset
) & 0xFF);
1737 WREG32(DMA_RB_RPTR_ADDR_LO
+ reg_offset
,
1738 ((rdev
->wb
.gpu_addr
+ wb_offset
) & 0xFFFFFFFC));
1740 if (rdev
->wb
.enabled
)
1741 rb_cntl
|= DMA_RPTR_WRITEBACK_ENABLE
;
1743 WREG32(DMA_RB_BASE
+ reg_offset
, ring
->gpu_addr
>> 8);
1745 /* enable DMA IBs */
1746 ib_cntl
= DMA_IB_ENABLE
| CMD_VMID_FORCE
;
1748 ib_cntl
|= DMA_IB_SWAP_ENABLE
;
1750 WREG32(DMA_IB_CNTL
+ reg_offset
, ib_cntl
);
1752 dma_cntl
= RREG32(DMA_CNTL
+ reg_offset
);
1753 dma_cntl
&= ~CTXEMPTY_INT_ENABLE
;
1754 WREG32(DMA_CNTL
+ reg_offset
, dma_cntl
);
1757 WREG32(DMA_RB_WPTR
+ reg_offset
, ring
->wptr
<< 2);
1759 ring
->rptr
= RREG32(DMA_RB_RPTR
+ reg_offset
) >> 2;
1761 WREG32(DMA_RB_CNTL
+ reg_offset
, rb_cntl
| DMA_RB_ENABLE
);
1765 r
= radeon_ring_test(rdev
, ring
->idx
, ring
);
1767 ring
->ready
= false;
1772 radeon_ttm_set_active_vram_size(rdev
, rdev
->mc
.real_vram_size
);
/**
 * cayman_dma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (cayman-SI).
 */
void cayman_dma_fini(struct radeon_device *rdev)
{
	cayman_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
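
/* cayman_gpu_check_soft_reset() below inspects the GRBM/SRBM/DMA status
 * registers and returns a mask of RADEON_RESET_* flags naming the blocks
 * that appear hung; cayman_gpu_soft_reset() then resets only those blocks.
 */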
1791 static u32
cayman_gpu_check_soft_reset(struct radeon_device
*rdev
)
1797 tmp
= RREG32(GRBM_STATUS
);
1798 if (tmp
& (PA_BUSY
| SC_BUSY
|
1800 TA_BUSY
| VGT_BUSY
|
1802 GDS_BUSY
| SPI_BUSY
|
1803 IA_BUSY
| IA_BUSY_NO_DMA
))
1804 reset_mask
|= RADEON_RESET_GFX
;
1806 if (tmp
& (CF_RQ_PENDING
| PF_RQ_PENDING
|
1807 CP_BUSY
| CP_COHERENCY_BUSY
))
1808 reset_mask
|= RADEON_RESET_CP
;
1810 if (tmp
& GRBM_EE_BUSY
)
1811 reset_mask
|= RADEON_RESET_GRBM
| RADEON_RESET_GFX
| RADEON_RESET_CP
;
1813 /* DMA_STATUS_REG 0 */
1814 tmp
= RREG32(DMA_STATUS_REG
+ DMA0_REGISTER_OFFSET
);
1815 if (!(tmp
& DMA_IDLE
))
1816 reset_mask
|= RADEON_RESET_DMA
;
1818 /* DMA_STATUS_REG 1 */
1819 tmp
= RREG32(DMA_STATUS_REG
+ DMA1_REGISTER_OFFSET
);
1820 if (!(tmp
& DMA_IDLE
))
1821 reset_mask
|= RADEON_RESET_DMA1
;
1824 tmp
= RREG32(SRBM_STATUS2
);
1826 reset_mask
|= RADEON_RESET_DMA
;
1828 if (tmp
& DMA1_BUSY
)
1829 reset_mask
|= RADEON_RESET_DMA1
;
1832 tmp
= RREG32(SRBM_STATUS
);
1833 if (tmp
& (RLC_RQ_PENDING
| RLC_BUSY
))
1834 reset_mask
|= RADEON_RESET_RLC
;
1837 reset_mask
|= RADEON_RESET_IH
;
1840 reset_mask
|= RADEON_RESET_SEM
;
1842 if (tmp
& GRBM_RQ_PENDING
)
1843 reset_mask
|= RADEON_RESET_GRBM
;
1846 reset_mask
|= RADEON_RESET_VMC
;
1848 if (tmp
& (MCB_BUSY
| MCB_NON_DISPLAY_BUSY
|
1849 MCC_BUSY
| MCD_BUSY
))
1850 reset_mask
|= RADEON_RESET_MC
;
1852 if (evergreen_is_display_hung(rdev
))
1853 reset_mask
|= RADEON_RESET_DISPLAY
;
1856 tmp
= RREG32(VM_L2_STATUS
);
1858 reset_mask
|= RADEON_RESET_VMC
;
1860 /* Skip MC reset as it's mostly likely not hung, just busy */
1861 if (reset_mask
& RADEON_RESET_MC
) {
1862 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask
);
1863 reset_mask
&= ~RADEON_RESET_MC
;
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}

	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SH |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
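/*
 * Top-level reset entry point: sample which blocks look hung, flag the
 * engine as hung in the BIOS scratch register while the soft reset runs,
 * and only clear that flag if the blocks report idle afterwards.
 */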
int cayman_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	cayman_gpu_soft_reset(rdev, reset_mask);

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
/**
 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
/**
 * cayman_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
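/*
 * cayman_startup() is shared by cayman_init() and cayman_resume(): load
 * microcode, program the MC and GART, set up the blitter, RLC and write-back
 * buffers, start the fence drivers and interrupts, then bring up the GFX,
 * DMA and UVD rings before the IB pool, VM manager and audio.
 */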
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
		rdev->rlc.cs_data = cayman_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = rv770_uvd_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}
	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size,
				     R600_WB_UVD_RPTR_OFFSET,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     0, 0xfffff, RADEON_CP_PACKET2);
		if (!r)
			r = r600_uvd_init(rdev);
		if (r)
			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
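/*
 * Resume re-posts the card through the ATOM BIOS tables, reloads the golden
 * registers, and then runs the same cayman_startup() path used at load time.
 */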
int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the tasks needed to bring the GPU back into a
	 * good state.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	ni_init_golden_registers(rdev);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}
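/*
 * Suspend tears things down in roughly the reverse order of cayman_startup():
 * quiesce the CP, DMA and UVD engines before disabling interrupts, the
 * write-back buffer and the GART.
 */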
int cayman_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	r600_uvd_rbc_stop(rdev);
	radeon_uvd_suspend(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);

	return 0;
}
/* Plan is to move initialization in that function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling asic-specific functions. This
 * should also allow us to remove a bunch of callback functions.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	ni_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	r = radeon_uvd_init(rdev);
	if (!r) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		ring->ring_obj = NULL;
		r600_ring_init(rdev, ring, 4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_uvd_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
}
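/*
 * Cayman exposes eight hardware VM contexts. On IGP parts the framebuffer
 * sits at the offset reported by FUS_MC_VM_FB_OFFSET; discrete parts use
 * offset 0.
 */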
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}
/**
 * cayman_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (cayman/TN).
 */
void cayman_vm_decode_fault(struct radeon_device *rdev,
			    u32 status, u32 addr)
{
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char *block;

	switch (mc_id) {
	/* ... most of the mc_id -> block-name cases are elided here ... */
		block = "TC_TFETCH";
		break;
	/* ... */
		block = "TC_VFETCH";
		break;
	/* ... */
	}

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_id);
}
#define R600_ENTRY_VALID   (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
	uint32_t r600_flags = 0;

	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}
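/*
 * Worked example (illustrative, not from the original source): a snooped,
 * readable and writeable system page, i.e.
 *   RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED |
 *   RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE
 * translates to
 *   R600_ENTRY_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
 *   R600_PTE_READABLE | R600_PTE_WRITEABLE
 * which is 0x1 | 0x2 | 0x4 | 0x20 | 0x40 = 0x67 with the definitions above.
 */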
/**
 * cayman_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (cayman/TN).
 */
void cayman_vm_set_page(struct radeon_device *rdev,
			struct radeon_ib *ib,
			uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
		while (count) {
			ndw = 1 + count * 2;
			if (ndw > 0x3FFF)
				ndw = 0x3FFF;

			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		if ((flags & RADEON_VM_PAGE_SYSTEM) ||
		    (count == 1)) {
			while (count) {
				ndw = count * 2;
				if (ndw > 0xFFFFE)
					ndw = 0xFFFFE;

				/* for non-physically contiguous pages (system) */
				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
				ib->ptr[ib->length_dw++] = pe;
				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
					if (flags & RADEON_VM_PAGE_SYSTEM) {
						value = radeon_vm_map_gart(rdev, addr);
						value &= 0xFFFFFFFFFFFFF000ULL;
					} else if (flags & RADEON_VM_PAGE_VALID) {
						value = addr;
					} else {
						value = 0;
					}
					addr += incr;
					value |= r600_flags;
					ib->ptr[ib->length_dw++] = value;
					ib->ptr[ib->length_dw++] = upper_32_bits(value);
				}
			}
			while (ib->length_dw & 0x7)
				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
		} else {
			while (count) {
				ndw = count * 2;
				if (ndw > 0xFFFFE)
					ndw = 0xFFFFE;

				if (flags & RADEON_VM_PAGE_VALID)
					value = addr;
				else
					value = 0;
				/* for physically contiguous pages (vram) */
				ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
				ib->ptr[ib->length_dw++] = pe; /* dst addr */
				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
				ib->ptr[ib->length_dw++] = r600_flags; /* mask */
				ib->ptr[ib->length_dw++] = 0;
				ib->ptr[ib->length_dw++] = value; /* value */
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
				ib->ptr[ib->length_dw++] = incr; /* increment size */
				ib->ptr[ib->length_dw++] = 0;
				pe += ndw * 4;
				addr += (ndw / 2) * incr;
				count -= ndw / 2;
			}
			while (ib->length_dw & 0x7)
				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
		}
	}
}
/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm->id);

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}
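/*
 * Same flush sequence as cayman_vm_flush(), but the async DMA rings use
 * their own packet format, so the page table base, HDP flush and VM
 * invalidate are written through DMA SRBM_WRITE packets instead of PACKET0
 * register writes.
 */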
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm->id