/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "atom.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define GFX7_NUM_GFX_RINGS     1
#define GFX7_NUM_COMPUTE_RINGS 8

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin");
MODULE_FIRMWARE("radeon/bonaire_ce.bin");
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_mec.bin");

MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
MODULE_FIRMWARE("radeon/hawaii_me.bin");
MODULE_FIRMWARE("radeon/hawaii_ce.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_mec.bin");

MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
MODULE_FIRMWARE("radeon/kaveri_me.bin");
MODULE_FIRMWARE("radeon/kaveri_ce.bin");
MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
MODULE_FIRMWARE("radeon/kaveri_mec.bin");
MODULE_FIRMWARE("radeon/kaveri_mec2.bin");

MODULE_FIRMWARE("radeon/kabini_pfp.bin");
MODULE_FIRMWARE("radeon/kabini_me.bin");
MODULE_FIRMWARE("radeon/kabini_ce.bin");
MODULE_FIRMWARE("radeon/kabini_rlc.bin");
MODULE_FIRMWARE("radeon/kabini_mec.bin");

MODULE_FIRMWARE("radeon/mullins_pfp.bin");
MODULE_FIRMWARE("radeon/mullins_me.bin");
MODULE_FIRMWARE("radeon/mullins_ce.bin");
MODULE_FIRMWARE("radeon/mullins_rlc.bin");
MODULE_FIRMWARE("radeon/mullins_mec.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};

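/*
 * RLC save/restore register lists (carried over from radeon's cik.c;
 * not separately documented, so treat the following as a best-effort
 * reading of the encoding): each register entry packs a selector in
 * the upper 16 bits (0x0e00 appears to be the broadcast selector,
 * while 0x4e00, 0x5e00, ... appear to select individual instances)
 * and the register's dword offset (byte offset >> 2) in the lower 16
 * bits, followed by a reserved 0x00000000 dword. The bare 0x3 and 0x5
 * words appear to delimit sub-lists that the RLC ucode consumes in a
 * different format.
 */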
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);

/*
 * Core functions
 */
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);

out:
	if (err) {
		printk(KERN_ERR
		       "gfx7: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}

static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
}

/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes. Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
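/*
 * The tables built below are written out to the GB_TILE_MODE0..31 and
 * GB_MACROTILE_MODE* registers by the WREG32() loops at the end of each
 * case (macrotile index 7 is deliberately skipped); surfaces then simply
 * name an index into these tables rather than carrying their own tiling
 * parameters.
 */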
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states =
		ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	const u32 num_secondary_tile_mode_states =
		ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
	u32 reg_offset, split_equal_to_row_size;
	uint32_t *tile, *macrotile;

	tile = adev->gfx.config.tile_mode_array;
	macrotile = adev->gfx.config.macrotile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
		tile[reg_offset] = 0;
	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
		macrotile[reg_offset] = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_HAWAII:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_MULLINS:
	default:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P2));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	}
}

/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 * @instance: instance to address (0xffffffff = broadcast)
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH. 0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}
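/*
 * gfx_v7_0_select_se_sh() leaves GRBM_GFX_INDEX pointing at the selected
 * SE/SH/instance, so callers hold adev->grbm_idx_mutex around the
 * selection and restore broadcast mode afterwards by calling it again
 * with 0xffffffff for all three arguments (see gfx_v7_0_setup_rb() below).
 */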

/**
 * gfx_v7_0_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * Create a variable length bit mask (CIK).
 * Returns the bitmask.
 */
static u32 gfx_v7_0_create_bitmask(u32 bit_width)
{
	return (u32)((1ULL << bit_width) - 1);
}
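/*
 * Example: gfx_v7_0_create_bitmask(4) == 0x0000000f. The shift is done
 * as 1ULL so that bit_width == 32 yields 0xffffffff rather than the
 * undefined behaviour of a 32-bit shift by 32.
 */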
1623
1624 /**
1625 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1626 *
1627 * @adev: amdgpu_device pointer
1628 *
1629 * Calculates the bitmask of enabled RBs (CIK).
1630 * Returns the enabled RB bitmask.
1631 */
1632 static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1633 {
1634 u32 data, mask;
1635
1636 data = RREG32(mmCC_RB_BACKEND_DISABLE);
1637 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1638
1639 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1640 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1641
1642 mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
1643 adev->gfx.config.max_sh_per_se);
1644
1645 return (~data) & mask;
1646 }
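/* Worked example: with 4 backends per SE and 1 SH per SE the mask is 0xf,
 * so a raw disable value of 0x1 yields an active bitmap of 0xe (RB 0
 * disabled, RBs 1-3 enabled).
 */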
1647
1648 /**
1649 * gfx_v7_0_setup_rb - setup the RBs on the asic
1650 *
1651 * @adev: amdgpu_device pointer
1654 *
1655 * Configures per-SE/SH RB registers (CIK).
1656 */
1657 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1658 {
1659 int i, j;
1660 u32 data;
1661 u32 active_rbs = 0;
1662 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1663 adev->gfx.config.max_sh_per_se;
1664
1665 mutex_lock(&adev->grbm_idx_mutex);
1666 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1667 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1668 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1669 data = gfx_v7_0_get_rb_active_bitmap(adev);
1670 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1671 rb_bitmap_width_per_sh);
1672 }
1673 }
1674 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1675 mutex_unlock(&adev->grbm_idx_mutex);
1676
1677 adev->gfx.config.backend_enable_mask = active_rbs;
1678 adev->gfx.config.num_rbs = hweight32(active_rbs);
1679 }
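/* The loop above packs one rb_bitmap_width_per_sh-wide field per SE/SH
 * pair; e.g. 2 SEs x 2 SHs x 2 RBs per SH yields an 8-bit active_rbs and
 * num_rbs == hweight32(active_rbs) enabled backends overall.
 */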
1680
1681 /**
1682 * gmc_v7_0_init_compute_vmid - initialize the compute VMID SH_MEM registers
1683 *
1684 * @adev: amdgpu_device pointer
1685 *
1686 * Initialize compute vmid sh_mem registers
1687 *
1688 */
1689 #define DEFAULT_SH_MEM_BASES (0x6000)
1690 #define FIRST_COMPUTE_VMID (8)
1691 #define LAST_COMPUTE_VMID (16)
1692 static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1693 {
1694 int i;
1695 uint32_t sh_mem_config;
1696 uint32_t sh_mem_bases;
1697
1698 /*
1699 * Configure apertures:
1700 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
1701 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
1702 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
1703 */
1704 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1705 sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1706 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1707 sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
1708 mutex_lock(&adev->srbm_mutex);
1709 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1710 cik_srbm_select(adev, 0, 0, 0, i);
1711 /* CP and shaders */
1712 WREG32(mmSH_MEM_CONFIG, sh_mem_config);
1713 WREG32(mmSH_MEM_APE1_BASE, 1);
1714 WREG32(mmSH_MEM_APE1_LIMIT, 0);
1715 WREG32(mmSH_MEM_BASES, sh_mem_bases);
1716 }
1717 cik_srbm_select(adev, 0, 0, 0, 0);
1718 mutex_unlock(&adev->srbm_mutex);
1719 }
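/* Per the CIK register layout, SH_MEM_BASES packs the private (scratch)
 * base in its low 16 bits and the shared (LDS) base in bits 16-31, with
 * each aperture placed at base << 48; the 0x6000 default above therefore
 * lands both at 0x6000'0000'0000'0000, matching the aperture map in the
 * comment at the top of this function.
 */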
1720
1721 /**
1722 * gfx_v7_0_gpu_init - setup the 3D engine
1723 *
1724 * @adev: amdgpu_device pointer
1725 *
1726 * Configures the 3D engine and tiling configuration
1727 * registers so that the 3D engine is usable.
1728 */
1729 static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
1730 {
1731 u32 tmp, sh_mem_cfg;
1732 int i;
1733
1734 WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1735
1736 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1737 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1738 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1739
1740 gfx_v7_0_tiling_mode_table_init(adev);
1741
1742 gfx_v7_0_setup_rb(adev);
1743 gfx_v7_0_get_cu_info(adev);
1744
1745 /* set HW defaults for 3D engine */
1746 WREG32(mmCP_MEQ_THRESHOLDS,
1747 (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1748 (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1749
1750 mutex_lock(&adev->grbm_idx_mutex);
1751 /*
1752 * making sure that the following register writes will be broadcasted
1753 * to all the shaders
1754 */
1755 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1756
1757 /* XXX SH_MEM regs */
1758 /* where to put LDS, scratch, GPUVM in FSA64 space */
1759 sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1760 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1761
1762 mutex_lock(&adev->srbm_mutex);
1763 for (i = 0; i < 16; i++) {
1764 cik_srbm_select(adev, 0, 0, 0, i);
1765 /* CP and shaders */
1766 WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1767 WREG32(mmSH_MEM_APE1_BASE, 1);
1768 WREG32(mmSH_MEM_APE1_LIMIT, 0);
1769 WREG32(mmSH_MEM_BASES, 0);
1770 }
1771 cik_srbm_select(adev, 0, 0, 0, 0);
1772 mutex_unlock(&adev->srbm_mutex);
1773
1774 gmc_v7_0_init_compute_vmid(adev);
1775
1776 WREG32(mmSX_DEBUG_1, 0x20);
1777
1778 WREG32(mmTA_CNTL_AUX, 0x00010000);
1779
1780 tmp = RREG32(mmSPI_CONFIG_CNTL);
1781 tmp |= 0x03000000;
1782 WREG32(mmSPI_CONFIG_CNTL, tmp);
1783
1784 WREG32(mmSQ_CONFIG, 1);
1785
1786 WREG32(mmDB_DEBUG, 0);
1787
1788 tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
1789 tmp |= 0x00000400;
1790 WREG32(mmDB_DEBUG2, tmp);
1791
1792 tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
1793 tmp |= 0x00020200;
1794 WREG32(mmDB_DEBUG3, tmp);
1795
1796 tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
1797 tmp |= 0x00018208;
1798 WREG32(mmCB_HW_CONTROL, tmp);
1799
1800 WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1801
1802 WREG32(mmPA_SC_FIFO_SIZE,
1803 ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1804 (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1805 (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1806 (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1807
1808 WREG32(mmVGT_NUM_INSTANCES, 1);
1809
1810 WREG32(mmCP_PERFMON_CNTL, 0);
1811
1812 WREG32(mmSQ_CONFIG, 0);
1813
1814 WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
1815 ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
1816 (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
1817
1818 WREG32(mmVGT_CACHE_INVALIDATION,
1819 (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
1820 (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
1821
1822 WREG32(mmVGT_GS_VERTEX_REUSE, 16);
1823 WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
1824
1825 WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
1826 (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
1827 WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
1828 mutex_unlock(&adev->grbm_idx_mutex);
1829
1830 udelay(50);
1831 }
1832
1833 /*
1834 * GPU scratch registers helpers function.
1835 */
1836 /**
1837 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
1838 *
1839 * @adev: amdgpu_device pointer
1840 *
1841 * Set up the number and offset of the CP scratch registers.
1842 * NOTE: use of CP scratch registers is a legacy interface and
1843 * is not used by default on newer asics (r6xx+). On newer asics,
1844 * memory buffers are used for fences rather than scratch regs.
1845 */
1846 static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
1847 {
1848 int i;
1849
1850 adev->gfx.scratch.num_reg = 7;
1851 adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
1852 for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
1853 adev->gfx.scratch.free[i] = true;
1854 adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
1855 }
1856 }
1857
1858 /**
1859 * gfx_v7_0_ring_test_ring - basic gfx ring test
1860 *
1861 * @adev: amdgpu_device pointer
1862 * @ring: amdgpu_ring structure holding ring information
1863 *
1864 * Allocate a scratch register and write to it using the gfx ring (CIK).
1865 * Provides a basic gfx ring test to verify that the ring is working.
1866 * Used by gfx_v7_0_cp_gfx_resume().
1867 * Returns 0 on success, error on failure.
1868 */
1869 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1870 {
1871 struct amdgpu_device *adev = ring->adev;
1872 uint32_t scratch;
1873 uint32_t tmp = 0;
1874 unsigned i;
1875 int r;
1876
1877 r = amdgpu_gfx_scratch_get(adev, &scratch);
1878 if (r) {
1879 DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
1880 return r;
1881 }
1882 WREG32(scratch, 0xCAFEDEAD);
1883 r = amdgpu_ring_alloc(ring, 3);
1884 if (r) {
1885 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
1886 amdgpu_gfx_scratch_free(adev, scratch);
1887 return r;
1888 }
1889 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1890 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1891 amdgpu_ring_write(ring, 0xDEADBEEF);
1892 amdgpu_ring_commit(ring);
1893
1894 for (i = 0; i < adev->usec_timeout; i++) {
1895 tmp = RREG32(scratch);
1896 if (tmp == 0xDEADBEEF)
1897 break;
1898 DRM_UDELAY(1);
1899 }
1900 if (i < adev->usec_timeout) {
1901 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
1902 } else {
1903 DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
1904 ring->idx, scratch, tmp);
1905 r = -EINVAL;
1906 }
1907 amdgpu_gfx_scratch_free(adev, scratch);
1908 return r;
1909 }
1910
1911 /**
1912 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
1913 *
1914 * @ring: amdgpu_ring structure holding ring information
1916 *
1917 * Emits an hdp flush on the cp.
1918 */
1919 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1920 {
1921 u32 ref_and_mask;
1922 int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
1923
1924 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
1925 switch (ring->me) {
1926 case 1:
1927 ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
1928 break;
1929 case 2:
1930 ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
1931 break;
1932 default:
1933 return;
1934 }
1935 } else {
1936 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
1937 }
1938
1939 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1940 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
1941 WAIT_REG_MEM_FUNCTION(3) | /* == */
1942 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
1943 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
1944 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
1945 amdgpu_ring_write(ring, ref_and_mask);
1946 amdgpu_ring_write(ring, ref_and_mask);
1947 amdgpu_ring_write(ring, 0x20); /* poll interval */
1948 }
1949
1950 /**
1951 * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
1952 *
1953 * @ring: amdgpu_ring structure holding ring information
1955 *
1956 * Emits an hdp invalidate on the cp.
1957 */
1958 static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
1959 {
1960 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1961 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
1962 WRITE_DATA_DST_SEL(0) |
1963 WR_CONFIRM));
1964 amdgpu_ring_write(ring, mmHDP_DEBUG0);
1965 amdgpu_ring_write(ring, 0);
1966 amdgpu_ring_write(ring, 1);
1967 }
1968
1969 /**
1970 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
1971 *
1972 * @ring: amdgpu_ring structure holding ring information
1973 * @addr: GPU address the fence sequence number is written to
 * @seq: fence sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
1974 *
1975 * Emits a fence sequence number on the gfx ring and flushes
1976 * GPU caches.
1977 */
1978 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
1979 u64 seq, unsigned flags)
1980 {
1981 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
1982 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
1983 /* Workaround for cache flush problems. First send a dummy EOP
1984 * event down the pipe with seq one below.
1985 */
1986 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1987 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
1988 EOP_TC_ACTION_EN |
1989 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
1990 EVENT_INDEX(5)));
1991 amdgpu_ring_write(ring, addr & 0xfffffffc);
1992 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
1993 DATA_SEL(1) | INT_SEL(0));
1994 amdgpu_ring_write(ring, lower_32_bits(seq - 1));
1995 amdgpu_ring_write(ring, upper_32_bits(seq - 1));
1996
1997 /* Then send the real EOP event down the pipe. */
1998 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1999 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2000 EOP_TC_ACTION_EN |
2001 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2002 EVENT_INDEX(5)));
2003 amdgpu_ring_write(ring, addr & 0xfffffffc);
2004 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2005 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2006 amdgpu_ring_write(ring, lower_32_bits(seq));
2007 amdgpu_ring_write(ring, upper_32_bits(seq));
2008 }
2009
2010 /**
2011 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2012 *
2013 * @ring: amdgpu_ring structure holding ring information
2014 * @addr: GPU address the fence sequence number is written to
 * @seq: fence sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
2015 *
2016 * Emits a fence sequence number on the compute ring and flushes
2017 * GPU caches.
2018 */
2019 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2020 u64 addr, u64 seq,
2021 unsigned flags)
2022 {
2023 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2024 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2025
2026 /* RELEASE_MEM - flush caches, send int */
2027 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2028 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2029 EOP_TC_ACTION_EN |
2030 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2031 EVENT_INDEX(5)));
2032 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2033 amdgpu_ring_write(ring, addr & 0xfffffffc);
2034 amdgpu_ring_write(ring, upper_32_bits(addr));
2035 amdgpu_ring_write(ring, lower_32_bits(seq));
2036 amdgpu_ring_write(ring, upper_32_bits(seq));
2037 }
2038
2039 /*
2040 * IB stuff
2041 */
2042 /**
2043 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the gfx ring
2044 *
2045 * @ring: amdgpu_ring structure holding ring information
2046 * @ib: amdgpu indirect buffer object
 * @vm_id: the VMID to execute the IB under
 * @ctx_switch: whether to emit a context switch packet before the IB
2047 *
2048 * Emits a DE (drawing engine) or CE (constant engine) IB
2049 * on the gfx ring. IBs are usually generated by userspace
2050 * acceleration drivers and submitted to the kernel for
2051 * scheduling on the ring. This function schedules the IB
2052 * on the gfx ring for execution by the GPU.
2053 */
2054 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2055 struct amdgpu_ib *ib,
2056 unsigned vm_id, bool ctx_switch)
2057 {
2058 u32 header, control = 0;
2059
2060 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2061 if (ctx_switch) {
2062 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2063 amdgpu_ring_write(ring, 0);
2064 }
2065
2066 if (ib->flags & AMDGPU_IB_FLAG_CE)
2067 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2068 else
2069 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2070
2071 control |= ib->length_dw | (vm_id << 24);
2072
2073 amdgpu_ring_write(ring, header);
2074 amdgpu_ring_write(ring,
2075 #ifdef __BIG_ENDIAN
2076 (2 << 0) |
2077 #endif
2078 (ib->gpu_addr & 0xFFFFFFFC));
2079 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2080 amdgpu_ring_write(ring, control);
2081 }
2082
2083 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2084 struct amdgpu_ib *ib,
2085 unsigned vm_id, bool ctx_switch)
2086 {
2087 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
2088
2089 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2090 amdgpu_ring_write(ring,
2091 #ifdef __BIG_ENDIAN
2092 (2 << 0) |
2093 #endif
2094 (ib->gpu_addr & 0xFFFFFFFC));
2095 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2096 amdgpu_ring_write(ring, control);
2097 }
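/* Both IB emit paths above encode the same three payload dwords after the
 * packet header: the low 32 bits of the dword-aligned IB address, the
 * upper 16 address bits, and a control word packing length_dw with the
 * VMID shifted to bit 24 (the compute path additionally sets
 * INDIRECT_BUFFER_VALID).
 */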
2098
2099 /**
2100 * gfx_v7_0_ring_test_ib - basic ring IB test
2101 *
2102 * @ring: amdgpu_ring structure holding ring information
 * @timeout: how long to wait for the IB to complete, in jiffies
2103 *
2104 * Allocate an IB and execute it on the gfx ring (CIK).
2105 * Provides a basic gfx ring test to verify that IBs are working.
2106 * Returns 0 on success, error on failure.
2107 */
2108 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2109 {
2110 struct amdgpu_device *adev = ring->adev;
2111 struct amdgpu_ib ib;
2112 struct fence *f = NULL;
2113 uint32_t scratch;
2114 uint32_t tmp = 0;
2115 long r;
2116
2117 r = amdgpu_gfx_scratch_get(adev, &scratch);
2118 if (r) {
2119 DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
2120 return r;
2121 }
2122 WREG32(scratch, 0xCAFEDEAD);
2123 memset(&ib, 0, sizeof(ib));
2124 r = amdgpu_ib_get(adev, NULL, 256, &ib);
2125 if (r) {
2126 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
2127 goto err1;
2128 }
2129 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2130 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2131 ib.ptr[2] = 0xDEADBEEF;
2132 ib.length_dw = 3;
2133
2134 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
2135 if (r)
2136 goto err2;
2137
2138 r = fence_wait_timeout(f, false, timeout);
2139 if (r == 0) {
2140 DRM_ERROR("amdgpu: IB test timed out\n");
2141 r = -ETIMEDOUT;
2142 goto err2;
2143 } else if (r < 0) {
2144 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
2145 goto err2;
2146 }
2147 tmp = RREG32(scratch);
2148 if (tmp == 0xDEADBEEF) {
2149 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
2150 r = 0;
2151 } else {
2152 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
2153 scratch, tmp);
2154 r = -EINVAL;
2155 }
2156
2157 err2:
2158 amdgpu_ib_free(adev, &ib, NULL);
2159 fence_put(f);
2160 err1:
2161 amdgpu_gfx_scratch_free(adev, scratch);
2162 return r;
2163 }
2164
2165 /*
2166 * CP.
2167 * On CIK, gfx and compute now have independent command processors.
2168 *
2169 * GFX
2170 * Gfx consists of a single ring and can process both gfx jobs and
2171 * compute jobs. The gfx CP consists of three microengines (ME):
2172 * PFP - Pre-Fetch Parser
2173 * ME - Micro Engine
2174 * CE - Constant Engine
2175 * The PFP and ME make up what is considered the Drawing Engine (DE).
2176 * The CE is an asynchronous engine used for updating buffer descriptors
2177 * used by the DE so that they can be loaded into cache in parallel
2178 * while the DE is processing state update packets.
2179 *
2180 * Compute
2181 * The compute CP consists of two microengines (ME):
2182 * MEC1 - Compute MicroEngine 1
2183 * MEC2 - Compute MicroEngine 2
2184 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2185 * The queues are exposed to userspace and are programmed directly
2186 * by the compute runtime.
2187 */
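/* In hardware terms that is num_mec * 4 pipes * 8 queues per pipe, i.e.
 * up to 64 compute queues on a 2-MEC part like Kaveri and 32 on the
 * single-MEC parts (see gfx_v7_0_mec_init() below for what the driver
 * actually exposes).
 */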
2188 /**
2189 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2190 *
2191 * @adev: amdgpu_device pointer
2192 * @enable: enable or disable the MEs
2193 *
2194 * Halts or unhalts the gfx MEs.
2195 */
2196 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2197 {
2198 int i;
2199
2200 if (enable) {
2201 WREG32(mmCP_ME_CNTL, 0);
2202 } else {
2203 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
2204 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2205 adev->gfx.gfx_ring[i].ready = false;
2206 }
2207 udelay(50);
2208 }
2209
2210 /**
2211 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2212 *
2213 * @adev: amdgpu_device pointer
2214 *
2215 * Loads the gfx PFP, ME, and CE ucode.
2216 * Returns 0 for success, -EINVAL if the ucode is not available.
2217 */
2218 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2219 {
2220 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2221 const struct gfx_firmware_header_v1_0 *ce_hdr;
2222 const struct gfx_firmware_header_v1_0 *me_hdr;
2223 const __le32 *fw_data;
2224 unsigned i, fw_size;
2225
2226 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2227 return -EINVAL;
2228
2229 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2230 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2231 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2232
2233 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2234 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2235 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2236 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2237 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2238 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2239 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2240 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2241 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2242
2243 gfx_v7_0_cp_gfx_enable(adev, false);
2244
2245 /* PFP */
2246 fw_data = (const __le32 *)
2247 (adev->gfx.pfp_fw->data +
2248 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2249 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2250 WREG32(mmCP_PFP_UCODE_ADDR, 0);
2251 for (i = 0; i < fw_size; i++)
2252 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2253 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2254
2255 /* CE */
2256 fw_data = (const __le32 *)
2257 (adev->gfx.ce_fw->data +
2258 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2259 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2260 WREG32(mmCP_CE_UCODE_ADDR, 0);
2261 for (i = 0; i < fw_size; i++)
2262 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2263 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2264
2265 /* ME */
2266 fw_data = (const __le32 *)
2267 (adev->gfx.me_fw->data +
2268 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2269 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2270 WREG32(mmCP_ME_RAM_WADDR, 0);
2271 for (i = 0; i < fw_size; i++)
2272 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2273 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2274
2275 return 0;
2276 }
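/* Each engine above is loaded with the same sequence: reset its
 * UCODE_ADDR/RAM_WADDR register to 0, stream the image one dword at a
 * time through the matching DATA register, then leave the firmware
 * version in the address register.
 */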
2277
2278 /**
2279 * gfx_v7_0_cp_gfx_start - start the gfx ring
2280 *
2281 * @adev: amdgpu_device pointer
2282 *
2283 * Enables the ring and loads the clear state context and other
2284 * packets required to init the ring.
2285 * Returns 0 for success, error for failure.
2286 */
2287 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2288 {
2289 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2290 const struct cs_section_def *sect = NULL;
2291 const struct cs_extent_def *ext = NULL;
2292 int r, i;
2293
2294 /* init the CP */
2295 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2296 WREG32(mmCP_ENDIAN_SWAP, 0);
2297 WREG32(mmCP_DEVICE_ID, 1);
2298
2299 gfx_v7_0_cp_gfx_enable(adev, true);
2300
2301 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2302 if (r) {
2303 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2304 return r;
2305 }
2306
2307 /* init the CE partitions. CE only used for gfx on CIK */
2308 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2309 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2310 amdgpu_ring_write(ring, 0x8000);
2311 amdgpu_ring_write(ring, 0x8000);
2312
2313 /* clear state buffer */
2314 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2315 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2316
2317 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2318 amdgpu_ring_write(ring, 0x80000000);
2319 amdgpu_ring_write(ring, 0x80000000);
2320
2321 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2322 for (ext = sect->section; ext->extent != NULL; ++ext) {
2323 if (sect->id == SECT_CONTEXT) {
2324 amdgpu_ring_write(ring,
2325 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2326 amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2327 for (i = 0; i < ext->reg_count; i++)
2328 amdgpu_ring_write(ring, ext->extent[i]);
2329 }
2330 }
2331 }
2332
2333 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2334 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2335 switch (adev->asic_type) {
2336 case CHIP_BONAIRE:
2337 amdgpu_ring_write(ring, 0x16000012);
2338 amdgpu_ring_write(ring, 0x00000000);
2339 break;
2340 case CHIP_KAVERI:
2341 amdgpu_ring_write(ring, 0x00000000); /* XXX */
2342 amdgpu_ring_write(ring, 0x00000000);
2343 break;
2344 case CHIP_KABINI:
2345 case CHIP_MULLINS:
2346 amdgpu_ring_write(ring, 0x00000000); /* XXX */
2347 amdgpu_ring_write(ring, 0x00000000);
2348 break;
2349 case CHIP_HAWAII:
2350 amdgpu_ring_write(ring, 0x3a00161a);
2351 amdgpu_ring_write(ring, 0x0000002e);
2352 break;
2353 default:
2354 amdgpu_ring_write(ring, 0x00000000);
2355 amdgpu_ring_write(ring, 0x00000000);
2356 break;
2357 }
2358
2359 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2360 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2361
2362 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2363 amdgpu_ring_write(ring, 0);
2364
2365 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2366 amdgpu_ring_write(ring, 0x00000316);
2367 amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2368 amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2369
2370 amdgpu_ring_commit(ring);
2371
2372 return 0;
2373 }
2374
2375 /**
2376 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2377 *
2378 * @adev: amdgpu_device pointer
2379 *
2380 * Program the location and size of the gfx ring buffer
2381 * and test it to make sure it's working.
2382 * Returns 0 for success, error for failure.
2383 */
2384 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2385 {
2386 struct amdgpu_ring *ring;
2387 u32 tmp;
2388 u32 rb_bufsz;
2389 u64 rb_addr, rptr_addr;
2390 int r;
2391
2392 WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2393 if (adev->asic_type != CHIP_HAWAII)
2394 WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2395
2396 /* Set the write pointer delay */
2397 WREG32(mmCP_RB_WPTR_DELAY, 0);
2398
2399 /* set the RB to use vmid 0 */
2400 WREG32(mmCP_RB_VMID, 0);
2401
2402 WREG32(mmSCRATCH_ADDR, 0);
2403
2404 /* ring 0 - compute and gfx */
2405 /* Set ring buffer size */
2406 ring = &adev->gfx.gfx_ring[0];
2407 rb_bufsz = order_base_2(ring->ring_size / 8);
2408 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
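/* e.g. a 1 MiB ring holds 131072 qwords, so rb_bufsz == 17; with 4 KiB
 * GPU pages the block size field at bit 8 is order_base_2(512) == 9.
 */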
2409 #ifdef __BIG_ENDIAN
2410 tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2411 #endif
2412 WREG32(mmCP_RB0_CNTL, tmp);
2413
2414 /* Initialize the ring buffer's read and write pointers */
2415 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2416 ring->wptr = 0;
2417 WREG32(mmCP_RB0_WPTR, ring->wptr);
2418
2419 /* set the wb address whether it's enabled or not */
2420 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2421 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2422 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2423
2424 /* scratch register shadowing is no longer supported */
2425 WREG32(mmSCRATCH_UMSK, 0);
2426
2427 mdelay(1);
2428 WREG32(mmCP_RB0_CNTL, tmp);
2429
2430 rb_addr = ring->gpu_addr >> 8;
2431 WREG32(mmCP_RB0_BASE, rb_addr);
2432 WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2433
2434 /* start the ring */
2435 gfx_v7_0_cp_gfx_start(adev);
2436 ring->ready = true;
2437 r = amdgpu_ring_test_ring(ring);
2438 if (r) {
2439 ring->ready = false;
2440 return r;
2441 }
2442
2443 return 0;
2444 }
2445
2446 static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
2447 {
2448 return ring->adev->wb.wb[ring->rptr_offs];
2449 }
2450
2451 static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2452 {
2453 struct amdgpu_device *adev = ring->adev;
2454
2455 return RREG32(mmCP_RB0_WPTR);
2456 }
2457
2458 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2459 {
2460 struct amdgpu_device *adev = ring->adev;
2461
2462 WREG32(mmCP_RB0_WPTR, ring->wptr);
2463 (void)RREG32(mmCP_RB0_WPTR);
2464 }
2465
2466 static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
2467 {
2468 return ring->adev->wb.wb[ring->rptr_offs];
2469 }
2470
2471 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2472 {
2473 /* XXX check if swapping is necessary on BE */
2474 return ring->adev->wb.wb[ring->wptr_offs];
2475 }
2476
2477 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2478 {
2479 struct amdgpu_device *adev = ring->adev;
2480
2481 /* XXX check if swapping is necessary on BE */
2482 adev->wb.wb[ring->wptr_offs] = ring->wptr;
2483 WDOORBELL32(ring->doorbell_index, ring->wptr);
2484 }
2485
2486 /**
2487 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2488 *
2489 * @adev: amdgpu_device pointer
2490 * @enable: enable or disable the MEs
2491 *
2492 * Halts or unhalts the compute MEs.
2493 */
2494 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2495 {
2496 int i;
2497
2498 if (enable) {
2499 WREG32(mmCP_MEC_CNTL, 0);
2500 } else {
2501 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2502 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2503 adev->gfx.compute_ring[i].ready = false;
2504 }
2505 udelay(50);
2506 }
2507
2508 /**
2509 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2510 *
2511 * @adev: amdgpu_device pointer
2512 *
2513 * Loads the compute MEC1&2 ucode.
2514 * Returns 0 for success, -EINVAL if the ucode is not available.
2515 */
2516 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2517 {
2518 const struct gfx_firmware_header_v1_0 *mec_hdr;
2519 const __le32 *fw_data;
2520 unsigned i, fw_size;
2521
2522 if (!adev->gfx.mec_fw)
2523 return -EINVAL;
2524
2525 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2526 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2527 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2528 adev->gfx.mec_feature_version = le32_to_cpu(
2529 mec_hdr->ucode_feature_version);
2530
2531 gfx_v7_0_cp_compute_enable(adev, false);
2532
2533 /* MEC1 */
2534 fw_data = (const __le32 *)
2535 (adev->gfx.mec_fw->data +
2536 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2537 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2538 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2539 for (i = 0; i < fw_size; i++)
2540 WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2541 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2542
2543 if (adev->asic_type == CHIP_KAVERI) {
2544 const struct gfx_firmware_header_v1_0 *mec2_hdr;
2545
2546 if (!adev->gfx.mec2_fw)
2547 return -EINVAL;
2548
2549 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2550 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2551 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2552 adev->gfx.mec2_feature_version = le32_to_cpu(
2553 mec2_hdr->ucode_feature_version);
2554
2555 /* MEC2 */
2556 fw_data = (const __le32 *)
2557 (adev->gfx.mec2_fw->data +
2558 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2559 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2560 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2561 for (i = 0; i < fw_size; i++)
2562 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2563 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2564 }
2565
2566 return 0;
2567 }
2568
2569 /**
2570 * gfx_v7_0_cp_compute_fini - stop the compute queues
2571 *
2572 * @adev: amdgpu_device pointer
2573 *
2574 * Stop the compute queues and tear down the driver queue
2575 * info.
2576 */
2577 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2578 {
2579 int i, r;
2580
2581 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2582 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2583
2584 if (ring->mqd_obj) {
2585 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2586 if (unlikely(r != 0))
2587 dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
2588
2589 amdgpu_bo_unpin(ring->mqd_obj);
2590 amdgpu_bo_unreserve(ring->mqd_obj);
2591
2592 amdgpu_bo_unref(&ring->mqd_obj);
2593 ring->mqd_obj = NULL;
2594 }
2595 }
2596 }
2597
2598 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2599 {
2600 int r;
2601
2602 if (adev->gfx.mec.hpd_eop_obj) {
2603 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2604 if (unlikely(r != 0))
2605 dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
2606 amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
2607 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2608
2609 amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
2610 adev->gfx.mec.hpd_eop_obj = NULL;
2611 }
2612 }
2613
2614 #define MEC_HPD_SIZE 2048
2615
2616 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2617 {
2618 int r;
2619 u32 *hpd;
2620
2621 /*
2622 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
2623 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
2624 * Nonetheless, we assign only 1 pipe because all other pipes will
2625 * be handled by KFD
2626 */
2627 adev->gfx.mec.num_mec = 1;
2628 adev->gfx.mec.num_pipe = 1;
2629 adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
2630
2631 if (adev->gfx.mec.hpd_eop_obj == NULL) {
2632 r = amdgpu_bo_create(adev,
2633 adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
2634 PAGE_SIZE, true,
2635 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2636 &adev->gfx.mec.hpd_eop_obj);
2637 if (r) {
2638 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
2639 return r;
2640 }
2641 }
2642
2643 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2644 if (unlikely(r != 0)) {
2645 gfx_v7_0_mec_fini(adev);
2646 return r;
2647 }
2648 r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
2649 &adev->gfx.mec.hpd_eop_gpu_addr);
2650 if (r) {
2651 dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
2652 gfx_v7_0_mec_fini(adev);
2653 return r;
2654 }
2655 r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
2656 if (r) {
2657 dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
2658 gfx_v7_0_mec_fini(adev);
2659 return r;
2660 }
2661
2662 /* clear memory. Not sure if this is required or not */
2663 memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
2664
2665 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2666 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2667
2668 return 0;
2669 }
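/* With num_mec == num_pipe == 1 the HPD EOP buffer allocated above is
 * 1 * 1 * 2048 * 2 = 4096 bytes, i.e. a single page on 4 KiB-page
 * systems.
 */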
2670
2671 struct hqd_registers
2672 {
2673 u32 cp_mqd_base_addr;
2674 u32 cp_mqd_base_addr_hi;
2675 u32 cp_hqd_active;
2676 u32 cp_hqd_vmid;
2677 u32 cp_hqd_persistent_state;
2678 u32 cp_hqd_pipe_priority;
2679 u32 cp_hqd_queue_priority;
2680 u32 cp_hqd_quantum;
2681 u32 cp_hqd_pq_base;
2682 u32 cp_hqd_pq_base_hi;
2683 u32 cp_hqd_pq_rptr;
2684 u32 cp_hqd_pq_rptr_report_addr;
2685 u32 cp_hqd_pq_rptr_report_addr_hi;
2686 u32 cp_hqd_pq_wptr_poll_addr;
2687 u32 cp_hqd_pq_wptr_poll_addr_hi;
2688 u32 cp_hqd_pq_doorbell_control;
2689 u32 cp_hqd_pq_wptr;
2690 u32 cp_hqd_pq_control;
2691 u32 cp_hqd_ib_base_addr;
2692 u32 cp_hqd_ib_base_addr_hi;
2693 u32 cp_hqd_ib_rptr;
2694 u32 cp_hqd_ib_control;
2695 u32 cp_hqd_iq_timer;
2696 u32 cp_hqd_iq_rptr;
2697 u32 cp_hqd_dequeue_request;
2698 u32 cp_hqd_dma_offload;
2699 u32 cp_hqd_sema_cmd;
2700 u32 cp_hqd_msg_type;
2701 u32 cp_hqd_atomic0_preop_lo;
2702 u32 cp_hqd_atomic0_preop_hi;
2703 u32 cp_hqd_atomic1_preop_lo;
2704 u32 cp_hqd_atomic1_preop_hi;
2705 u32 cp_hqd_hq_scheduler0;
2706 u32 cp_hqd_hq_scheduler1;
2707 u32 cp_mqd_control;
2708 };
2709
2710 struct bonaire_mqd
2711 {
2712 u32 header;
2713 u32 dispatch_initiator;
2714 u32 dimensions[3];
2715 u32 start_idx[3];
2716 u32 num_threads[3];
2717 u32 pipeline_stat_enable;
2718 u32 perf_counter_enable;
2719 u32 pgm[2];
2720 u32 tba[2];
2721 u32 tma[2];
2722 u32 pgm_rsrc[2];
2723 u32 vmid;
2724 u32 resource_limits;
2725 u32 static_thread_mgmt01[2];
2726 u32 tmp_ring_size;
2727 u32 static_thread_mgmt23[2];
2728 u32 restart[3];
2729 u32 thread_trace_enable;
2730 u32 reserved1;
2731 u32 user_data[16];
2732 u32 vgtcs_invoke_count[2];
2733 struct hqd_registers queue_state;
2734 u32 dequeue_cntr;
2735 u32 interrupt_queue[64];
2736 };
2737
2738 /**
2739 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
2740 *
2741 * @adev: amdgpu_device pointer
2742 *
2743 * Program the compute queues and test them to make sure they
2744 * are working.
2745 * Returns 0 for success, error for failure.
2746 */
2747 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
2748 {
2749 int r, i, j;
2750 u32 tmp;
2751 bool use_doorbell = true;
2752 u64 hqd_gpu_addr;
2753 u64 mqd_gpu_addr;
2754 u64 eop_gpu_addr;
2755 u64 wb_gpu_addr;
2756 u32 *buf;
2757 struct bonaire_mqd *mqd;
2758 struct amdgpu_ring *ring;
2759
2760 /* fix up chicken bits */
2761 tmp = RREG32(mmCP_CPF_DEBUG);
2762 tmp |= (1 << 23);
2763 WREG32(mmCP_CPF_DEBUG, tmp);
2764
2765 /* init the pipes */
2766 mutex_lock(&adev->srbm_mutex);
2767 for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
2768 int me = (i < 4) ? 1 : 2;
2769 int pipe = (i < 4) ? i : (i - 4);
2770
2771 eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
2772
2773 cik_srbm_select(adev, me, pipe, 0, 0);
2774
2775 /* write the EOP addr */
2776 WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2777 WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2778
2779 /* set the VMID assigned */
2780 WREG32(mmCP_HPD_EOP_VMID, 0);
2781
2782 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2783 tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2784 tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2785 tmp |= order_base_2(MEC_HPD_SIZE / 8);
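/* MEC_HPD_SIZE / 8 == 256, so EOP_SIZE is programmed to 8 and the
 * resulting size is 2^(8+1) == 512 dwords == 2048 bytes, i.e. exactly
 * MEC_HPD_SIZE.
 */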
2786 WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2787 }
2788 cik_srbm_select(adev, 0, 0, 0, 0);
2789 mutex_unlock(&adev->srbm_mutex);
2790
2791 /* init the compute queues */
2792 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2793 ring = &adev->gfx.compute_ring[i];
2794
2795 if (ring->mqd_obj == NULL) {
2796 r = amdgpu_bo_create(adev,
2797 sizeof(struct bonaire_mqd),
2798 PAGE_SIZE, true,
2799 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2800 &ring->mqd_obj);
2801 if (r) {
2802 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2803 return r;
2804 }
2805 }
2806
2807 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2808 if (unlikely(r != 0)) {
2809 gfx_v7_0_cp_compute_fini(adev);
2810 return r;
2811 }
2812 r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
2813 &mqd_gpu_addr);
2814 if (r) {
2815 dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
2816 gfx_v7_0_cp_compute_fini(adev);
2817 return r;
2818 }
2819 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
2820 if (r) {
2821 dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
2822 gfx_v7_0_cp_compute_fini(adev);
2823 return r;
2824 }
2825
2826 /* init the mqd struct */
2827 memset(buf, 0, sizeof(struct bonaire_mqd));
2828
2829 mqd = (struct bonaire_mqd *)buf;
2830 mqd->header = 0xC0310800;
2831 mqd->static_thread_mgmt01[0] = 0xffffffff;
2832 mqd->static_thread_mgmt01[1] = 0xffffffff;
2833 mqd->static_thread_mgmt23[0] = 0xffffffff;
2834 mqd->static_thread_mgmt23[1] = 0xffffffff;
2835
2836 mutex_lock(&adev->srbm_mutex);
2837 cik_srbm_select(adev, ring->me,
2838 ring->pipe,
2839 ring->queue, 0);
2840
2841 /* disable wptr polling */
2842 tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
2843 tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
2844 WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
2845
2846 /* enable doorbell? */
2847 mqd->queue_state.cp_hqd_pq_doorbell_control =
2848 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2849 if (use_doorbell)
2850 mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2851 else
2852 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2853 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
2854 mqd->queue_state.cp_hqd_pq_doorbell_control);
2855
2856 /* disable the queue if it's active */
2857 mqd->queue_state.cp_hqd_dequeue_request = 0;
2858 mqd->queue_state.cp_hqd_pq_rptr = 0;
2859 mqd->queue_state.cp_hqd_pq_wptr = 0;
2860 if (RREG32(mmCP_HQD_ACTIVE) & 1) {
2861 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
2862 for (j = 0; j < adev->usec_timeout; j++) {
2863 if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
2864 break;
2865 udelay(1);
2866 }
2867 WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
2868 WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
2869 WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
2870 }
2871
2872 /* set the pointer to the MQD */
2873 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
2874 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
2875 WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
2876 WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
2877 /* set MQD vmid to 0 */
2878 mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
2879 mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
2880 WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
2881
2882 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2883 hqd_gpu_addr = ring->gpu_addr >> 8;
2884 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
2885 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2886 WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
2887 WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
2888
2889 /* set up the HQD, this is similar to CP_RB0_CNTL */
2890 mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
2891 mqd->queue_state.cp_hqd_pq_control &=
2892 ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
2893 CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
2894
2895 mqd->queue_state.cp_hqd_pq_control |=
2896 order_base_2(ring->ring_size / 8);
2897 mqd->queue_state.cp_hqd_pq_control |=
2898 (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
2899 #ifdef __BIG_ENDIAN
2900 mqd->queue_state.cp_hqd_pq_control |=
2901 2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
2902 #endif
2903 mqd->queue_state.cp_hqd_pq_control &=
2904 ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
2905 CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
2906 CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
2907 mqd->queue_state.cp_hqd_pq_control |=
2908 CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
2909 CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
2910 WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
2911
2912 /* only used if CP_PQ_WPTR_POLL_CNTL__EN_MASK is set */
2913 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2914 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
2915 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2916 WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
2917 WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2918 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
2919
2920 /* set the wb address whether it's enabled or not */
2921 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2922 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
2923 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
2924 upper_32_bits(wb_gpu_addr) & 0xffff;
2925 WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2926 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
2927 WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2928 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
2929
2930 /* enable the doorbell if requested */
2931 if (use_doorbell) {
2932 mqd->queue_state.cp_hqd_pq_doorbell_control =
2933 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2934 mqd->queue_state.cp_hqd_pq_doorbell_control &=
2935 ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
2936 mqd->queue_state.cp_hqd_pq_doorbell_control |=
2937 (ring->doorbell_index <<
2938 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
2939 mqd->queue_state.cp_hqd_pq_doorbell_control |=
2940 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2941 mqd->queue_state.cp_hqd_pq_doorbell_control &=
2942 ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
2943 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
2944
2945 } else {
2946 mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
2947 }
2948 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
2949 mqd->queue_state.cp_hqd_pq_doorbell_control);
2950
2951 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2952 ring->wptr = 0;
2953 mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
2954 WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
2955 mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
2956
2957 /* set the vmid for the queue */
2958 mqd->queue_state.cp_hqd_vmid = 0;
2959 WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
2960
2961 /* activate the queue */
2962 mqd->queue_state.cp_hqd_active = 1;
2963 WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
2964
2965 cik_srbm_select(adev, 0, 0, 0, 0);
2966 mutex_unlock(&adev->srbm_mutex);
2967
2968 amdgpu_bo_kunmap(ring->mqd_obj);
2969 amdgpu_bo_unreserve(ring->mqd_obj);
2970
2971 ring->ready = true;
2972 }
2973
2974 gfx_v7_0_cp_compute_enable(adev, true);
2975
2976 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2977 ring = &adev->gfx.compute_ring[i];
2978
2979 r = amdgpu_ring_test_ring(ring);
2980 if (r)
2981 ring->ready = false;
2982 }
2983
2984 return 0;
2985 }
2986
2987 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
2988 {
2989 gfx_v7_0_cp_gfx_enable(adev, enable);
2990 gfx_v7_0_cp_compute_enable(adev, enable);
2991 }
2992
2993 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
2994 {
2995 int r;
2996
2997 r = gfx_v7_0_cp_gfx_load_microcode(adev);
2998 if (r)
2999 return r;
3000 r = gfx_v7_0_cp_compute_load_microcode(adev);
3001 if (r)
3002 return r;
3003
3004 return 0;
3005 }
3006
3007 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3008 bool enable)
3009 {
3010 u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3011
3012 if (enable)
3013 tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3014 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3015 else
3016 tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3017 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3018 WREG32(mmCP_INT_CNTL_RING0, tmp);
3019 }
3020
3021 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3022 {
3023 int r;
3024
3025 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3026
3027 r = gfx_v7_0_cp_load_microcode(adev);
3028 if (r)
3029 return r;
3030
3031 r = gfx_v7_0_cp_gfx_resume(adev);
3032 if (r)
3033 return r;
3034 r = gfx_v7_0_cp_compute_resume(adev);
3035 if (r)
3036 return r;
3037
3038 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3039
3040 return 0;
3041 }
3042
3043 /**
3044 * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
3045 *
3046 * @ring: the ring to emit the commands to
3047 *
3048 * Sync the command pipeline with the PFP, i.e. wait for all
3049 * previously submitted work to be completed.
3050 */
3051 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3052 {
3053 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3054 uint32_t seq = ring->fence_drv.sync_seq;
3055 uint64_t addr = ring->fence_drv.gpu_addr;
3056
3057 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3058 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3059 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3060 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
3061 amdgpu_ring_write(ring, addr & 0xfffffffc);
3062 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3063 amdgpu_ring_write(ring, seq);
3064 amdgpu_ring_write(ring, 0xffffffff);
3065 amdgpu_ring_write(ring, 4); /* poll interval */
3066
3067 if (usepfp) {
3068 /* sync CE with ME to prevent CE from fetching the CEIB before the context switch is done */
3069 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3070 amdgpu_ring_write(ring, 0);
3071 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3072 amdgpu_ring_write(ring, 0);
3073 }
3074 }
3075
3076 /*
3077 * vm
3078 * VMID 0 is the physical GPU addresses as used by the kernel.
3079 * VMIDs 1-15 are used for userspace clients and are handled
3080 * by the amdgpu vm/hsa code.
3081 */
3082 /**
3083 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3084 *
3085 * @ring: amdgpu_ring structure holding ring information
 * @vm_id: the VMID to flush
 * @pd_addr: the page directory base address for this VMID
3086 *
3087 * Update the page table base and flush the VM TLB
3088 * using the CP (CIK).
3089 */
3090 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3091 unsigned vm_id, uint64_t pd_addr)
3092 {
3093 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3094
3095 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3096 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3097 WRITE_DATA_DST_SEL(0)));
3098 if (vm_id < 8) {
3099 amdgpu_ring_write(ring,
3100 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
3101 } else {
3102 amdgpu_ring_write(ring,
3103 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
3104 }
3105 amdgpu_ring_write(ring, 0);
3106 amdgpu_ring_write(ring, pd_addr >> 12);
3107
3108 /* bits 0-15 are the VM contexts0-15 */
3109 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3110 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3111 WRITE_DATA_DST_SEL(0)));
3112 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3113 amdgpu_ring_write(ring, 0);
3114 amdgpu_ring_write(ring, 1 << vm_id);
3115
3116 /* wait for the invalidate to complete */
3117 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3118 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3119 WAIT_REG_MEM_FUNCTION(0) | /* always */
3120 WAIT_REG_MEM_ENGINE(0))); /* me */
3121 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3122 amdgpu_ring_write(ring, 0);
3123 amdgpu_ring_write(ring, 0); /* ref */
3124 amdgpu_ring_write(ring, 0); /* mask */
3125 amdgpu_ring_write(ring, 0x20); /* poll interval */
3126
3127 /* compute doesn't have PFP */
3128 if (usepfp) {
3129 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3130 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3131 amdgpu_ring_write(ring, 0x0);
3132
3133 /* sync CE with ME to prevent CE from fetching the CEIB before the context switch is done */
3134 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3135 amdgpu_ring_write(ring, 0);
3136 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3137 amdgpu_ring_write(ring, 0);
3138 }
3139 }
3140
3141 /*
3142 * RLC
3143 * The RLC is a multi-purpose microengine that handles a
3144 * variety of functions.
3145 */
3146 static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
3147 {
3148 int r;
3149
3150 /* save restore block */
3151 if (adev->gfx.rlc.save_restore_obj) {
3152 r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3153 if (unlikely(r != 0))
3154 dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
3155 amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
3156 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3157
3158 amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
3159 adev->gfx.rlc.save_restore_obj = NULL;
3160 }
3161
3162 /* clear state block */
3163 if (adev->gfx.rlc.clear_state_obj) {
3164 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3165 if (unlikely(r != 0))
3166 dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
3167 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
3168 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3169
3170 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
3171 adev->gfx.rlc.clear_state_obj = NULL;
3172 }
3173
3174 /* cp table block */
3175 if (adev->gfx.rlc.cp_table_obj) {
3176 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3177 if (unlikely(r != 0))
3178 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3179 amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
3180 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3181
3182 amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
3183 adev->gfx.rlc.cp_table_obj = NULL;
3184 }
3185 }
3186
3187 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3188 {
3189 const u32 *src_ptr;
3190 volatile u32 *dst_ptr;
3191 u32 dws, i;
3192 const struct cs_section_def *cs_data;
3193 int r;
3194
3195 /* allocate rlc buffers */
3196 if (adev->flags & AMD_IS_APU) {
3197 if (adev->asic_type == CHIP_KAVERI) {
3198 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3199 adev->gfx.rlc.reg_list_size =
3200 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3201 } else {
3202 adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3203 adev->gfx.rlc.reg_list_size =
3204 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3205 }
3206 }
3207 adev->gfx.rlc.cs_data = ci_cs_data;
3208 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3209 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3210
3211 src_ptr = adev->gfx.rlc.reg_list;
3212 dws = adev->gfx.rlc.reg_list_size;
3213 dws += (5 * 16) + 48 + 48 + 64;
3214
3215 cs_data = adev->gfx.rlc.cs_data;
3216
3217 if (src_ptr) {
3218 /* save restore block */
3219 if (adev->gfx.rlc.save_restore_obj == NULL) {
3220 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3221 AMDGPU_GEM_DOMAIN_VRAM,
3222 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3223 NULL, NULL,
3224 &adev->gfx.rlc.save_restore_obj);
3225 if (r) {
3226 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3227 return r;
3228 }
3229 }
3230
3231 r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3232 if (unlikely(r != 0)) {
3233 gfx_v7_0_rlc_fini(adev);
3234 return r;
3235 }
3236 r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
3237 &adev->gfx.rlc.save_restore_gpu_addr);
3238 if (r) {
3239 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3240 dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
3241 gfx_v7_0_rlc_fini(adev);
3242 return r;
3243 }
3244
3245 r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
3246 if (r) {
3247 dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
3248 gfx_v7_0_rlc_fini(adev);
3249 return r;
3250 }
3251 /* write the sr buffer */
3252 dst_ptr = adev->gfx.rlc.sr_ptr;
3253 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3254 dst_ptr[i] = cpu_to_le32(src_ptr[i]);
3255 amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
3256 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3257 }
3258
3259 if (cs_data) {
3260 /* clear state block */
3261 adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
3262
3263 if (adev->gfx.rlc.clear_state_obj == NULL) {
3264 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3265 AMDGPU_GEM_DOMAIN_VRAM,
3266 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3267 NULL, NULL,
3268 &adev->gfx.rlc.clear_state_obj);
3269 if (r) {
3270 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3271 gfx_v7_0_rlc_fini(adev);
3272 return r;
3273 }
3274 }
3275 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3276 if (unlikely(r != 0)) {
3277 gfx_v7_0_rlc_fini(adev);
3278 return r;
3279 }
3280 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
3281 &adev->gfx.rlc.clear_state_gpu_addr);
3282 if (r) {
3283 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3284 dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
3285 gfx_v7_0_rlc_fini(adev);
3286 return r;
3287 }
3288
3289 r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
3290 if (r) {
3291 dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
3292 gfx_v7_0_rlc_fini(adev);
3293 return r;
3294 }
3295 /* set up the cs buffer */
3296 dst_ptr = adev->gfx.rlc.cs_ptr;
3297 gfx_v7_0_get_csb_buffer(adev, dst_ptr);
3298 amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
3299 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3300 }
3301
3302 if (adev->gfx.rlc.cp_table_size) {
3303 if (adev->gfx.rlc.cp_table_obj == NULL) {
3304 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3305 AMDGPU_GEM_DOMAIN_VRAM,
3306 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3307 NULL, NULL,
3308 &adev->gfx.rlc.cp_table_obj);
3309 if (r) {
3310 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3311 gfx_v7_0_rlc_fini(adev);
3312 return r;
3313 }
3314 }
3315
3316 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3317 if (unlikely(r != 0)) {
3318 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3319 gfx_v7_0_rlc_fini(adev);
3320 return r;
3321 }
3322 r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
3323 &adev->gfx.rlc.cp_table_gpu_addr);
3324 if (r) {
3325 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3326 dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
3327 gfx_v7_0_rlc_fini(adev);
3328 return r;
3329 }
3330 r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
3331 if (r) {
3332 dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
3333 gfx_v7_0_rlc_fini(adev);
3334 return r;
3335 }
3336
3337 gfx_v7_0_init_cp_pg_table(adev);
3338
3339 amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
3340 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3341
3342 }
3343
3344 return 0;
3345 }
3346
3347 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3348 {
3349 u32 tmp;
3350
3351 tmp = RREG32(mmRLC_LB_CNTL);
3352 if (enable)
3353 tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3354 else
3355 tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3356 WREG32(mmRLC_LB_CNTL, tmp);
3357 }
3358
3359 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3360 {
3361 u32 i, j, k;
3362 u32 mask;
3363
3364 mutex_lock(&adev->grbm_idx_mutex);
3365 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3366 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3367 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3368 for (k = 0; k < adev->usec_timeout; k++) {
3369 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3370 break;
3371 udelay(1);
3372 }
3373 }
3374 }
3375 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3376 mutex_unlock(&adev->grbm_idx_mutex);
3377
3378 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3379 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3380 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3381 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3382 for (k = 0; k < adev->usec_timeout; k++) {
3383 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3384 break;
3385 udelay(1);
3386 }
3387 }
3388
3389 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3390 {
3391 u32 tmp;
3392
3393 tmp = RREG32(mmRLC_CNTL);
3394 if (tmp != rlc)
3395 WREG32(mmRLC_CNTL, rlc);
3396 }
3397
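/*
 * gfx_v7_0_halt_rlc - halt the RLC F32 core if it is running
 *
 * Waits for the RLC GPM to go idle and for the serdes paths to drain,
 * then returns the original RLC_CNTL value so the caller can restore
 * the previous state later via gfx_v7_0_update_rlc().
 */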
3398 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3399 {
3400 u32 data, orig;
3401
3402 orig = data = RREG32(mmRLC_CNTL);
3403
3404 if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3405 u32 i;
3406
3407 data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3408 WREG32(mmRLC_CNTL, data);
3409
3410 for (i = 0; i < adev->usec_timeout; i++) {
3411 if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3412 break;
3413 udelay(1);
3414 }
3415
3416 gfx_v7_0_wait_for_rlc_serdes(adev);
3417 }
3418
3419 return orig;
3420 }
3421
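/*
 * RLC safe mode handshake via RLC_GPR_REG2. Bit 0 appears to be the
 * request flag and bit 1 the message (1 = enter safe mode, 0 = exit);
 * entry additionally waits for the gfx power/clock status bits in
 * RLC_GPM_STAT and for the RLC to acknowledge by clearing the request bit.
 */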
3422 static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3423 {
3424 u32 tmp, i, mask;
3425
3426 tmp = 0x1 | (1 << 1);
3427 WREG32(mmRLC_GPR_REG2, tmp);
3428
3429 mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3430 RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3431 for (i = 0; i < adev->usec_timeout; i++) {
3432 if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3433 break;
3434 udelay(1);
3435 }
3436
3437 for (i = 0; i < adev->usec_timeout; i++) {
3438 if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3439 break;
3440 udelay(1);
3441 }
3442 }
3443
3444 static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3445 {
3446 u32 tmp;
3447
3448 tmp = 0x1 | (0 << 1);
3449 WREG32(mmRLC_GPR_REG2, tmp);
3450 }
3451
3452 /**
3453 * gfx_v7_0_rlc_stop - stop the RLC ME
3454 *
3455 * @adev: amdgpu_device pointer
3456 *
3457 * Halt the RLC ME (MicroEngine) (CIK).
3458 */
3459 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3460 {
3461 WREG32(mmRLC_CNTL, 0);
3462
3463 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3464
3465 gfx_v7_0_wait_for_rlc_serdes(adev);
3466 }
3467
3468 /**
3469 * gfx_v7_0_rlc_start - start the RLC ME
3470 *
3471 * @adev: amdgpu_device pointer
3472 *
3473 * Unhalt the RLC ME (MicroEngine) (CIK).
3474 */
3475 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3476 {
3477 WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3478
3479 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3480
3481 udelay(50);
3482 }
3483
3484 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3485 {
3486 u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3487
3488 tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3489 WREG32(mmGRBM_SOFT_RESET, tmp);
3490 udelay(50);
3491 tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3492 WREG32(mmGRBM_SOFT_RESET, tmp);
3493 udelay(50);
3494 }
3495
3496 /**
3497 * gfx_v7_0_rlc_resume - setup the RLC hw
3498 *
3499 * @adev: amdgpu_device pointer
3500 *
3501 * Initialize the RLC registers, load the ucode,
3502 * and start the RLC (CIK).
3503 * Returns 0 for success, -EINVAL if the ucode is not available.
3504 */
3505 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3506 {
3507 const struct rlc_firmware_header_v1_0 *hdr;
3508 const __le32 *fw_data;
3509 unsigned i, fw_size;
3510 u32 tmp;
3511
3512 if (!adev->gfx.rlc_fw)
3513 return -EINVAL;
3514
3515 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3516 amdgpu_ucode_print_rlc_hdr(&hdr->header);
3517 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3518 adev->gfx.rlc_feature_version = le32_to_cpu(
3519 hdr->ucode_feature_version);
3520
3521 gfx_v7_0_rlc_stop(adev);
3522
3523 /* disable CG */
3524 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3525 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3526
3527 gfx_v7_0_rlc_reset(adev);
3528
3529 gfx_v7_0_init_pg(adev);
3530
3531 WREG32(mmRLC_LB_CNTR_INIT, 0);
3532 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3533
3534 mutex_lock(&adev->grbm_idx_mutex);
3535 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3536 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3537 WREG32(mmRLC_LB_PARAMS, 0x00600408);
3538 WREG32(mmRLC_LB_CNTL, 0x80000004);
3539 mutex_unlock(&adev->grbm_idx_mutex);
3540
3541 WREG32(mmRLC_MC_CNTL, 0);
3542 WREG32(mmRLC_UCODE_CNTL, 0);
3543
3544 fw_data = (const __le32 *)
3545 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3546 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3547 WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3548 for (i = 0; i < fw_size; i++)
3549 WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3550 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3551
3552 /* XXX - find out what chips support lbpw */
3553 gfx_v7_0_enable_lbpw(adev, false);
3554
3555 if (adev->asic_type == CHIP_BONAIRE)
3556 WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3557
3558 gfx_v7_0_rlc_start(adev);
3559
3560 return 0;
3561 }
3562
3563 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3564 {
3565 u32 data, orig, tmp, tmp2;
3566
3567 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3568
3569 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3570 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3571
3572 tmp = gfx_v7_0_halt_rlc(adev);
3573
3574 mutex_lock(&adev->grbm_idx_mutex);
3575 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3576 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3577 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3578 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3579 RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3580 RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3581 WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3582 mutex_unlock(&adev->grbm_idx_mutex);
3583
3584 gfx_v7_0_update_rlc(adev, tmp);
3585
3586 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3587 } else {
3588 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3589
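		/* four back-to-back dummy reads; presumably a settling delay before CGCG is switched off */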
3590 RREG32(mmCB_CGTT_SCLK_CTRL);
3591 RREG32(mmCB_CGTT_SCLK_CTRL);
3592 RREG32(mmCB_CGTT_SCLK_CTRL);
3593 RREG32(mmCB_CGTT_SCLK_CTRL);
3594
3595 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3596 }
3597
3598 if (orig != data)
3599 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3600
3601 }
3602
3603 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3604 {
3605 u32 data, orig, tmp = 0;
3606
3607 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3608 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3609 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3610 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3611 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3612 if (orig != data)
3613 WREG32(mmCP_MEM_SLP_CNTL, data);
3614 }
3615 }
3616
3617 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3618 data |= 0x00000001;
3619 data &= 0xfffffffd;
3620 if (orig != data)
3621 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3622
3623 tmp = gfx_v7_0_halt_rlc(adev);
3624
3625 mutex_lock(&adev->grbm_idx_mutex);
3626 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3627 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3628 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3629 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3630 RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3631 WREG32(mmRLC_SERDES_WR_CTRL, data);
3632 mutex_unlock(&adev->grbm_idx_mutex);
3633
3634 gfx_v7_0_update_rlc(adev, tmp);
3635
3636 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3637 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3638 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3639 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3640 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3641 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3642 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3643 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3644 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3645 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3646 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3647 data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3648 if (orig != data)
3649 WREG32(mmCGTS_SM_CTRL_REG, data);
3650 }
3651 } else {
3652 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3653 data |= 0x00000003;
3654 if (orig != data)
3655 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3656
3657 data = RREG32(mmRLC_MEM_SLP_CNTL);
3658 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3659 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3660 WREG32(mmRLC_MEM_SLP_CNTL, data);
3661 }
3662
3663 data = RREG32(mmCP_MEM_SLP_CNTL);
3664 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3665 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3666 WREG32(mmCP_MEM_SLP_CNTL, data);
3667 }
3668
3669 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3670 data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3671 if (orig != data)
3672 WREG32(mmCGTS_SM_CTRL_REG, data);
3673
3674 tmp = gfx_v7_0_halt_rlc(adev);
3675
3676 mutex_lock(&adev->grbm_idx_mutex);
3677 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3678 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3679 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3680 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3681 WREG32(mmRLC_SERDES_WR_CTRL, data);
3682 mutex_unlock(&adev->grbm_idx_mutex);
3683
3684 gfx_v7_0_update_rlc(adev, tmp);
3685 }
3686 }
3687
3688 static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3689 bool enable)
3690 {
3691 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3692 /* order matters! */
3693 if (enable) {
3694 gfx_v7_0_enable_mgcg(adev, true);
3695 gfx_v7_0_enable_cgcg(adev, true);
3696 } else {
3697 gfx_v7_0_enable_cgcg(adev, false);
3698 gfx_v7_0_enable_mgcg(adev, false);
3699 }
3700 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3701 }
3702
3703 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3704 bool enable)
3705 {
3706 u32 data, orig;
3707
3708 orig = data = RREG32(mmRLC_PG_CNTL);
3709 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3710 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3711 else
3712 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3713 if (orig != data)
3714 WREG32(mmRLC_PG_CNTL, data);
3715 }
3716
3717 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3718 bool enable)
3719 {
3720 u32 data, orig;
3721
3722 orig = data = RREG32(mmRLC_PG_CNTL);
3723 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3724 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3725 else
3726 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3727 if (orig != data)
3728 WREG32(mmRLC_PG_CNTL, data);
3729 }
3730
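/*
 * CP and GDS power gating are controlled by raw bits in RLC_PG_CNTL:
 * 0x8000 appears to be the CP PG disable bit and 0x2000 the GDS PG
 * disable bit (no named masks are used here), so "enable" clears the
 * bit and "disable" sets it.
 */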
3731 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3732 {
3733 u32 data, orig;
3734
3735 orig = data = RREG32(mmRLC_PG_CNTL);
3736 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3737 data &= ~0x8000;
3738 else
3739 data |= 0x8000;
3740 if (orig != data)
3741 WREG32(mmRLC_PG_CNTL, data);
3742 }
3743
3744 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3745 {
3746 u32 data, orig;
3747
3748 orig = data = RREG32(mmRLC_PG_CNTL);
3749 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3750 data &= ~0x2000;
3751 else
3752 data |= 0x2000;
3753 if (orig != data)
3754 WREG32(mmRLC_PG_CNTL, data);
3755 }
3756
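/*
 * gfx_v7_0_init_cp_pg_table - copy the CP jump tables into the RLC BO
 *
 * Walks the CE, PFP, ME, MEC (and, on Kaveri, MEC2) firmware images and
 * copies each one's jump table (jt_offset/jt_size from the firmware
 * header) into the CP table buffer consumed by the RLC for CP power
 * gating.
 */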
3757 static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
3758 {
3759 const __le32 *fw_data;
3760 volatile u32 *dst_ptr;
3761 int me, i, max_me = 4;
3762 u32 bo_offset = 0;
3763 u32 table_offset, table_size;
3764
3765 if (adev->asic_type == CHIP_KAVERI)
3766 max_me = 5;
3767
3768 if (adev->gfx.rlc.cp_table_ptr == NULL)
3769 return;
3770
3771 /* write the cp table buffer */
3772 dst_ptr = adev->gfx.rlc.cp_table_ptr;
3773 for (me = 0; me < max_me; me++) {
3774 if (me == 0) {
3775 const struct gfx_firmware_header_v1_0 *hdr =
3776 (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
3777 fw_data = (const __le32 *)
3778 (adev->gfx.ce_fw->data +
3779 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3780 table_offset = le32_to_cpu(hdr->jt_offset);
3781 table_size = le32_to_cpu(hdr->jt_size);
3782 } else if (me == 1) {
3783 const struct gfx_firmware_header_v1_0 *hdr =
3784 (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
3785 fw_data = (const __le32 *)
3786 (adev->gfx.pfp_fw->data +
3787 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3788 table_offset = le32_to_cpu(hdr->jt_offset);
3789 table_size = le32_to_cpu(hdr->jt_size);
3790 } else if (me == 2) {
3791 const struct gfx_firmware_header_v1_0 *hdr =
3792 (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
3793 fw_data = (const __le32 *)
3794 (adev->gfx.me_fw->data +
3795 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3796 table_offset = le32_to_cpu(hdr->jt_offset);
3797 table_size = le32_to_cpu(hdr->jt_size);
3798 } else if (me == 3) {
3799 const struct gfx_firmware_header_v1_0 *hdr =
3800 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3801 fw_data = (const __le32 *)
3802 (adev->gfx.mec_fw->data +
3803 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3804 table_offset = le32_to_cpu(hdr->jt_offset);
3805 table_size = le32_to_cpu(hdr->jt_size);
3806 } else {
3807 const struct gfx_firmware_header_v1_0 *hdr =
3808 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3809 fw_data = (const __le32 *)
3810 (adev->gfx.mec2_fw->data +
3811 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3812 table_offset = le32_to_cpu(hdr->jt_offset);
3813 table_size = le32_to_cpu(hdr->jt_size);
3814 }
3815
3816 	for (i = 0; i < table_size; i++) {
3817 dst_ptr[bo_offset + i] =
3818 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
3819 }
3820
3821 bo_offset += table_size;
3822 }
3823 }
3824
3825 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3826 bool enable)
3827 {
3828 u32 data, orig;
3829
3830 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3831 orig = data = RREG32(mmRLC_PG_CNTL);
3832 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3833 if (orig != data)
3834 WREG32(mmRLC_PG_CNTL, data);
3835
3836 orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3837 data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3838 if (orig != data)
3839 WREG32(mmRLC_AUTO_PG_CTRL, data);
3840 } else {
3841 orig = data = RREG32(mmRLC_PG_CNTL);
3842 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3843 if (orig != data)
3844 WREG32(mmRLC_PG_CNTL, data);
3845
3846 orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3847 data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3848 if (orig != data)
3849 WREG32(mmRLC_AUTO_PG_CTRL, data);
3850
3851 data = RREG32(mmDB_RENDER_CONTROL);
3852 }
3853 }
3854
3855 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3856 u32 bitmap)
3857 {
3858 u32 data;
3859
3860 if (!bitmap)
3861 return;
3862
3863 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3864 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3865
3866 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3867 }
3868
3869 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3870 {
3871 u32 data, mask;
3872
3873 data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
3874 data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
3875
3876 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3877 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3878
3879 mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
3880
3881 return (~data) & mask;
3882 }
3883
3884 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3885 {
3886 u32 tmp;
3887
3888 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3889
3890 tmp = RREG32(mmRLC_MAX_PG_CU);
3891 tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
3892 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3893 WREG32(mmRLC_MAX_PG_CU, tmp);
3894 }
3895
3896 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3897 bool enable)
3898 {
3899 u32 data, orig;
3900
3901 orig = data = RREG32(mmRLC_PG_CNTL);
3902 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3903 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3904 else
3905 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3906 if (orig != data)
3907 WREG32(mmRLC_PG_CNTL, data);
3908 }
3909
3910 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3911 bool enable)
3912 {
3913 u32 data, orig;
3914
3915 orig = data = RREG32(mmRLC_PG_CNTL);
3916 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3917 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3918 else
3919 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3920 if (orig != data)
3921 WREG32(mmRLC_PG_CNTL, data);
3922 }
3923
3924 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
3925 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
3926
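/*
 * gfx_v7_0_init_gfx_cgpg - program the RLC for gfx power gating
 *
 * Writes the clear-state descriptor (address + size) and the
 * save/restore register list into the RLC GPM scratch area at the
 * offsets defined above, then sets up the save/restore and jump table
 * base addresses and the idle poll/delay thresholds.
 */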
3927 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3928 {
3929 u32 data, orig;
3930 u32 i;
3931
3932 if (adev->gfx.rlc.cs_data) {
3933 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3934 WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3935 WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3936 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3937 } else {
3938 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3939 for (i = 0; i < 3; i++)
3940 WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
3941 }
3942 if (adev->gfx.rlc.reg_list) {
3943 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
3944 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3945 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3946 }
3947
3948 orig = data = RREG32(mmRLC_PG_CNTL);
3949 data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
3950 if (orig != data)
3951 WREG32(mmRLC_PG_CNTL, data);
3952
3953 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3954 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3955
3956 data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3957 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3958 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3959 WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
3960
3961 data = 0x10101010;
3962 WREG32(mmRLC_PG_DELAY, data);
3963
3964 data = RREG32(mmRLC_PG_DELAY_2);
3965 data &= ~0xff;
3966 data |= 0x3;
3967 WREG32(mmRLC_PG_DELAY_2, data);
3968
3969 data = RREG32(mmRLC_AUTO_PG_CTRL);
3970 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
3971 data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
3972 WREG32(mmRLC_AUTO_PG_CTRL, data);
3973
3974 }
3975
3976 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3977 {
3978 gfx_v7_0_enable_gfx_cgpg(adev, enable);
3979 gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3980 gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
3981 }
3982
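/*
 * gfx_v7_0_get_csb_size - size of the clear state buffer in dwords
 *
 * Mirrors the layout emitted by gfx_v7_0_get_csb_buffer(): preamble
 * begin (2) + context control (3) + one SET_CONTEXT_REG packet per
 * extent (2 + reg_count) + raster config (4) + preamble end (2) +
 * CLEAR_STATE (2).
 */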
3983 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3984 {
3985 u32 count = 0;
3986 const struct cs_section_def *sect = NULL;
3987 const struct cs_extent_def *ext = NULL;
3988
3989 if (adev->gfx.rlc.cs_data == NULL)
3990 return 0;
3991
3992 /* begin clear state */
3993 count += 2;
3994 /* context control state */
3995 count += 3;
3996
3997 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3998 for (ext = sect->section; ext->extent != NULL; ++ext) {
3999 if (sect->id == SECT_CONTEXT)
4000 count += 2 + ext->reg_count;
4001 else
4002 return 0;
4003 }
4004 }
4005 /* pa_sc_raster_config/pa_sc_raster_config1 */
4006 count += 4;
4007 /* end clear state */
4008 count += 2;
4009 /* clear state */
4010 count += 2;
4011
4012 return count;
4013 }
4014
4015 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4016 volatile u32 *buffer)
4017 {
4018 u32 count = 0, i;
4019 const struct cs_section_def *sect = NULL;
4020 const struct cs_extent_def *ext = NULL;
4021
4022 if (adev->gfx.rlc.cs_data == NULL)
4023 return;
4024 if (buffer == NULL)
4025 return;
4026
4027 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4028 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4029
4030 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4031 buffer[count++] = cpu_to_le32(0x80000000);
4032 buffer[count++] = cpu_to_le32(0x80000000);
4033
4034 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4035 for (ext = sect->section; ext->extent != NULL; ++ext) {
4036 if (sect->id == SECT_CONTEXT) {
4037 buffer[count++] =
4038 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
4039 buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4040 for (i = 0; i < ext->reg_count; i++)
4041 buffer[count++] = cpu_to_le32(ext->extent[i]);
4042 } else {
4043 return;
4044 }
4045 }
4046 }
4047
4048 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4049 buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4050 switch (adev->asic_type) {
4051 case CHIP_BONAIRE:
4052 buffer[count++] = cpu_to_le32(0x16000012);
4053 buffer[count++] = cpu_to_le32(0x00000000);
4054 break;
4055 case CHIP_KAVERI:
4056 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4057 buffer[count++] = cpu_to_le32(0x00000000);
4058 break;
4059 case CHIP_KABINI:
4060 case CHIP_MULLINS:
4061 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4062 buffer[count++] = cpu_to_le32(0x00000000);
4063 break;
4064 case CHIP_HAWAII:
4065 buffer[count++] = cpu_to_le32(0x3a00161a);
4066 buffer[count++] = cpu_to_le32(0x0000002e);
4067 break;
4068 default:
4069 buffer[count++] = cpu_to_le32(0x00000000);
4070 buffer[count++] = cpu_to_le32(0x00000000);
4071 break;
4072 }
4073
4074 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4075 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
4076
4077 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4078 buffer[count++] = cpu_to_le32(0);
4079 }
4080
4081 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4082 {
4083 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4084 AMD_PG_SUPPORT_GFX_SMG |
4085 AMD_PG_SUPPORT_GFX_DMG |
4086 AMD_PG_SUPPORT_CP |
4087 AMD_PG_SUPPORT_GDS |
4088 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4089 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4090 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4091 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4092 gfx_v7_0_init_gfx_cgpg(adev);
4093 gfx_v7_0_enable_cp_pg(adev, true);
4094 gfx_v7_0_enable_gds_pg(adev, true);
4095 }
4096 gfx_v7_0_init_ao_cu_mask(adev);
4097 gfx_v7_0_update_gfx_pg(adev, true);
4098 }
4099 }
4100
4101 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4102 {
4103 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4104 AMD_PG_SUPPORT_GFX_SMG |
4105 AMD_PG_SUPPORT_GFX_DMG |
4106 AMD_PG_SUPPORT_CP |
4107 AMD_PG_SUPPORT_GDS |
4108 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4109 gfx_v7_0_update_gfx_pg(adev, false);
4110 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4111 gfx_v7_0_enable_cp_pg(adev, false);
4112 gfx_v7_0_enable_gds_pg(adev, false);
4113 }
4114 }
4115 }
4116
4117 /**
4118 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4119 *
4120 * @adev: amdgpu_device pointer
4121 *
4122 * Fetches a GPU clock counter snapshot (CIK).
4123 * Returns the 64-bit clock counter snapshot.
4124 */
4125 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4126 {
4127 uint64_t clock;
4128
4129 mutex_lock(&adev->gfx.gpu_clock_mutex);
4130 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4131 clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4132 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4133 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4134 return clock;
4135 }
4136
4137 static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4138 uint32_t vmid,
4139 uint32_t gds_base, uint32_t gds_size,
4140 uint32_t gws_base, uint32_t gws_size,
4141 uint32_t oa_base, uint32_t oa_size)
4142 {
4143 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
4144 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
4145
4146 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
4147 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
4148
4149 oa_base = oa_base >> AMDGPU_OA_SHIFT;
4150 oa_size = oa_size >> AMDGPU_OA_SHIFT;
4151
4152 /* GDS Base */
4153 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4154 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4155 WRITE_DATA_DST_SEL(0)));
4156 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4157 amdgpu_ring_write(ring, 0);
4158 amdgpu_ring_write(ring, gds_base);
4159
4160 /* GDS Size */
4161 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4162 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4163 WRITE_DATA_DST_SEL(0)));
4164 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4165 amdgpu_ring_write(ring, 0);
4166 amdgpu_ring_write(ring, gds_size);
4167
4168 /* GWS */
4169 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4170 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4171 WRITE_DATA_DST_SEL(0)));
4172 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4173 amdgpu_ring_write(ring, 0);
4174 amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4175
4176 /* OA */
4177 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4178 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4179 WRITE_DATA_DST_SEL(0)));
4180 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4181 amdgpu_ring_write(ring, 0);
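	/* contiguous bitmask of oa_size bits starting at bit oa_base */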
4182 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4183 }
4184
4185 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4186 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4187 .select_se_sh = &gfx_v7_0_select_se_sh,
4188 };
4189
4190 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4191 .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
4192 .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
4193 };
4194
4195 static int gfx_v7_0_early_init(void *handle)
4196 {
4197 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4198
4199 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4200 adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
4201 adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4202 adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4203 gfx_v7_0_set_ring_funcs(adev);
4204 gfx_v7_0_set_irq_funcs(adev);
4205 gfx_v7_0_set_gds_init(adev);
4206
4207 return 0;
4208 }
4209
4210 static int gfx_v7_0_late_init(void *handle)
4211 {
4212 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4213 int r;
4214
4215 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4216 if (r)
4217 return r;
4218
4219 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4220 if (r)
4221 return r;
4222
4223 return 0;
4224 }
4225
4226 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4227 {
4228 u32 gb_addr_config;
4229 u32 mc_shared_chmap, mc_arb_ramcfg;
4230 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4231 u32 tmp;
4232
4233 switch (adev->asic_type) {
4234 case CHIP_BONAIRE:
4235 adev->gfx.config.max_shader_engines = 2;
4236 adev->gfx.config.max_tile_pipes = 4;
4237 adev->gfx.config.max_cu_per_sh = 7;
4238 adev->gfx.config.max_sh_per_se = 1;
4239 adev->gfx.config.max_backends_per_se = 2;
4240 adev->gfx.config.max_texture_channel_caches = 4;
4241 adev->gfx.config.max_gprs = 256;
4242 adev->gfx.config.max_gs_threads = 32;
4243 adev->gfx.config.max_hw_contexts = 8;
4244
4245 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4246 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4247 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4248 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4249 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4250 break;
4251 case CHIP_HAWAII:
4252 adev->gfx.config.max_shader_engines = 4;
4253 adev->gfx.config.max_tile_pipes = 16;
4254 adev->gfx.config.max_cu_per_sh = 11;
4255 adev->gfx.config.max_sh_per_se = 1;
4256 adev->gfx.config.max_backends_per_se = 4;
4257 adev->gfx.config.max_texture_channel_caches = 16;
4258 adev->gfx.config.max_gprs = 256;
4259 adev->gfx.config.max_gs_threads = 32;
4260 adev->gfx.config.max_hw_contexts = 8;
4261
4262 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4263 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4264 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4265 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4266 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4267 break;
4268 case CHIP_KAVERI:
4269 adev->gfx.config.max_shader_engines = 1;
4270 adev->gfx.config.max_tile_pipes = 4;
4271 if ((adev->pdev->device == 0x1304) ||
4272 (adev->pdev->device == 0x1305) ||
4273 (adev->pdev->device == 0x130C) ||
4274 (adev->pdev->device == 0x130F) ||
4275 (adev->pdev->device == 0x1310) ||
4276 (adev->pdev->device == 0x1311) ||
4277 (adev->pdev->device == 0x131C)) {
4278 adev->gfx.config.max_cu_per_sh = 8;
4279 adev->gfx.config.max_backends_per_se = 2;
4280 } else if ((adev->pdev->device == 0x1309) ||
4281 (adev->pdev->device == 0x130A) ||
4282 (adev->pdev->device == 0x130D) ||
4283 (adev->pdev->device == 0x1313) ||
4284 (adev->pdev->device == 0x131D)) {
4285 adev->gfx.config.max_cu_per_sh = 6;
4286 adev->gfx.config.max_backends_per_se = 2;
4287 } else if ((adev->pdev->device == 0x1306) ||
4288 (adev->pdev->device == 0x1307) ||
4289 (adev->pdev->device == 0x130B) ||
4290 (adev->pdev->device == 0x130E) ||
4291 (adev->pdev->device == 0x1315) ||
4292 (adev->pdev->device == 0x131B)) {
4293 adev->gfx.config.max_cu_per_sh = 4;
4294 adev->gfx.config.max_backends_per_se = 1;
4295 } else {
4296 adev->gfx.config.max_cu_per_sh = 3;
4297 adev->gfx.config.max_backends_per_se = 1;
4298 }
4299 adev->gfx.config.max_sh_per_se = 1;
4300 adev->gfx.config.max_texture_channel_caches = 4;
4301 adev->gfx.config.max_gprs = 256;
4302 adev->gfx.config.max_gs_threads = 16;
4303 adev->gfx.config.max_hw_contexts = 8;
4304
4305 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4306 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4307 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4308 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4309 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4310 break;
4311 case CHIP_KABINI:
4312 case CHIP_MULLINS:
4313 default:
4314 adev->gfx.config.max_shader_engines = 1;
4315 adev->gfx.config.max_tile_pipes = 2;
4316 adev->gfx.config.max_cu_per_sh = 2;
4317 adev->gfx.config.max_sh_per_se = 1;
4318 adev->gfx.config.max_backends_per_se = 1;
4319 adev->gfx.config.max_texture_channel_caches = 2;
4320 adev->gfx.config.max_gprs = 256;
4321 adev->gfx.config.max_gs_threads = 16;
4322 adev->gfx.config.max_hw_contexts = 8;
4323
4324 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4325 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4326 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4327 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4328 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4329 break;
4330 }
4331
4332 mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
4333 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4334 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4335
4336 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4337 adev->gfx.config.mem_max_burst_length_bytes = 256;
4338 if (adev->flags & AMD_IS_APU) {
4339 /* Get memory bank mapping mode. */
4340 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4341 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4342 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4343
4344 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4345 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4346 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4347
4348 		/* Validate settings in case only one DIMM is installed. */
4349 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4350 dimm00_addr_map = 0;
4351 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4352 dimm01_addr_map = 0;
4353 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4354 dimm10_addr_map = 0;
4355 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4356 dimm11_addr_map = 0;
4357
4358 /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4359 		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
4360 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4361 adev->gfx.config.mem_row_size_in_kb = 2;
4362 else
4363 adev->gfx.config.mem_row_size_in_kb = 1;
4364 } else {
4365 tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4366 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4367 if (adev->gfx.config.mem_row_size_in_kb > 4)
4368 adev->gfx.config.mem_row_size_in_kb = 4;
4369 }
4370 /* XXX use MC settings? */
4371 adev->gfx.config.shader_engine_tile_size = 32;
4372 adev->gfx.config.num_gpus = 1;
4373 adev->gfx.config.multi_gpu_tile_size = 64;
4374
4375 /* fix up row size */
4376 gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4377 switch (adev->gfx.config.mem_row_size_in_kb) {
4378 case 1:
4379 default:
4380 gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4381 break;
4382 case 2:
4383 gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4384 break;
4385 case 4:
4386 gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4387 break;
4388 }
4389 adev->gfx.config.gb_addr_config = gb_addr_config;
4390 }
4391
4392 static int gfx_v7_0_sw_init(void *handle)
4393 {
4394 struct amdgpu_ring *ring;
4395 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4396 int i, r;
4397
4398 /* EOP Event */
4399 r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
4400 if (r)
4401 return r;
4402
4403 /* Privileged reg */
4404 r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
4405 if (r)
4406 return r;
4407
4408 /* Privileged inst */
4409 r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
4410 if (r)
4411 return r;
4412
4413 gfx_v7_0_scratch_init(adev);
4414
4415 r = gfx_v7_0_init_microcode(adev);
4416 if (r) {
4417 DRM_ERROR("Failed to load gfx firmware!\n");
4418 return r;
4419 }
4420
4421 r = gfx_v7_0_rlc_init(adev);
4422 if (r) {
4423 DRM_ERROR("Failed to init rlc BOs!\n");
4424 return r;
4425 }
4426
4427 /* allocate mec buffers */
4428 r = gfx_v7_0_mec_init(adev);
4429 if (r) {
4430 DRM_ERROR("Failed to init MEC BOs!\n");
4431 return r;
4432 }
4433
4434 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4435 ring = &adev->gfx.gfx_ring[i];
4436 ring->ring_obj = NULL;
4437 sprintf(ring->name, "gfx");
4438 r = amdgpu_ring_init(adev, ring, 1024,
4439 PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
4440 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
4441 AMDGPU_RING_TYPE_GFX);
4442 if (r)
4443 return r;
4444 }
4445
4446 /* set up the compute queues */
4447 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4448 unsigned irq_type;
4449
4450 /* max 32 queues per MEC */
4451 if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
4452 DRM_ERROR("Too many (%d) compute rings!\n", i);
4453 break;
4454 }
4455 ring = &adev->gfx.compute_ring[i];
4456 ring->ring_obj = NULL;
4457 ring->use_doorbell = true;
4458 ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
4459 ring->me = 1; /* first MEC */
4460 ring->pipe = i / 8;
4461 ring->queue = i % 8;
4462 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4463 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
4464 /* type-2 packets are deprecated on MEC, use type-3 instead */
4465 r = amdgpu_ring_init(adev, ring, 1024,
4466 PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
4467 &adev->gfx.eop_irq, irq_type,
4468 AMDGPU_RING_TYPE_COMPUTE);
4469 if (r)
4470 return r;
4471 }
4472
4473 /* reserve GDS, GWS and OA resource for gfx */
4474 r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
4475 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
4476 &adev->gds.gds_gfx_bo, NULL, NULL);
4477 if (r)
4478 return r;
4479
4480 r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
4481 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
4482 &adev->gds.gws_gfx_bo, NULL, NULL);
4483 if (r)
4484 return r;
4485
4486 r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
4487 PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
4488 &adev->gds.oa_gfx_bo, NULL, NULL);
4489 if (r)
4490 return r;
4491
4492 adev->gfx.ce_ram_size = 0x8000;
4493
4494 gfx_v7_0_gpu_early_init(adev);
4495
4496 return r;
4497 }
4498
4499 static int gfx_v7_0_sw_fini(void *handle)
4500 {
4501 int i;
4502 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4503
4504 amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
4505 amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
4506 amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
4507
4508 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4509 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4510 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4511 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4512
4513 gfx_v7_0_cp_compute_fini(adev);
4514 gfx_v7_0_rlc_fini(adev);
4515 gfx_v7_0_mec_fini(adev);
4516 gfx_v7_0_free_microcode(adev);
4517
4518 return 0;
4519 }
4520
4521 static int gfx_v7_0_hw_init(void *handle)
4522 {
4523 int r;
4524 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4525
4526 gfx_v7_0_gpu_init(adev);
4527
4528 /* init rlc */
4529 r = gfx_v7_0_rlc_resume(adev);
4530 if (r)
4531 return r;
4532
4533 r = gfx_v7_0_cp_resume(adev);
4534 if (r)
4535 return r;
4536
4537 return r;
4538 }
4539
4540 static int gfx_v7_0_hw_fini(void *handle)
4541 {
4542 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4543
4544 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4545 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4546 gfx_v7_0_cp_enable(adev, false);
4547 gfx_v7_0_rlc_stop(adev);
4548 gfx_v7_0_fini_pg(adev);
4549
4550 return 0;
4551 }
4552
4553 static int gfx_v7_0_suspend(void *handle)
4554 {
4555 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4556
4557 return gfx_v7_0_hw_fini(adev);
4558 }
4559
4560 static int gfx_v7_0_resume(void *handle)
4561 {
4562 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4563
4564 return gfx_v7_0_hw_init(adev);
4565 }
4566
4567 static bool gfx_v7_0_is_idle(void *handle)
4568 {
4569 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4570
4571 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4572 return false;
4573 else
4574 return true;
4575 }
4576
4577 static int gfx_v7_0_wait_for_idle(void *handle)
4578 {
4579 unsigned i;
4580 u32 tmp;
4581 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4582
4583 for (i = 0; i < adev->usec_timeout; i++) {
4584 		/* read GRBM_STATUS */
4585 tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4586
4587 if (!tmp)
4588 return 0;
4589 udelay(1);
4590 }
4591 return -ETIMEDOUT;
4592 }
4593
4594 static int gfx_v7_0_soft_reset(void *handle)
4595 {
4596 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4597 u32 tmp;
4598 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4599
4600 /* GRBM_STATUS */
4601 tmp = RREG32(mmGRBM_STATUS);
4602 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4603 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4604 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4605 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4606 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4607 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4608 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4609 GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4610
4611 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4612 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4613 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4614 }
4615
4616 /* GRBM_STATUS2 */
4617 tmp = RREG32(mmGRBM_STATUS2);
4618 if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4619 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4620
4621 /* SRBM_STATUS */
4622 tmp = RREG32(mmSRBM_STATUS);
4623 if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4624 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4625
4626 if (grbm_soft_reset || srbm_soft_reset) {
4627 /* disable CG/PG */
4628 gfx_v7_0_fini_pg(adev);
4629 gfx_v7_0_update_cg(adev, false);
4630
4631 /* stop the rlc */
4632 gfx_v7_0_rlc_stop(adev);
4633
4634 /* Disable GFX parsing/prefetching */
4635 WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4636
4637 /* Disable MEC parsing/prefetching */
4638 WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4639
4640 if (grbm_soft_reset) {
4641 tmp = RREG32(mmGRBM_SOFT_RESET);
4642 tmp |= grbm_soft_reset;
4643 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4644 WREG32(mmGRBM_SOFT_RESET, tmp);
4645 tmp = RREG32(mmGRBM_SOFT_RESET);
4646
4647 udelay(50);
4648
4649 tmp &= ~grbm_soft_reset;
4650 WREG32(mmGRBM_SOFT_RESET, tmp);
4651 tmp = RREG32(mmGRBM_SOFT_RESET);
4652 }
4653
4654 if (srbm_soft_reset) {
4655 tmp = RREG32(mmSRBM_SOFT_RESET);
4656 tmp |= srbm_soft_reset;
4657 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4658 WREG32(mmSRBM_SOFT_RESET, tmp);
4659 tmp = RREG32(mmSRBM_SOFT_RESET);
4660
4661 udelay(50);
4662
4663 tmp &= ~srbm_soft_reset;
4664 WREG32(mmSRBM_SOFT_RESET, tmp);
4665 tmp = RREG32(mmSRBM_SOFT_RESET);
4666 }
4667 /* Wait a little for things to settle down */
4668 udelay(50);
4669 }
4670 return 0;
4671 }
4672
4673 static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4674 enum amdgpu_interrupt_state state)
4675 {
4676 u32 cp_int_cntl;
4677
4678 switch (state) {
4679 case AMDGPU_IRQ_STATE_DISABLE:
4680 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4681 cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4682 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4683 break;
4684 case AMDGPU_IRQ_STATE_ENABLE:
4685 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4686 cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4687 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4688 break;
4689 default:
4690 break;
4691 }
4692 }
4693
4694 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4695 int me, int pipe,
4696 enum amdgpu_interrupt_state state)
4697 {
4698 u32 mec_int_cntl, mec_int_cntl_reg;
4699
4700 /*
4701 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4702 * handles the setting of interrupts for this specific pipe. All other
4703 * pipes' interrupts are set by amdkfd.
4704 */
4705
4706 if (me == 1) {
4707 switch (pipe) {
4708 case 0:
4709 mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4710 break;
4711 default:
4712 DRM_DEBUG("invalid pipe %d\n", pipe);
4713 return;
4714 }
4715 } else {
4716 DRM_DEBUG("invalid me %d\n", me);
4717 return;
4718 }
4719
4720 switch (state) {
4721 case AMDGPU_IRQ_STATE_DISABLE:
4722 mec_int_cntl = RREG32(mec_int_cntl_reg);
4723 mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4724 WREG32(mec_int_cntl_reg, mec_int_cntl);
4725 break;
4726 case AMDGPU_IRQ_STATE_ENABLE:
4727 mec_int_cntl = RREG32(mec_int_cntl_reg);
4728 mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4729 WREG32(mec_int_cntl_reg, mec_int_cntl);
4730 break;
4731 default:
4732 break;
4733 }
4734 }
4735
4736 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4737 struct amdgpu_irq_src *src,
4738 unsigned type,
4739 enum amdgpu_interrupt_state state)
4740 {
4741 u32 cp_int_cntl;
4742
4743 switch (state) {
4744 case AMDGPU_IRQ_STATE_DISABLE:
4745 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4746 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4747 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4748 break;
4749 case AMDGPU_IRQ_STATE_ENABLE:
4750 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4751 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4752 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4753 break;
4754 default:
4755 break;
4756 }
4757
4758 return 0;
4759 }
4760
4761 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4762 struct amdgpu_irq_src *src,
4763 unsigned type,
4764 enum amdgpu_interrupt_state state)
4765 {
4766 u32 cp_int_cntl;
4767
4768 switch (state) {
4769 case AMDGPU_IRQ_STATE_DISABLE:
4770 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4771 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4772 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4773 break;
4774 case AMDGPU_IRQ_STATE_ENABLE:
4775 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4776 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4777 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4778 break;
4779 default:
4780 break;
4781 }
4782
4783 return 0;
4784 }
4785
4786 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4787 struct amdgpu_irq_src *src,
4788 unsigned type,
4789 enum amdgpu_interrupt_state state)
4790 {
4791 switch (type) {
4792 case AMDGPU_CP_IRQ_GFX_EOP:
4793 gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4794 break;
4795 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4796 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4797 break;
4798 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4799 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4800 break;
4801 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4802 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4803 break;
4804 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4805 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4806 break;
4807 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4808 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4809 break;
4810 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4811 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4812 break;
4813 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4814 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4815 break;
4816 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4817 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4818 break;
4819 default:
4820 break;
4821 }
4822 return 0;
4823 }
4824
4825 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4826 struct amdgpu_irq_src *source,
4827 struct amdgpu_iv_entry *entry)
4828 {
4829 u8 me_id, pipe_id;
4830 struct amdgpu_ring *ring;
4831 int i;
4832
4833 DRM_DEBUG("IH: CP EOP\n");
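	/* ring_id encodes the source queue: bits [3:2] = ME, bits [1:0] = pipe */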
4834 me_id = (entry->ring_id & 0x0c) >> 2;
4835 pipe_id = (entry->ring_id & 0x03) >> 0;
4836 switch (me_id) {
4837 case 0:
4838 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4839 break;
4840 case 1:
4841 case 2:
4842 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4843 ring = &adev->gfx.compute_ring[i];
4844 if ((ring->me == me_id) && (ring->pipe == pipe_id))
4845 amdgpu_fence_process(ring);
4846 }
4847 break;
4848 }
4849 return 0;
4850 }
4851
4852 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4853 struct amdgpu_irq_src *source,
4854 struct amdgpu_iv_entry *entry)
4855 {
4856 DRM_ERROR("Illegal register access in command stream\n");
4857 schedule_work(&adev->reset_work);
4858 return 0;
4859 }
4860
4861 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4862 struct amdgpu_irq_src *source,
4863 struct amdgpu_iv_entry *entry)
4864 {
4865 DRM_ERROR("Illegal instruction in command stream\n");
4866 // XXX soft reset the gfx block only
4867 schedule_work(&adev->reset_work);
4868 return 0;
4869 }
4870
4871 static int gfx_v7_0_set_clockgating_state(void *handle,
4872 enum amd_clockgating_state state)
4873 {
4874 bool gate = false;
4875 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4876
4877 if (state == AMD_CG_STATE_GATE)
4878 gate = true;
4879
4880 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
4881 /* order matters! */
4882 if (gate) {
4883 gfx_v7_0_enable_mgcg(adev, true);
4884 gfx_v7_0_enable_cgcg(adev, true);
4885 } else {
4886 gfx_v7_0_enable_cgcg(adev, false);
4887 gfx_v7_0_enable_mgcg(adev, false);
4888 }
4889 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4890
4891 return 0;
4892 }
4893
4894 static int gfx_v7_0_set_powergating_state(void *handle,
4895 enum amd_powergating_state state)
4896 {
4897 bool gate = false;
4898 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4899
4900 if (state == AMD_PG_STATE_GATE)
4901 gate = true;
4902
4903 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4904 AMD_PG_SUPPORT_GFX_SMG |
4905 AMD_PG_SUPPORT_GFX_DMG |
4906 AMD_PG_SUPPORT_CP |
4907 AMD_PG_SUPPORT_GDS |
4908 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4909 gfx_v7_0_update_gfx_pg(adev, gate);
4910 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4911 gfx_v7_0_enable_cp_pg(adev, gate);
4912 gfx_v7_0_enable_gds_pg(adev, gate);
4913 }
4914 }
4915
4916 return 0;
4917 }
4918
4919 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
4920 .name = "gfx_v7_0",
4921 .early_init = gfx_v7_0_early_init,
4922 .late_init = gfx_v7_0_late_init,
4923 .sw_init = gfx_v7_0_sw_init,
4924 .sw_fini = gfx_v7_0_sw_fini,
4925 .hw_init = gfx_v7_0_hw_init,
4926 .hw_fini = gfx_v7_0_hw_fini,
4927 .suspend = gfx_v7_0_suspend,
4928 .resume = gfx_v7_0_resume,
4929 .is_idle = gfx_v7_0_is_idle,
4930 .wait_for_idle = gfx_v7_0_wait_for_idle,
4931 .soft_reset = gfx_v7_0_soft_reset,
4932 .set_clockgating_state = gfx_v7_0_set_clockgating_state,
4933 .set_powergating_state = gfx_v7_0_set_powergating_state,
4934 };
4935
4936 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
4937 .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
4938 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
4939 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
4940 .parse_cs = NULL,
4941 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
4942 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
4943 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
4944 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
4945 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
4946 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
4947 .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
4948 .test_ring = gfx_v7_0_ring_test_ring,
4949 .test_ib = gfx_v7_0_ring_test_ib,
4950 .insert_nop = amdgpu_ring_insert_nop,
4951 .pad_ib = amdgpu_ring_generic_pad_ib,
4952 };
4953
4954 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
4955 .get_rptr = gfx_v7_0_ring_get_rptr_compute,
4956 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
4957 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
4958 .parse_cs = NULL,
4959 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
4960 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
4961 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
4962 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
4963 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
4964 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
4965 .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
4966 .test_ring = gfx_v7_0_ring_test_ring,
4967 .test_ib = gfx_v7_0_ring_test_ib,
4968 .insert_nop = amdgpu_ring_insert_nop,
4969 .pad_ib = amdgpu_ring_generic_pad_ib,
4970 };
4971
4972 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
4973 {
4974 int i;
4975
4976 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4977 adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
4978 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4979 adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
4980 }
4981
4982 static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
4983 .set = gfx_v7_0_set_eop_interrupt_state,
4984 .process = gfx_v7_0_eop_irq,
4985 };
4986
4987 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
4988 .set = gfx_v7_0_set_priv_reg_fault_state,
4989 .process = gfx_v7_0_priv_reg_irq,
4990 };
4991
4992 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
4993 .set = gfx_v7_0_set_priv_inst_fault_state,
4994 .process = gfx_v7_0_priv_inst_irq,
4995 };
4996
4997 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
4998 {
4999 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5000 adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5001
5002 adev->gfx.priv_reg_irq.num_types = 1;
5003 adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5004
5005 adev->gfx.priv_inst_irq.num_types = 1;
5006 adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5007 }
5008
5009 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5010 {
5011 	/* init asic gds info */
5012 adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5013 adev->gds.gws.total_size = 64;
5014 adev->gds.oa.total_size = 16;
5015
5016 if (adev->gds.mem.total_size == 64 * 1024) {
5017 adev->gds.mem.gfx_partition_size = 4096;
5018 adev->gds.mem.cs_partition_size = 4096;
5019
5020 adev->gds.gws.gfx_partition_size = 4;
5021 adev->gds.gws.cs_partition_size = 4;
5022
5023 adev->gds.oa.gfx_partition_size = 4;
5024 adev->gds.oa.cs_partition_size = 1;
5025 } else {
5026 adev->gds.mem.gfx_partition_size = 1024;
5027 adev->gds.mem.cs_partition_size = 1024;
5028
5029 adev->gds.gws.gfx_partition_size = 16;
5030 adev->gds.gws.cs_partition_size = 16;
5031
5032 adev->gds.oa.gfx_partition_size = 4;
5033 adev->gds.oa.cs_partition_size = 4;
5034 }
5035 }
5036
5037
5038 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5039 {
5040 int i, j, k, counter, active_cu_number = 0;
5041 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5042 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5043 unsigned disable_masks[4 * 2];
5044
5045 memset(cu_info, 0, sizeof(*cu_info));
5046
5047 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5048
5049 mutex_lock(&adev->grbm_idx_mutex);
5050 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5051 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5052 mask = 1;
5053 ao_bitmap = 0;
5054 counter = 0;
5055 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5056 if (i < 4 && j < 2)
5057 gfx_v7_0_set_user_cu_inactive_bitmap(
5058 adev, disable_masks[i * 2 + j]);
5059 bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5060 cu_info->bitmap[i][j] = bitmap;
5061
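			/* the first two active CUs in each SH are treated as always-on (AO) */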
5062 			for (k = 0; k < 16; k++) {
5063 if (bitmap & mask) {
5064 if (counter < 2)
5065 ao_bitmap |= mask;
5066 					counter++;
5067 }
5068 mask <<= 1;
5069 }
5070 active_cu_number += counter;
5071 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5072 }
5073 }
5074 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5075 mutex_unlock(&adev->grbm_idx_mutex);
5076
5077 cu_info->number = active_cu_number;
5078 cu_info->ao_cu_mask = ao_cu_mask;
5079 }