drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

/**
 * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle (CIK).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read the MC busy bits (0x1F00) from SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
		return 0;
	default:
		BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
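		/* note: with running == 0 checked just above, this inner
		 * branch can never execute; it looks like a leftover from
		 * the radeon version of this loader, which tested only
		 * "if (running)"
		 */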
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
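	/* five raw-offset registers (0xb05..0xb09) are cleared for each of
	 * 32 groups, stepping 6 dwords per group; gmc_v7_0_print_status()
	 * below dumps the same offsets
	 */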
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v7_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could the aperture size ever report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart size
	 * to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the GPU's physical address space, as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

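	/* a PTE is 64 bits: the 4 KiB-aligned physical address in the
	 * upper bits, access flags in the low 12 bits
	 */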
	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

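	/* raw dword offsets 0x575-0x577 (byte addresses 0x15D4-0x15DC);
	 * the same unnamed registers are dumped in gmc_v7_0_print_status()
	 */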
	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
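	/* contexts 0-7 have contiguous base-address registers; contexts
	 * 8-15 sit in a second contiguous block starting at
	 * VM_CONTEXT8_PAGE_TABLE_BASE_ADDR, hence the split below
	 */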
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
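	/* one 64-bit PTE per GPU page */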
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the GPU's physical address space, as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
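		/* MC_VM_FB_OFFSET is in units of 4 MiB (1 << 22) */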
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * amdgpu_vm_size is in GiB; shifting by 18 converts it to 4k
	 * pages (1 GiB = 1 << 18 pages). Max GPUVM size on CIK is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits.
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics.
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v7_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 7.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  0x15D4=0x%08X\n",
		 RREG32(0x575));
	dev_info(adev->dev, "  0x15D8=0x%08X\n",
		 RREG32(0x576));
	dev_info(adev->dev, "  0x15DC=0x%08X\n",
		 RREG32(0x577));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	if (adev->asic_type == CHIP_KAVERI) {
		dev_info(adev->dev, "  CHUB_CONTROL=0x%08X\n",
			 RREG32(mmCHUB_CONTROL));
	}

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_print_status((void *)adev);

		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
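		/* read back so the write lands before the delay below */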
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v7_0_print_status((void *)adev);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.print_status = gmc_v7_0_print_status,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}