drm/radeon: separate UVD code v3
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/* Human-readable ASIC names for log output; presumably indexed by the
 * radeon_family enum (order must match it) — "LAST" is the terminator. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"LAST",
};
103
2e1b65f9
AD
104/**
105 * radeon_program_register_sequence - program an array of registers.
106 *
107 * @rdev: radeon_device pointer
108 * @registers: pointer to the register array
109 * @array_size: size of the register array
110 *
111 * Programs an array or registers with and and or masks.
112 * This is a helper for setting golden registers.
113 */
114void radeon_program_register_sequence(struct radeon_device *rdev,
115 const u32 *registers,
116 const u32 array_size)
117{
118 u32 tmp, reg, and_mask, or_mask;
119 int i;
120
121 if (array_size % 3)
122 return;
123
124 for (i = 0; i < array_size; i +=3) {
125 reg = registers[i + 0];
126 and_mask = registers[i + 1];
127 or_mask = registers[i + 2];
128
129 if (and_mask == 0xffffffff) {
130 tmp = or_mask;
131 } else {
132 tmp = RREG32(reg);
133 tmp &= ~and_mask;
134 tmp |= or_mask;
135 }
136 WREG32(reg, tmp);
137 }
138}
139
0c195119
AD
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			/* presumably re-programs the surface reg for BOs
			 * that own one and clears the unused slots —
			 * see radeon_bo_get_surface_reg */
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
163
771fe6b9
JG
/*
 * GPU scratch register helper functions.
 */
0c195119
AD
167/**
168 * radeon_scratch_init - Init scratch register driver information.
169 *
170 * @rdev: radeon_device pointer
171 *
172 * Init CP scratch register driver information (r1xx-r5xx)
173 */
3ce0a23d 174void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
175{
176 int i;
177
178 /* FIXME: check this out */
179 if (rdev->family < CHIP_R300) {
180 rdev->scratch.num_reg = 5;
181 } else {
182 rdev->scratch.num_reg = 7;
183 }
724c80e1 184 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
185 for (i = 0; i < rdev->scratch.num_reg; i++) {
186 rdev->scratch.free[i] = true;
724c80e1 187 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
188 }
189}
190
0c195119
AD
191/**
192 * radeon_scratch_get - Allocate a scratch register
193 *
194 * @rdev: radeon_device pointer
195 * @reg: scratch register mmio offset
196 *
197 * Allocate a CP scratch register for use by the driver (all asics).
198 * Returns 0 on success or -EINVAL on failure.
199 */
771fe6b9
JG
200int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
201{
202 int i;
203
204 for (i = 0; i < rdev->scratch.num_reg; i++) {
205 if (rdev->scratch.free[i]) {
206 rdev->scratch.free[i] = false;
207 *reg = rdev->scratch.reg[i];
208 return 0;
209 }
210 }
211 return -EINVAL;
212}
213
0c195119
AD
214/**
215 * radeon_scratch_free - Free a scratch register
216 *
217 * @rdev: radeon_device pointer
218 * @reg: scratch register mmio offset
219 *
220 * Free a CP scratch register allocated for use by the driver (all asics)
221 */
771fe6b9
JG
222void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
223{
224 int i;
225
226 for (i = 0; i < rdev->scratch.num_reg; i++) {
227 if (rdev->scratch.reg[i] == reg) {
228 rdev->scratch.free[i] = true;
229 return;
230 }
231 }
232}
233
75efdee1
AD
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
int radeon_doorbell_init(struct radeon_device *rdev)
{
	int i;

	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* limit to 4 MB for now */
	if (rdev->doorbell.size > (4 * 1024 * 1024))
		rdev->doorbell.size = 4 * 1024 * 1024;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* one allocatable doorbell slot per page of the BAR */
	rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;

	for (i = 0; i < rdev->doorbell.num_pages; i++) {
		rdev->doorbell.free[i] = true;
	}
	return 0;
}
271
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
void radeon_doorbell_fini(struct radeon_device *rdev)
{
	/* unmap the doorbell BAR and clear the stale pointer */
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
284
285/**
286 * radeon_doorbell_get - Allocate a doorbell page
287 *
288 * @rdev: radeon_device pointer
289 * @doorbell: doorbell page number
290 *
291 * Allocate a doorbell page for use by the driver (all asics).
292 * Returns 0 on success or -EINVAL on failure.
293 */
294int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
295{
296 int i;
297
298 for (i = 0; i < rdev->doorbell.num_pages; i++) {
299 if (rdev->doorbell.free[i]) {
300 rdev->doorbell.free[i] = false;
301 *doorbell = i;
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308/**
309 * radeon_doorbell_free - Free a doorbell page
310 *
311 * @rdev: radeon_device pointer
312 * @doorbell: doorbell page number
313 *
314 * Free a doorbell page allocated for use by the driver (all asics)
315 */
316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
317{
318 if (doorbell < rdev->doorbell.num_pages)
319 rdev->doorbell.free[doorbell] = true;
320}
321
0c195119
AD
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	/* driver-side flag only; presumably the asic-specific ring/fence
	 * code consults wb.enabled — no hw register is touched here */
	rdev->wb.enabled = false;
}
340
0c195119
AD
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* kunmap/unpin require a reserved BO; if the reserve
		 * fails we still drop our reference below */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
363
0c195119
AD
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Allocates the Writeback BO, pins it in GTT, maps it, and decides
 * whether writeback and event fences are usable on this asic.
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page of writeback memory, pinned in GTT */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
438
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 + along with lots
 * of ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* radeon_vram_limit module param (MB) converted to bytes; 0 = no limit */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 503
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space available after VRAM (aligned) and before VRAM */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* more room before VRAM: place GTT below it */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* otherwise place GTT just above VRAM, aligned up */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
539
771fe6b9
JG
/*
 * GPU helper functions.
 */
0c195119
AD
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* asics without display hw have no CRTCs to probe */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		/* OR together every CRTC the asic actually has */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
606
0c195119
AD
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz: divide the raw clocks (presumably in 10 kHz
	 * units — verify against the pm code) by 100 in fixed point */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
634
0c195119
AD
635/**
636 * radeon_boot_test_post_card - check and possibly initialize the hw
637 *
638 * @rdev: radeon_device pointer
639 *
640 * Check if the asic is initialized and if not, attempt to initialize
641 * it (all asics).
642 * Returns true if initialized or false if not.
643 */
72542d77
DA
644bool radeon_boot_test_post_card(struct radeon_device *rdev)
645{
646 if (radeon_card_posted(rdev))
647 return true;
648
649 if (rdev->bios) {
650 DRM_INFO("GPU not posted. posting now...\n");
651 if (rdev->is_atom_bios)
652 atom_asic_init(rdev->mode_info.atom_context);
653 else
654 radeon_combios_asic_init(rdev->ddev);
655 return true;
656 } else {
657 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
658 return false;
659 }
660}
661
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated: idempotent */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
689
0c195119
AD
690/**
691 * radeon_dummy_page_fini - free dummy page used by the driver
692 *
693 * @rdev: radeon_device pointer
694 *
695 * Frees the dummy page used by the driver (all asics).
696 */
3ce0a23d
JG
697void radeon_dummy_page_fini(struct radeon_device *rdev)
698{
699 if (rdev->dummy_page.page == NULL)
700 return;
701 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
702 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
703 __free_page(rdev->dummy_page.page);
704 rdev->dummy_page.page = NULL;
705}
706
771fe6b9 707
771fe6b9 708/* ATOM accessor methods */
0c195119
AD
709/*
710 * ATOM is an interpreted byte code stored in tables in the vbios. The
711 * driver registers callbacks to access registers and the interpreter
712 * in the driver parses the tables and executes then to program specific
713 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
714 * atombios.h, and atom.c
715 */
716
717/**
718 * cail_pll_read - read PLL register
719 *
720 * @info: atom card_info pointer
721 * @reg: PLL register offset
722 *
723 * Provides a PLL register accessor for the atom interpreter (r4xx+).
724 * Returns the value of the PLL register.
725 */
771fe6b9
JG
726static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
727{
728 struct radeon_device *rdev = info->dev->dev_private;
729 uint32_t r;
730
731 r = rdev->pll_rreg(rdev, reg);
732 return r;
733}
734
0c195119
AD
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* forward to the asic-specific pll write callback */
	rdev->pll_wreg(rdev, reg, val);
}
750
0c195119
AD
751/**
752 * cail_mc_read - read MC (Memory Controller) register
753 *
754 * @info: atom card_info pointer
755 * @reg: MC register offset
756 *
757 * Provides an MC register accessor for the atom interpreter (r4xx+).
758 * Returns the value of the MC register.
759 */
771fe6b9
JG
760static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
761{
762 struct radeon_device *rdev = info->dev->dev_private;
763 uint32_t r;
764
765 r = rdev->mc_rreg(rdev, reg);
766 return r;
767}
768
0c195119
AD
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the mc register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* forward to the asic-specific mc write callback */
	rdev->mc_wreg(rdev, reg, val);
}
784
0c195119
AD
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords; scaled to a byte offset below)
 * @val: value to write to the register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* WREG32 uses the local 'rdev' implicitly */
	WREG32(reg*4, val);
}
800
0c195119
AD
801/**
802 * cail_reg_read - read MMIO register
803 *
804 * @info: atom card_info pointer
805 * @reg: MMIO register offset
806 *
807 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
808 * Returns the value of the MMIO register.
809 */
771fe6b9
JG
810static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
811{
812 struct radeon_device *rdev = info->dev->dev_private;
813 uint32_t r;
814
815 r = RREG32(reg*4);
816 return r;
817}
818
0c195119
AD
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords; scaled to a byte offset below)
 * @val: value to write to the register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* WREG32_IO uses the local 'rdev' implicitly */
	WREG32_IO(reg*4, val);
}
834
0c195119
AD
835/**
836 * cail_ioreg_read - read IO register
837 *
838 * @info: atom card_info pointer
839 * @reg: IO register offset
840 *
841 * Provides an IO register accessor for the atom interpreter (r4xx+).
842 * Returns the value of the IO register.
843 */
351a52a2
AD
844static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
845{
846 struct radeon_device *rdev = info->dev->dev_private;
847 uint32_t r;
848
849 r = RREG32_IO(reg*4);
850 return r;
851}
852
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no PCI I/O BAR mapped: fall back to MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* fini frees atom_card_info and clears the pointers */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
900
0c195119
AD
/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	/* guard the deref of atom_context->scratch; kfree(NULL) is a no-op */
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}
920
0c195119
AD
921/* COMBIOS */
922/*
923 * COMBIOS is the bios format prior to ATOM. It provides
924 * command tables similar to ATOM, but doesn't have a unified
925 * parser. See radeon_combios.c
926 */
927
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
942
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* intentionally empty: combios init allocates nothing to free */
}
954
0c195119
AD
955/* if we get transitioned to only one device, take VGA back */
956/**
957 * radeon_vga_set_decode - enable/disable vga decode
958 *
959 * @cookie: radeon_device pointer
960 * @state: enable/disable vga decode
961 *
962 * Enable/disable vga decode (all asics).
963 * Returns VGA resource flags.
964 */
28d52043
DA
965static unsigned int radeon_vga_set_decode(void *cookie, bool state)
966{
967 struct radeon_device *rdev = cookie;
28d52043
DA
968 radeon_vga_set_state(rdev, state);
969 if (state)
970 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
971 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
972 else
973 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
974}
c1176d6f 975
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two has a single bit set, so clearing the lowest
	 * set bit yields zero; note 0 also passes this test, matching
	 * the historical behavior of the callers */
	return !(arg & (arg - 1));
}
988
0c195119
AD
989/**
990 * radeon_check_arguments - validate module params
991 *
992 * @rdev: radeon_device pointer
993 *
994 * Validates certain module parameters and updates
995 * the associated values used by the driver (all asics).
996 */
1109ca09 997static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
998{
999 /* vramlimit must be a power of two */
1bcb04f7 1000 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1001 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1002 radeon_vram_limit);
1003 radeon_vram_limit = 0;
36421338 1004 }
1bcb04f7 1005
edcd26e8
AD
1006 if (radeon_gart_size == -1) {
1007 /* default to a larger gart size on newer asics */
1008 if (rdev->family >= CHIP_RV770)
1009 radeon_gart_size = 1024;
1010 else
1011 radeon_gart_size = 512;
1012 }
36421338 1013 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1014 if (radeon_gart_size < 32) {
edcd26e8 1015 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1016 radeon_gart_size);
edcd26e8
AD
1017 if (rdev->family >= CHIP_RV770)
1018 radeon_gart_size = 1024;
1019 else
1020 radeon_gart_size = 512;
1bcb04f7 1021 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1022 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1023 radeon_gart_size);
edcd26e8
AD
1024 if (rdev->family >= CHIP_RV770)
1025 radeon_gart_size = 1024;
1026 else
1027 radeon_gart_size = 512;
36421338 1028 }
1bcb04f7
CK
1029 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1030
36421338
JG
1031 /* AGP mode can only be -1, 1, 2, 4, 8 */
1032 switch (radeon_agpmode) {
1033 case -1:
1034 case 0:
1035 case 1:
1036 case 2:
1037 case 4:
1038 case 8:
1039 break;
1040 default:
1041 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1042 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1043 radeon_agpmode = 0;
1044 break;
1045 }
1046}
1047
d1f9809e
ML
1048/**
1049 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1050 * needed for waking up.
1051 *
1052 * @pdev: pci dev pointer
1053 */
1054static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1055{
1056
1057 /* 6600m in a macbook pro */
1058 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1059 pdev->subsystem_device == 0x00e2) {
1060 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1061 return true;
1062 }
1063
1064 return false;
1065}
1066
0c195119
AD
1067/**
1068 * radeon_switcheroo_set_state - set switcheroo state
1069 *
1070 * @pdev: pci dev pointer
1071 * @state: vga switcheroo state
1072 *
1073 * Callback for the switcheroo driver. Suspends or resumes the
1074 * the asics before or after it is powered up using ACPI methods.
1075 */
6a9ee8af
DA
1076static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1077{
1078 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af
DA
1079 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1080 if (state == VGA_SWITCHEROO_ON) {
d1f9809e
ML
1081 unsigned d3_delay = dev->pdev->d3_delay;
1082
6a9ee8af
DA
1083 printk(KERN_INFO "radeon: switched on\n");
1084 /* don't suspend or resume card normally */
5bcf719b 1085 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
d1f9809e
ML
1086
1087 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
1088 dev->pdev->d3_delay = 20;
1089
6a9ee8af 1090 radeon_resume_kms(dev);
d1f9809e
ML
1091
1092 dev->pdev->d3_delay = d3_delay;
1093
5bcf719b 1094 dev->switch_power_state = DRM_SWITCH_POWER_ON;
fbf81762 1095 drm_kms_helper_poll_enable(dev);
6a9ee8af
DA
1096 } else {
1097 printk(KERN_INFO "radeon: switched off\n");
fbf81762 1098 drm_kms_helper_poll_disable(dev);
5bcf719b 1099 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
6a9ee8af 1100 radeon_suspend_kms(dev, pmm);
5bcf719b 1101 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
6a9ee8af
DA
1102 }
1103}
1104
0c195119
AD
1105/**
1106 * radeon_switcheroo_can_switch - see if switcheroo state can change
1107 *
1108 * @pdev: pci dev pointer
1109 *
1110 * Callback for the switcheroo driver. Check of the switcheroo
1111 * state can be changed.
1112 * Returns true if the state can be changed, false if not.
1113 */
6a9ee8af
DA
1114static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1115{
1116 struct drm_device *dev = pci_get_drvdata(pdev);
1117 bool can_switch;
1118
1119 spin_lock(&dev->count_lock);
1120 can_switch = (dev->open_count == 0);
1121 spin_unlock(&dev->count_lock);
1122 return can_switch;
1123}
1124
/* Callbacks registered with vga_switcheroo for hybrid-graphics switching.
 * No reprobe hook is provided. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1130
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* validate module params after the family is known */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32-bit DMA when the wider mask is refused */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	/* BONAIRE and newer expose the MMIO registers in BAR 5, older
	 * asics in BAR 2 */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: use the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		/* non-fatal: debugfs is informational only */
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests and benchmarks selected by module params */
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}
1322
4d8bf9ae
CK
1323static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1324
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unregister from switcheroo and the vga arbiter */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	/* release the I/O port mapping, if one was found at init */
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	/* unmap MMIO registers */
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	/* doorbell BAR only exists on BONAIRE and newer */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1351
1352
1353/*
1354 * Suspend & resume.
1355 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @state: suspend state
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* card already powered down by switcheroo: nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* console_lock protects the fbdev state change */
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
1448
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* card powered down by switcheroo: nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	/* non-fatal: log IB test failures but keep resuming */
	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1512
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Saves pending commands from all rings before the reset and replays
 * them after a successful reset; if the subsequent IB tests fail, the
 * reset is retried once more without replaying the saved commands.
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* backed-up command streams, one per ring; ownership passes to
	 * radeon_ring_restore() on the success path */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			/* clear so a retry won't replay (or re-free) them */
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once without the saved commands */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: unblock waiters and drop the backups */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1593
771fe6b9
JG
1594
1595/*
1596 * Debugfs
1597 */
771fe6b9
JG
1598int radeon_debugfs_add_files(struct radeon_device *rdev,
1599 struct drm_info_list *files,
1600 unsigned nfiles)
1601{
1602 unsigned i;
1603
4d8bf9ae
CK
1604 for (i = 0; i < rdev->debugfs_count; i++) {
1605 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1606 /* Already registered */
1607 return 0;
1608 }
1609 }
c245cb9e 1610
4d8bf9ae 1611 i = rdev->debugfs_count + 1;
c245cb9e
MW
1612 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1613 DRM_ERROR("Reached maximum number of debugfs components.\n");
1614 DRM_ERROR("Report so we increase "
1615 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1616 return -EINVAL;
1617 }
4d8bf9ae
CK
1618 rdev->debugfs[rdev->debugfs_count].files = files;
1619 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1620 rdev->debugfs_count = i;
771fe6b9
JG
1621#if defined(CONFIG_DEBUG_FS)
1622 drm_debugfs_create_files(files, nfiles,
1623 rdev->ddev->control->debugfs_root,
1624 rdev->ddev->control);
1625 drm_debugfs_create_files(files, nfiles,
1626 rdev->ddev->primary->debugfs_root,
1627 rdev->ddev->primary);
1628#endif
1629 return 0;
1630}
1631
/**
 * radeon_debugfs_remove_files - unregister all recorded debugfs files
 *
 * @rdev: radeon_device pointer
 *
 * Removes every file list previously registered with
 * radeon_debugfs_add_files() from both the control and primary minors.
 * A no-op when debugfs is compiled out.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		struct drm_info_list *files = rdev->debugfs[idx].files;
		unsigned num_files = rdev->debugfs[idx].num_files;

		drm_debugfs_remove_files(files, num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(files, num_files,
					 rdev->ddev->primary);
	}
#endif
}
1647
#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook: intentionally empty, the driver registers
 * its files through radeon_debugfs_add_files() instead. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Per-minor debugfs cleanup hook: intentionally empty, files are removed
 * via radeon_debugfs_remove_files() at device teardown. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.328842 seconds and 5 git commands to generate.