drm/radeon: use callbacks for ring pointer handling (v3)
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
/*
 * Human-readable ASIC names, indexed by rdev->family (the
 * RADEON_FAMILY_MASK bits of the driver flags); used for the
 * boot-time identification message.  Keep in sync with the family enum.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"LAST",
};
103
/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with corresponding AND and OR masks.
 * Each entry is a (reg, and_mask, or_mask) triplet; an and_mask of
 * 0xffffffff means the register is written with or_mask directly,
 * skipping the read-modify-write.
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	/* the table must be a whole number of (reg, and, or) triplets */
	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i +=3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
139
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			/* re-program surface regs that are backed by a BO,
			 * clear the unused ones */
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
163
/*
 * GPU scratch registers helpers function.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		/* registers are contiguous, 4 bytes apart from the base */
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
190
0c195119
AD
191/**
192 * radeon_scratch_get - Allocate a scratch register
193 *
194 * @rdev: radeon_device pointer
195 * @reg: scratch register mmio offset
196 *
197 * Allocate a CP scratch register for use by the driver (all asics).
198 * Returns 0 on success or -EINVAL on failure.
199 */
771fe6b9
JG
200int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
201{
202 int i;
203
204 for (i = 0; i < rdev->scratch.num_reg; i++) {
205 if (rdev->scratch.free[i]) {
206 rdev->scratch.free[i] = false;
207 *reg = rdev->scratch.reg[i];
208 return 0;
209 }
210 }
211 return -EINVAL;
212}
213
0c195119
AD
214/**
215 * radeon_scratch_free - Free a scratch register
216 *
217 * @rdev: radeon_device pointer
218 * @reg: scratch register mmio offset
219 *
220 * Free a CP scratch register allocated for use by the driver (all asics)
221 */
771fe6b9
JG
222void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
223{
224 int i;
225
226 for (i = 0; i < rdev->scratch.num_reg; i++) {
227 if (rdev->scratch.reg[i] == reg) {
228 rdev->scratch.free[i] = true;
229 return;
230 }
231 }
232}
233
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	/* software flag only; the writeback buffer itself stays allocated */
	rdev->wb.enabled = false;
}
252
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* unmap and unpin the BO before dropping the reference */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
275
/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page of GTT memory holds the writeback slots */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		/* pin it in GTT so the GPU address stays valid */
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
350
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or
 * for IGP the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by bogus hw of Novell bug 204882 + along with lots of
 * ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* optional user cap on VRAM size, module param is in MB */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		/* VRAM doesn't fit in the MC address space above base */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		/* on AGP the VRAM range must not overlap the GTT range */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 415
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space available after the VRAM range (up to the MC address mask)
	 * and before it, both aligned to gtt_base_align */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* more room before VRAM: place GTT just below vram_start */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* otherwise place GTT just above vram_end */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
451
/*
 * GPU helpers function.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* asics without a display engine have no CRTCs to check */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
518
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
546
/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		/* run the vbios init tables (ATOM or legacy COMBIOS) */
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}
573
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated; nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
601
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	/* tear down the DMA mapping before releasing the page */
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
618
771fe6b9 619
771fe6b9 620/* ATOM accessor methods */
0c195119
AD
621/*
622 * ATOM is an interpreted byte code stored in tables in the vbios. The
623 * driver registers callbacks to access registers and the interpreter
624 * in the driver parses the tables and executes then to program specific
625 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
626 * atombios.h, and atom.c
627 */
628
629/**
630 * cail_pll_read - read PLL register
631 *
632 * @info: atom card_info pointer
633 * @reg: PLL register offset
634 *
635 * Provides a PLL register accessor for the atom interpreter (r4xx+).
636 * Returns the value of the PLL register.
637 */
771fe6b9
JG
638static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
639{
640 struct radeon_device *rdev = info->dev->dev_private;
641 uint32_t r;
642
643 r = rdev->pll_rreg(rdev, reg);
644 return r;
645}
646
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
662
0c195119
AD
663/**
664 * cail_mc_read - read MC (Memory Controller) register
665 *
666 * @info: atom card_info pointer
667 * @reg: MC register offset
668 *
669 * Provides an MC register accessor for the atom interpreter (r4xx+).
670 * Returns the value of the MC register.
671 */
771fe6b9
JG
672static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
673{
674 struct radeon_device *rdev = info->dev->dev_private;
675 uint32_t r;
676
677 r = rdev->mc_rreg(rdev, reg);
678 return r;
679}
680
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
696
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* atom offsets are multiplied by 4 to get a byte offset */
	WREG32(reg*4, val);
}
712
0c195119
AD
713/**
714 * cail_reg_read - read MMIO register
715 *
716 * @info: atom card_info pointer
717 * @reg: MMIO register offset
718 *
719 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
720 * Returns the value of the MMIO register.
721 */
771fe6b9
JG
722static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
723{
724 struct radeon_device *rdev = info->dev->dev_private;
725 uint32_t r;
726
727 r = RREG32(reg*4);
728 return r;
729}
730
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* atom offsets are multiplied by 4 to get a byte offset */
	WREG32_IO(reg*4, val);
}
746
0c195119
AD
747/**
748 * cail_ioreg_read - read IO register
749 *
750 * @info: atom card_info pointer
751 * @reg: IO register offset
752 *
753 * Provides an IO register accessor for the atom interpreter (r4xx+).
754 * Returns the value of the IO register.
755 */
351a52a2
AD
756static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
757{
758 struct radeon_device *rdev = info->dev->dev_private;
759 uint32_t r;
760
761 r = RREG32_IO(reg*4);
762 return r;
763}
764
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no PCI I/O BAR mapped: fall back to the MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
812
/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	/* kfree(NULL) is a no-op, so these are safe even after a
	 * partially failed init */
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}
832
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
854
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* combios allocates nothing at init time, so nothing to free */
}
866
/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
c1176d6f 887
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Zero is accepted as well (clearing the x-1 trick's low bits).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
900
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
	}
	/* module param is in MB; the mc wants bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
947
/**
 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
 * needed for waking up.
 *
 * @pdev: pci dev pointer
 */
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{

	/* 6600m in a macbook pro */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    pdev->subsystem_device == 0x00e2) {
		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
		return true;
	}

	return false;
}
966
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some machines need a longer D3 wakeup delay (see quirk) */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		/* restore the original delay once resumed */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1004
/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	/* only allow switching while no one has the device open */
	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
1024
26ec685f
TI
/* vga_switcheroo callbacks for radeon; no .reprobe hook is provided,
 * so outputs are not re-probed on a GPU switch. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1030
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	/* basic driver state; family is encoded in the low flag bits */
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* sanitize module parameters against this asic's capabilities */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	/* radeon_agpmode == -1 means "AGP disabled" via module parameter */
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to a 32-bit mask if 40 bits are not usable */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	/* register BAR moved from BAR 2 to BAR 5 starting with Bonaire */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

	r = radeon_init(rdev);
	if (r)
		/* NOTE(review): the ioremap/pci_iomap mappings and the VGA
		 * client/switcheroo registrations above are not unwound on
		 * this (or later) error returns; presumably the caller's
		 * teardown path handles it — verify against callers. */
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests/benchmarks controlled by module parameters */
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}
1217
4d8bf9ae
CK
1218static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1219
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* flag shutdown so other code paths stop touching the hw */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* undo the registrations made in radeon_device_init() */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}
1244
1245
1246/*
1247 * Suspend & resume.
1248 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @state: suspend state
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	/* pre-thaw: the image will be restored, nothing to save */
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* switcheroo already powered the card down; nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* console_lock serializes against fbcon touching the fbdev */
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
1341
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* switcheroo owns the power state; nothing to do when switched off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1405
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* per-ring backup of the not-yet-processed commands */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	/* save unprocessed commands so they can be re-submitted after reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* re-submit the saved commands; clear each slot so a later
		 * retry cannot restore (or free) the same data twice */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once more, this time without
				 * restoring the saved commands */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: force fence completion and drop the
		 * saved command buffers */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1484
771fe6b9
JG
1485
1486/*
1487 * Debugfs
1488 */
771fe6b9
JG
1489int radeon_debugfs_add_files(struct radeon_device *rdev,
1490 struct drm_info_list *files,
1491 unsigned nfiles)
1492{
1493 unsigned i;
1494
4d8bf9ae
CK
1495 for (i = 0; i < rdev->debugfs_count; i++) {
1496 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1497 /* Already registered */
1498 return 0;
1499 }
1500 }
c245cb9e 1501
4d8bf9ae 1502 i = rdev->debugfs_count + 1;
c245cb9e
MW
1503 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1504 DRM_ERROR("Reached maximum number of debugfs components.\n");
1505 DRM_ERROR("Report so we increase "
1506 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1507 return -EINVAL;
1508 }
4d8bf9ae
CK
1509 rdev->debugfs[rdev->debugfs_count].files = files;
1510 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1511 rdev->debugfs_count = i;
771fe6b9
JG
1512#if defined(CONFIG_DEBUG_FS)
1513 drm_debugfs_create_files(files, nfiles,
1514 rdev->ddev->control->debugfs_root,
1515 rdev->ddev->control);
1516 drm_debugfs_create_files(files, nfiles,
1517 rdev->ddev->primary->debugfs_root,
1518 rdev->ddev->primary);
1519#endif
1520 return 0;
1521}
1522
4d8bf9ae
CK
/* Unregister every debugfs file list recorded in rdev->debugfs from
 * both the control and primary DRM minors. No-op without debugfs. */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1538
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* Stub: radeon registers its debugfs files later through
 * radeon_debugfs_add_files(), so there is nothing to do here. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Stub counterpart to radeon_debugfs_init(); file removal is handled
 * by radeon_debugfs_remove_files(), so this is intentionally empty. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.450436 seconds and 5 git commands to generate.