drm/radeon: add support for RS740 IGP chipsets.
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_cp.c
CommitLineData
f26c473c
DA
/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
/*
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * Copyright 2007 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "radeon_drm.h"
35#include "radeon_drv.h"
414ed537 36#include "r300_reg.h"
1da177e4 37
9f18409e
AD
38#include "radeon_microcode.h"
39
1da177e4
LT
40#define RADEON_FIFO_DEBUG 0
41
84b1fd10 42static int radeon_do_cleanup_cp(struct drm_device * dev);
54f961a6 43static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
1da177e4 44
45e51905 45static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
3d5e2c13
DA
46{
47 u32 ret;
48 RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
49 ret = RADEON_READ(R520_MC_IND_DATA);
50 RADEON_WRITE(R520_MC_IND_INDEX, 0);
51 return ret;
52}
53
45e51905
AD
54static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
55{
56 u32 ret;
57 RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
58 ret = RADEON_READ(RS480_NB_MC_DATA);
59 RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
60 return ret;
61}
62
60f92683
MC
63static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
64{
45e51905 65 u32 ret;
60f92683 66 RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
45e51905
AD
67 ret = RADEON_READ(RS690_MC_DATA);
68 RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
69 return ret;
70}
71
72static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
73{
f0738e92
AD
74 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
75 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
45e51905
AD
76 return RS690_READ_MCIND(dev_priv, addr);
77 else
78 return RS480_READ_MCIND(dev_priv, addr);
60f92683
MC
79}
80
3d5e2c13
DA
81u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
82{
83
84 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
45e51905 85 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
f0738e92
AD
86 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
87 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
60f92683 88 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
3d5e2c13 89 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
45e51905 90 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
3d5e2c13
DA
91 else
92 return RADEON_READ(RADEON_MC_FB_LOCATION);
93}
94
95static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
96{
97 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
45e51905 98 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
f0738e92
AD
99 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
100 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
60f92683 101 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
3d5e2c13 102 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
45e51905 103 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
3d5e2c13
DA
104 else
105 RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
106}
107
108static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
109{
110 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
45e51905 111 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
f0738e92
AD
112 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
113 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
60f92683 114 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
3d5e2c13 115 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
45e51905 116 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
3d5e2c13
DA
117 else
118 RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
119}
120
70b13d51
DA
121static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
122{
123 u32 agp_base_hi = upper_32_bits(agp_base);
124 u32 agp_base_lo = agp_base & 0xffffffff;
125
126 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
127 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
128 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
f0738e92
AD
129 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
130 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
70b13d51
DA
131 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
132 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
133 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
134 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
135 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
5cfb6956
AD
136 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
137 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
138 RADEON_WRITE(RS480_AGP_BASE_2, 0);
70b13d51
DA
139 } else {
140 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
141 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
142 RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
143 }
144}
145
84b1fd10 146static int RADEON_READ_PLL(struct drm_device * dev, int addr)
1da177e4
LT
147{
148 drm_radeon_private_t *dev_priv = dev->dev_private;
149
150 RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
151 return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
152}
153
3d5e2c13 154static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
ea98a92f
DA
155{
156 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
157 return RADEON_READ(RADEON_PCIE_DATA);
158}
159
#if RADEON_FIFO_DEBUG
/* Dump a fixed set of status registers for FIFO debugging. */
static void radeon_status(drm_radeon_private_t *dev_priv)
{
	static const struct {
		const char *name;
		int reg;
	} status_regs[] = {
		{ "RBBM_STATUS", RADEON_RBBM_STATUS },
		{ "CP_RB_RTPR", RADEON_CP_RB_RPTR },
		{ "CP_RB_WTPR", RADEON_CP_RB_WPTR },
		{ "AIC_CNTL", RADEON_AIC_CNTL },
		{ "AIC_STAT", RADEON_AIC_STAT },
		{ "AIC_PT_BASE", RADEON_AIC_PT_BASE },
		{ "TLB_ADDR", RADEON_AIC_TLB_ADDR },
		{ "TLB_DATA", RADEON_AIC_TLB_DATA },
	};
	int i;

	printk("%s:\n", __func__);
	for (i = 0; i < sizeof(status_regs) / sizeof(status_regs[0]); i++)
		printk("%s = 0x%08x\n", status_regs[i].name,
		       (unsigned int)RADEON_READ(status_regs[i].reg));
}
#endif
182
1da177e4
LT
183/* ================================================================
184 * Engine, FIFO control
185 */
186
b5e89ed5 187static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
1da177e4
LT
188{
189 u32 tmp;
190 int i;
191
192 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
193
259434ac
AD
194 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
195 tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
196 tmp |= RADEON_RB3D_DC_FLUSH_ALL;
197 RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
198
199 for (i = 0; i < dev_priv->usec_timeout; i++) {
200 if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
201 & RADEON_RB3D_DC_BUSY)) {
202 return 0;
203 }
204 DRM_UDELAY(1);
205 }
206 } else {
54f961a6
JG
207 /* don't flush or purge cache here or lockup */
208 return 0;
1da177e4
LT
209 }
210
211#if RADEON_FIFO_DEBUG
b5e89ed5
DA
212 DRM_ERROR("failed!\n");
213 radeon_status(dev_priv);
1da177e4 214#endif
20caafa6 215 return -EBUSY;
1da177e4
LT
216}
217
b5e89ed5 218static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
1da177e4
LT
219{
220 int i;
221
222 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
223
b5e89ed5
DA
224 for (i = 0; i < dev_priv->usec_timeout; i++) {
225 int slots = (RADEON_READ(RADEON_RBBM_STATUS)
226 & RADEON_RBBM_FIFOCNT_MASK);
227 if (slots >= entries)
228 return 0;
229 DRM_UDELAY(1);
1da177e4 230 }
6c7be298 231 DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
54f961a6
JG
232 RADEON_READ(RADEON_RBBM_STATUS),
233 RADEON_READ(R300_VAP_CNTL_STATUS));
1da177e4
LT
234
235#if RADEON_FIFO_DEBUG
b5e89ed5
DA
236 DRM_ERROR("failed!\n");
237 radeon_status(dev_priv);
1da177e4 238#endif
20caafa6 239 return -EBUSY;
1da177e4
LT
240}
241
b5e89ed5 242static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
1da177e4
LT
243{
244 int i, ret;
245
246 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
247
b5e89ed5
DA
248 ret = radeon_do_wait_for_fifo(dev_priv, 64);
249 if (ret)
250 return ret;
1da177e4 251
b5e89ed5
DA
252 for (i = 0; i < dev_priv->usec_timeout; i++) {
253 if (!(RADEON_READ(RADEON_RBBM_STATUS)
254 & RADEON_RBBM_ACTIVE)) {
255 radeon_do_pixcache_flush(dev_priv);
1da177e4
LT
256 return 0;
257 }
b5e89ed5 258 DRM_UDELAY(1);
1da177e4 259 }
6c7be298 260 DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
54f961a6
JG
261 RADEON_READ(RADEON_RBBM_STATUS),
262 RADEON_READ(R300_VAP_CNTL_STATUS));
1da177e4
LT
263
264#if RADEON_FIFO_DEBUG
b5e89ed5
DA
265 DRM_ERROR("failed!\n");
266 radeon_status(dev_priv);
1da177e4 267#endif
20caafa6 268 return -EBUSY;
1da177e4
LT
269}
270
5b92c404
AD
271static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
272{
273 uint32_t gb_tile_config, gb_pipe_sel = 0;
274
275 /* RS4xx/RS6xx/R4xx/R5xx */
276 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
277 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
278 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
279 } else {
280 /* R3xx */
281 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
282 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
283 dev_priv->num_gb_pipes = 2;
284 } else {
285 /* R3Vxx */
286 dev_priv->num_gb_pipes = 1;
287 }
288 }
289 DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
290
291 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
292
293 switch (dev_priv->num_gb_pipes) {
294 case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
295 case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
296 case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
297 default:
298 case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
299 }
300
301 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
302 RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
303 RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
304 }
305 RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
306 radeon_do_wait_for_idle(dev_priv);
307 RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
308 RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
309 R300_DC_AUTOFLUSH_ENABLE |
310 R300_DC_DC_DISABLE_IGNORE_PE));
311
312
313}
314
1da177e4
LT
315/* ================================================================
316 * CP control, initialization
317 */
318
319/* Load the microcode for the CP */
b5e89ed5 320static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
1da177e4
LT
321{
322 int i;
b5e89ed5 323 DRM_DEBUG("\n");
1da177e4 324
b5e89ed5 325 radeon_do_wait_for_idle(dev_priv);
1da177e4 326
b5e89ed5 327 RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
9f18409e
AD
328 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
329 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
330 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
331 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
332 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
333 DRM_INFO("Loading R100 Microcode\n");
334 for (i = 0; i < 256; i++) {
335 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
336 R100_cp_microcode[i][1]);
337 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
338 R100_cp_microcode[i][0]);
339 }
340 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
341 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
342 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
343 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
1da177e4 344 DRM_INFO("Loading R200 Microcode\n");
b5e89ed5
DA
345 for (i = 0; i < 256; i++) {
346 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
347 R200_cp_microcode[i][1]);
348 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
349 R200_cp_microcode[i][0]);
1da177e4 350 }
9f18409e
AD
351 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
352 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
353 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
354 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
45e51905 355 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
1da177e4 356 DRM_INFO("Loading R300 Microcode\n");
b5e89ed5
DA
357 for (i = 0; i < 256; i++) {
358 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
359 R300_cp_microcode[i][1]);
360 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
361 R300_cp_microcode[i][0]);
1da177e4 362 }
9f18409e
AD
363 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
364 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
365 DRM_INFO("Loading R400 Microcode\n");
366 for (i = 0; i < 256; i++) {
367 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
368 R420_cp_microcode[i][1]);
369 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
370 R420_cp_microcode[i][0]);
371 }
f0738e92
AD
372 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
373 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
374 DRM_INFO("Loading RS690/RS740 Microcode\n");
9f18409e
AD
375 for (i = 0; i < 256; i++) {
376 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
377 RS690_cp_microcode[i][1]);
378 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
379 RS690_cp_microcode[i][0]);
380 }
381 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
382 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
383 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
384 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
385 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
386 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
387 DRM_INFO("Loading R500 Microcode\n");
b5e89ed5
DA
388 for (i = 0; i < 256; i++) {
389 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
9f18409e 390 R520_cp_microcode[i][1]);
b5e89ed5 391 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
9f18409e 392 R520_cp_microcode[i][0]);
1da177e4
LT
393 }
394 }
395}
396
397/* Flush any pending commands to the CP. This should only be used just
398 * prior to a wait for idle, as it informs the engine that the command
399 * stream is ending.
400 */
b5e89ed5 401static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
1da177e4 402{
b5e89ed5 403 DRM_DEBUG("\n");
1da177e4
LT
404#if 0
405 u32 tmp;
406
b5e89ed5
DA
407 tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
408 RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
1da177e4
LT
409#endif
410}
411
412/* Wait for the CP to go idle.
413 */
b5e89ed5 414int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
1da177e4
LT
415{
416 RING_LOCALS;
b5e89ed5 417 DRM_DEBUG("\n");
1da177e4 418
b5e89ed5 419 BEGIN_RING(6);
1da177e4
LT
420
421 RADEON_PURGE_CACHE();
422 RADEON_PURGE_ZCACHE();
423 RADEON_WAIT_UNTIL_IDLE();
424
425 ADVANCE_RING();
426 COMMIT_RING();
427
b5e89ed5 428 return radeon_do_wait_for_idle(dev_priv);
1da177e4
LT
429}
430
431/* Start the Command Processor.
432 */
b5e89ed5 433static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
1da177e4
LT
434{
435 RING_LOCALS;
b5e89ed5 436 DRM_DEBUG("\n");
1da177e4 437
b5e89ed5 438 radeon_do_wait_for_idle(dev_priv);
1da177e4 439
b5e89ed5 440 RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
1da177e4
LT
441
442 dev_priv->cp_running = 1;
443
54f961a6
JG
444 BEGIN_RING(8);
445 /* isync can only be written through cp on r5xx write it here */
446 OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
447 OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
448 RADEON_ISYNC_ANY3D_IDLE2D |
449 RADEON_ISYNC_WAIT_IDLEGUI |
450 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
1da177e4
LT
451 RADEON_PURGE_CACHE();
452 RADEON_PURGE_ZCACHE();
453 RADEON_WAIT_UNTIL_IDLE();
1da177e4
LT
454 ADVANCE_RING();
455 COMMIT_RING();
54f961a6
JG
456
457 dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
1da177e4
LT
458}
459
460/* Reset the Command Processor. This will not flush any pending
461 * commands, so you must wait for the CP command stream to complete
462 * before calling this routine.
463 */
b5e89ed5 464static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
1da177e4
LT
465{
466 u32 cur_read_ptr;
b5e89ed5 467 DRM_DEBUG("\n");
1da177e4 468
b5e89ed5
DA
469 cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
470 RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
471 SET_RING_HEAD(dev_priv, cur_read_ptr);
1da177e4
LT
472 dev_priv->ring.tail = cur_read_ptr;
473}
474
475/* Stop the Command Processor. This will not flush any pending
476 * commands, so you must flush the command stream and wait for the CP
477 * to go idle before calling this routine.
478 */
b5e89ed5 479static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
1da177e4 480{
b5e89ed5 481 DRM_DEBUG("\n");
1da177e4 482
b5e89ed5 483 RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
1da177e4
LT
484
485 dev_priv->cp_running = 0;
486}
487
488/* Reset the engine. This will stop the CP if it is running.
489 */
84b1fd10 490static int radeon_do_engine_reset(struct drm_device * dev)
1da177e4
LT
491{
492 drm_radeon_private_t *dev_priv = dev->dev_private;
d396db32 493 u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
b5e89ed5 494 DRM_DEBUG("\n");
1da177e4 495
b5e89ed5
DA
496 radeon_do_pixcache_flush(dev_priv);
497
d396db32
AD
498 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
499 /* may need something similar for newer chips */
3d5e2c13
DA
500 clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
501 mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
502
503 RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
504 RADEON_FORCEON_MCLKA |
505 RADEON_FORCEON_MCLKB |
506 RADEON_FORCEON_YCLKA |
507 RADEON_FORCEON_YCLKB |
508 RADEON_FORCEON_MC |
509 RADEON_FORCEON_AIC));
d396db32 510 }
3d5e2c13 511
d396db32
AD
512 rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
513
514 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
515 RADEON_SOFT_RESET_CP |
516 RADEON_SOFT_RESET_HI |
517 RADEON_SOFT_RESET_SE |
518 RADEON_SOFT_RESET_RE |
519 RADEON_SOFT_RESET_PP |
520 RADEON_SOFT_RESET_E2 |
521 RADEON_SOFT_RESET_RB));
522 RADEON_READ(RADEON_RBBM_SOFT_RESET);
523 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
524 ~(RADEON_SOFT_RESET_CP |
525 RADEON_SOFT_RESET_HI |
526 RADEON_SOFT_RESET_SE |
527 RADEON_SOFT_RESET_RE |
528 RADEON_SOFT_RESET_PP |
529 RADEON_SOFT_RESET_E2 |
530 RADEON_SOFT_RESET_RB)));
531 RADEON_READ(RADEON_RBBM_SOFT_RESET);
532
533 if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
3d5e2c13
DA
534 RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
535 RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
536 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
537 }
1da177e4 538
5b92c404
AD
539 /* setup the raster pipes */
540 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
541 radeon_init_pipes(dev_priv);
542
1da177e4 543 /* Reset the CP ring */
b5e89ed5 544 radeon_do_cp_reset(dev_priv);
1da177e4
LT
545
546 /* The CP is no longer running after an engine reset */
547 dev_priv->cp_running = 0;
548
549 /* Reset any pending vertex, indirect buffers */
b5e89ed5 550 radeon_freelist_reset(dev);
1da177e4
LT
551
552 return 0;
553}
554
84b1fd10 555static void radeon_cp_init_ring_buffer(struct drm_device * dev,
b5e89ed5 556 drm_radeon_private_t * dev_priv)
1da177e4
LT
557{
558 u32 ring_start, cur_read_ptr;
559 u32 tmp;
bc5f4523 560
d5ea702f
DA
561 /* Initialize the memory controller. With new memory map, the fb location
562 * is not changed, it should have been properly initialized already. Part
563 * of the problem is that the code below is bogus, assuming the GART is
564 * always appended to the fb which is not necessarily the case
565 */
566 if (!dev_priv->new_memmap)
3d5e2c13 567 radeon_write_fb_location(dev_priv,
d5ea702f
DA
568 ((dev_priv->gart_vm_start - 1) & 0xffff0000)
569 | (dev_priv->fb_location >> 16));
1da177e4
LT
570
571#if __OS_HAS_AGP
54a56ac5 572 if (dev_priv->flags & RADEON_IS_AGP) {
70b13d51
DA
573 radeon_write_agp_base(dev_priv, dev->agp->base);
574
3d5e2c13 575 radeon_write_agp_location(dev_priv,
b5e89ed5
DA
576 (((dev_priv->gart_vm_start - 1 +
577 dev_priv->gart_size) & 0xffff0000) |
578 (dev_priv->gart_vm_start >> 16)));
1da177e4
LT
579
580 ring_start = (dev_priv->cp_ring->offset
581 - dev->agp->base
582 + dev_priv->gart_vm_start);
b0917bd9 583 } else
1da177e4
LT
584#endif
585 ring_start = (dev_priv->cp_ring->offset
b0917bd9 586 - (unsigned long)dev->sg->virtual
1da177e4
LT
587 + dev_priv->gart_vm_start);
588
b5e89ed5 589 RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
1da177e4
LT
590
591 /* Set the write pointer delay */
b5e89ed5 592 RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
1da177e4
LT
593
594 /* Initialize the ring buffer's read and write pointers */
b5e89ed5
DA
595 cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
596 RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
597 SET_RING_HEAD(dev_priv, cur_read_ptr);
1da177e4
LT
598 dev_priv->ring.tail = cur_read_ptr;
599
600#if __OS_HAS_AGP
54a56ac5 601 if (dev_priv->flags & RADEON_IS_AGP) {
b5e89ed5
DA
602 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
603 dev_priv->ring_rptr->offset
604 - dev->agp->base + dev_priv->gart_vm_start);
1da177e4
LT
605 } else
606#endif
607 {
55910517 608 struct drm_sg_mem *entry = dev->sg;
1da177e4
LT
609 unsigned long tmp_ofs, page_ofs;
610
b0917bd9
IK
611 tmp_ofs = dev_priv->ring_rptr->offset -
612 (unsigned long)dev->sg->virtual;
1da177e4
LT
613 page_ofs = tmp_ofs >> PAGE_SHIFT;
614
b5e89ed5
DA
615 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
616 DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
617 (unsigned long)entry->busaddr[page_ofs],
618 entry->handle + tmp_ofs);
1da177e4
LT
619 }
620
d5ea702f
DA
621 /* Set ring buffer size */
622#ifdef __BIG_ENDIAN
623 RADEON_WRITE(RADEON_CP_RB_CNTL,
576cc458
RS
624 RADEON_BUF_SWAP_32BIT |
625 (dev_priv->ring.fetch_size_l2ow << 18) |
626 (dev_priv->ring.rptr_update_l2qw << 8) |
627 dev_priv->ring.size_l2qw);
d5ea702f 628#else
576cc458
RS
629 RADEON_WRITE(RADEON_CP_RB_CNTL,
630 (dev_priv->ring.fetch_size_l2ow << 18) |
631 (dev_priv->ring.rptr_update_l2qw << 8) |
632 dev_priv->ring.size_l2qw);
d5ea702f
DA
633#endif
634
d5ea702f 635
1da177e4
LT
636 /* Initialize the scratch register pointer. This will cause
637 * the scratch register values to be written out to memory
638 * whenever they are updated.
639 *
640 * We simply put this behind the ring read pointer, this works
641 * with PCI GART as well as (whatever kind of) AGP GART
642 */
b5e89ed5
DA
643 RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
644 + RADEON_SCRATCH_REG_OFFSET);
1da177e4
LT
645
646 dev_priv->scratch = ((__volatile__ u32 *)
647 dev_priv->ring_rptr->handle +
648 (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
649
b5e89ed5 650 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
1da177e4 651
d5ea702f
DA
652 /* Turn on bus mastering */
653 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
654 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
1da177e4
LT
655
656 dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
b5e89ed5 657 RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
1da177e4
LT
658
659 dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
b5e89ed5
DA
660 RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
661 dev_priv->sarea_priv->last_dispatch);
1da177e4
LT
662
663 dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
b5e89ed5 664 RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
1da177e4 665
b5e89ed5 666 radeon_do_wait_for_idle(dev_priv);
1da177e4 667
1da177e4 668 /* Sync everything up */
b5e89ed5
DA
669 RADEON_WRITE(RADEON_ISYNC_CNTL,
670 (RADEON_ISYNC_ANY2D_IDLE3D |
671 RADEON_ISYNC_ANY3D_IDLE2D |
672 RADEON_ISYNC_WAIT_IDLEGUI |
673 RADEON_ISYNC_CPSCRATCH_IDLEGUI));
d5ea702f
DA
674
675}
676
677static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
678{
679 u32 tmp;
680
6b79d521
DA
681 /* Start with assuming that writeback doesn't work */
682 dev_priv->writeback_works = 0;
683
d5ea702f
DA
684 /* Writeback doesn't seem to work everywhere, test it here and possibly
685 * enable it if it appears to work
686 */
687 DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
688 RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
689
690 for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
691 if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
692 0xdeadbeef)
693 break;
694 DRM_UDELAY(1);
695 }
696
697 if (tmp < dev_priv->usec_timeout) {
698 dev_priv->writeback_works = 1;
699 DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
700 } else {
701 dev_priv->writeback_works = 0;
702 DRM_INFO("writeback test failed\n");
703 }
704 if (radeon_no_wb == 1) {
705 dev_priv->writeback_works = 0;
706 DRM_INFO("writeback forced off\n");
707 }
ae1b1a48
MD
708
709 if (!dev_priv->writeback_works) {
710 /* Disable writeback to avoid unnecessary bus master transfer */
711 RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
712 RADEON_RB_NO_UPDATE);
713 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
714 }
1da177e4
LT
715}
716
f2b04cd2
DA
717/* Enable or disable IGP GART on the chip */
718static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
60f92683
MC
719{
720 u32 temp;
721
722 if (on) {
45e51905 723 DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
60f92683
MC
724 dev_priv->gart_vm_start,
725 (long)dev_priv->gart_info.bus_addr,
726 dev_priv->gart_size);
727
45e51905 728 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
f0738e92
AD
729 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
730 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
45e51905
AD
731 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
732 RS690_BLOCK_GFX_D3_EN));
733 else
734 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
60f92683 735
45e51905
AD
736 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
737 RS480_VA_SIZE_32MB));
60f92683 738
45e51905
AD
739 temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
740 IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
741 RS480_TLB_ENABLE |
742 RS480_GTW_LAC_EN |
743 RS480_1LEVEL_GART));
60f92683 744
fa0d71b9
DA
745 temp = dev_priv->gart_info.bus_addr & 0xfffff000;
746 temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
45e51905
AD
747 IGP_WRITE_MCIND(RS480_GART_BASE, temp);
748
749 temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
750 IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
751 RS480_REQ_TYPE_SNOOP_DIS));
752
5cfb6956 753 radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
3722bfc6 754
60f92683
MC
755 dev_priv->gart_size = 32*1024*1024;
756 temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
757 0xffff0000) | (dev_priv->gart_vm_start >> 16));
758
45e51905 759 radeon_write_agp_location(dev_priv, temp);
60f92683 760
45e51905
AD
761 temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
762 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
763 RS480_VA_SIZE_32MB));
60f92683
MC
764
765 do {
45e51905
AD
766 temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
767 if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
60f92683
MC
768 break;
769 DRM_UDELAY(1);
770 } while (1);
771
45e51905
AD
772 IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
773 RS480_GART_CACHE_INVALIDATE);
2735977b 774
60f92683 775 do {
45e51905
AD
776 temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
777 if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
60f92683
MC
778 break;
779 DRM_UDELAY(1);
780 } while (1);
781
45e51905 782 IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
60f92683 783 } else {
45e51905 784 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
60f92683
MC
785 }
786}
787
ea98a92f
DA
788static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
789{
790 u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
791 if (on) {
792
793 DRM_DEBUG("programming pcie %08X %08lX %08X\n",
b5e89ed5
DA
794 dev_priv->gart_vm_start,
795 (long)dev_priv->gart_info.bus_addr,
ea98a92f 796 dev_priv->gart_size);
b5e89ed5
DA
797 RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
798 dev_priv->gart_vm_start);
799 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
800 dev_priv->gart_info.bus_addr);
801 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
802 dev_priv->gart_vm_start);
803 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
804 dev_priv->gart_vm_start +
805 dev_priv->gart_size - 1);
806
3d5e2c13 807 radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
b5e89ed5
DA
808
809 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
810 RADEON_PCIE_TX_GART_EN);
ea98a92f 811 } else {
b5e89ed5
DA
812 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
813 tmp & ~RADEON_PCIE_TX_GART_EN);
ea98a92f 814 }
1da177e4
LT
815}
816
817/* Enable or disable PCI GART on the chip */
b5e89ed5 818static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1da177e4 819{
d985c108 820 u32 tmp;
1da177e4 821
45e51905 822 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
f0738e92 823 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
45e51905 824 (dev_priv->flags & RADEON_IS_IGPGART)) {
f2b04cd2
DA
825 radeon_set_igpgart(dev_priv, on);
826 return;
827 }
828
54a56ac5 829 if (dev_priv->flags & RADEON_IS_PCIE) {
ea98a92f
DA
830 radeon_set_pciegart(dev_priv, on);
831 return;
832 }
1da177e4 833
bc5f4523 834 tmp = RADEON_READ(RADEON_AIC_CNTL);
d985c108 835
b5e89ed5
DA
836 if (on) {
837 RADEON_WRITE(RADEON_AIC_CNTL,
838 tmp | RADEON_PCIGART_TRANSLATE_EN);
1da177e4
LT
839
840 /* set PCI GART page-table base address
841 */
ea98a92f 842 RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
1da177e4
LT
843
844 /* set address range for PCI address translate
845 */
b5e89ed5
DA
846 RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
847 RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
848 + dev_priv->gart_size - 1);
1da177e4
LT
849
850 /* Turn off AGP aperture -- is this required for PCI GART?
851 */
3d5e2c13 852 radeon_write_agp_location(dev_priv, 0xffffffc0);
b5e89ed5 853 RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
1da177e4 854 } else {
b5e89ed5
DA
855 RADEON_WRITE(RADEON_AIC_CNTL,
856 tmp & ~RADEON_PCIGART_TRANSLATE_EN);
1da177e4
LT
857 }
858}
859
/* Main CP (Command Processor) initialization, driven by the RADEON_CP_INIT
 * ioctl.  Validates the userspace-supplied init parameters, locates the DRM
 * maps set up by the DDX, programs the memory map / GART, loads microcode
 * and brings up the ring buffer.
 *
 * Returns 0 on success or a negative errno; on any failure path the partial
 * state is torn down via radeon_do_cleanup_cp() before returning.
 */
static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	/* if we require new memory map but we don't have it fail */
	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Reconcile the bus-type flags with what userspace asked for:
	 * an AGP card may be forced into PCI mode, and the AGP flag is
	 * restored when no bus flag is set and PCI mode wasn't requested.
	 */
	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
		DRM_DEBUG("Forcing AGP card to PCI mode\n");
		dev_priv->flags &= ~RADEON_IS_AGP;
	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
		   && !init->is_pci) {
		DRM_DEBUG("Restoring AGP flag\n");
		dev_priv->flags |= RADEON_IS_AGP;
	}

	/* Non-AGP operation needs the scatter/gather memory from drmDMA. */
	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
		DRM_ERROR("PCI GART memory not allocated!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->usec_timeout = init->usec_timeout;
	if (dev_priv->usec_timeout < 1 ||
	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
		DRM_DEBUG("TIMEOUT problem!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Enable vblank on CRTC1 for older X servers
	 */
	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;

	/* Pick the CP microcode matching the chip generation requested. */
	switch(init->func) {
	case RADEON_INIT_R200_CP:
		dev_priv->microcode_version = UCODE_R200;
		break;
	case RADEON_INIT_R300_CP:
		dev_priv->microcode_version = UCODE_R300;
		break;
	default:
		dev_priv->microcode_version = UCODE_R100;
	}

	dev_priv->do_boxes = 0;
	dev_priv->cp_mode = init->cp_mode;

	/* We don't support anything other than bus-mastering ring mode,
	 * but the ring can be in either AGP or PCI space for the ring
	 * read pointer.
	 */
	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Color buffer format: anything other than 16bpp defaults to 32. */
	switch (init->fb_bpp) {
	case 16:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
		break;
	case 32:
	default:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
		break;
	}
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	/* Depth buffer format: non-16bpp defaults to 24-bit integer Z. */
	switch (init->depth_bpp) {
	case 16:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
		break;
	case 32:
	default:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
		break;
	}
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Hardware state for depth clears.  Remove this if/when we no
	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
	 * all values to prevent unwanted 3D state from slipping through
	 * and screwing with the clear operation.
	 */
	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
					   (dev_priv->color_fmt << 10) |
					   (dev_priv->microcode_version ==
					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));

	dev_priv->depth_clear.rb3d_zstencilcntl =
	    (dev_priv->depth_fmt |
	     RADEON_Z_TEST_ALWAYS |
	     RADEON_STENCIL_TEST_ALWAYS |
	     RADEON_STENCIL_S_FAIL_REPLACE |
	     RADEON_STENCIL_ZPASS_REPLACE |
	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);

	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
					 RADEON_BFACE_SOLID |
					 RADEON_FFACE_SOLID |
					 RADEON_FLAT_SHADE_VTX_LAST |
					 RADEON_DIFFUSE_SHADE_FLAT |
					 RADEON_ALPHA_SHADE_FLAT |
					 RADEON_SPECULAR_SHADE_FLAT |
					 RADEON_FOG_SHADE_FLAT |
					 RADEON_VTX_PIX_CENTER_OGL |
					 RADEON_ROUND_MODE_TRUNC |
					 RADEON_ROUND_PREC_8TH_PIX);

	dev_priv->ring_offset = init->ring_offset;
	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
	dev_priv->buffers_offset = init->buffers_offset;
	dev_priv->gart_textures_offset = init->gart_textures_offset;

	/* Locate the DRM maps the DDX registered via drmAddMap(). */
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
	if (!dev_priv->cp_ring) {
		DRM_ERROR("could not find cp ring region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
	if (!dev_priv->ring_rptr) {
		DRM_ERROR("could not find ring read pointer!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* GART texture heap is optional (offset 0 means "not used"). */
	if (init->gart_textures_offset) {
		dev_priv->gart_textures =
		    drm_core_findmap(dev, init->gart_textures_offset);
		if (!dev_priv->gart_textures) {
			DRM_ERROR("could not find GART texture region!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	}

	/* The driver's private sarea lives at a caller-chosen offset
	 * inside the shared sarea mapping.
	 */
	dev_priv->sarea_priv =
	    (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* AGP regions must be ioremapped into kernel space. */
		drm_core_ioremap(dev_priv->cp_ring, dev);
		drm_core_ioremap(dev_priv->ring_rptr, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev_priv->cp_ring->handle ||
		    !dev_priv->ring_rptr->handle ||
		    !dev->agp_buffer_map->handle) {
			DRM_ERROR("could not find ioremap agp regions!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	} else
#endif
	{
		/* PCI(E) path: the maps are already kernel-addressable,
		 * so the handle is just the map offset.
		 */
		dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
		dev_priv->ring_rptr->handle =
		    (void *)dev_priv->ring_rptr->offset;
		dev->agp_buffer_map->handle =
		    (void *)dev->agp_buffer_map->offset;

		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
			  dev_priv->cp_ring->handle);
		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
			  dev_priv->ring_rptr->handle);
		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
			  dev->agp_buffer_map->handle);
	}

	/* Read the framebuffer base/size back from the hardware; the
	 * MC_FB_LOCATION register packs base and top in 16-bit halves.
	 */
	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
	dev_priv->fb_size =
		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
		- dev_priv->fb_location;

	/* Precompute the packed pitch/offset words used by the 2D engine. */
	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
					((dev_priv->front_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
				       ((dev_priv->back_offset
					 + dev_priv->fb_location) >> 10));

	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
					((dev_priv->depth_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->gart_size = init->gart_size;

	/* New let's set the memory map ... */
	if (dev_priv->new_memmap) {
		u32 base = 0;

		DRM_INFO("Setting GART location based on new memory map\n");

		/* If using AGP, try to locate the AGP aperture at the same
		 * location in the card and on the bus, though we have to
		 * align it down.
		 */
#if __OS_HAS_AGP
		if (dev_priv->flags & RADEON_IS_AGP) {
			base = dev->agp->base;
			/* Check if valid */
			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
					 dev->agp->base);
				base = 0;
			}
		}
#endif
		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
		if (base == 0) {
			base = dev_priv->fb_location + dev_priv->fb_size;
			if (base < dev_priv->fb_location ||
			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
				base = dev_priv->fb_location
					- dev_priv->gart_size;
		}
		/* Align the GART base down to a 4MB boundary. */
		dev_priv->gart_vm_start = base & 0xffc00000u;
		if (dev_priv->gart_vm_start != base)
			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
				 base, dev_priv->gart_vm_start);
	} else {
		DRM_INFO("Setting GART location based on old memory map\n");
		dev_priv->gart_vm_start = dev_priv->fb_location +
			RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

	/* Convert the DMA buffer map's bus address into a GART address. */
#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP)
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->agp->base
						 + dev_priv->gart_vm_start);
	else
#endif
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
					- (unsigned long)dev->sg->virtual
					+ dev_priv->gart_vm_start);

	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
		  dev_priv->gart_buffers_offset);

	/* Ring bookkeeping; size_l2qw is log2 of the size in quadwords. */
	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
			      + init->ring_size / sizeof(u32));
	dev_priv->ring.size = init->ring_size;
	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);

	/* rptr_update / fetch_size are hard-coded rather than taken from
	 * userspace (see the commented-out init-> references).
	 */
	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);

	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;

	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		/* if we have an offset set from userspace */
		if (dev_priv->pcigart_offset_set) {
			/* GART table lives in framebuffer memory at a
			 * userspace-chosen offset; map it write-combined.
			 */
			dev_priv->gart_info.bus_addr =
			    dev_priv->pcigart_offset + dev_priv->fb_location;
			dev_priv->gart_info.mapping.offset =
			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
			dev_priv->gart_info.mapping.size =
			    dev_priv->gart_info.table_size;

			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr =
			    dev_priv->gart_info.mapping.handle;

			if (dev_priv->flags & RADEON_IS_PCIE)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
			    DRM_ATI_GART_FB;

			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
				  dev_priv->gart_info.addr,
				  dev_priv->pcigart_offset);
		} else {
			/* GART table in main (system) memory. */
			if (dev_priv->flags & RADEON_IS_IGPGART)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
			    DRM_ATI_GART_MAIN;
			dev_priv->gart_info.addr = NULL;
			dev_priv->gart_info.bus_addr = 0;
			if (dev_priv->flags & RADEON_IS_PCIE) {
				DRM_ERROR
				    ("Cannot use PCI Express without GART in FB memory\n");
				radeon_do_cleanup_cp(dev);
				return -EINVAL;
			}
		}

		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
			DRM_ERROR("failed to init PCI GART!\n");
			radeon_do_cleanup_cp(dev);
			return -ENOMEM;
		}

		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	/* Final bring-up: microcode, ring buffer, engine reset, and a
	 * writeback test to decide whether scratch writeback is usable.
	 */
	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	dev_priv->last_buf = 0;

	radeon_do_engine_reset(dev);
	radeon_test_writeback(dev_priv);

	return 0;
}
1215
/* Tear down everything radeon_do_init_cp() set up: unmap AGP regions or
 * disable/cleanup the PCI GART, then clear the private state (up to, but
 * not including, the probe-time 'flags' field).  Safe to call on partially
 * initialized state, which is why all the init error paths funnel here.
 */
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Release the AGP ioremaps made at init; NULL the pointers
		 * so a second cleanup call is harmless.
		 */
		if (dev_priv->cp_ring != NULL) {
			drm_core_ioremapfree(dev_priv->cp_ring, dev);
			dev_priv->cp_ring = NULL;
		}
		if (dev_priv->ring_rptr != NULL) {
			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
			dev_priv->ring_rptr = NULL;
		}
		if (dev->agp_buffer_map != NULL) {
			drm_core_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{

		/* bus_addr != 0 means the GART was actually initialized. */
		if (dev_priv->gart_info.bus_addr) {
			/* Turn off PCI GART */
			radeon_set_pcigart(dev_priv, 0);
			if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
				DRM_ERROR("failed to cleanup PCI GART!\n");
		}

		/* A framebuffer-resident GART table was ioremapped at init
		 * and must be unmapped here.
		 */
		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
		{
			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr = 0;
		}
	}
	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}
1264
b5e89ed5
DA
1265/* This code will reinit the Radeon CP hardware after a resume from disc.
1266 * AFAIK, it would be very difficult to pickle the state at suspend time, so
1da177e4
LT
1267 * here we make sure that all Radeon hardware initialisation is re-done without
1268 * affecting running applications.
1269 *
1270 * Charl P. Botha <http://cpbotha.net>
1271 */
/* Reinitialize the CP hardware after a resume from disk, without touching
 * the software state that running applications depend on: re-program the
 * GART, reload microcode, rebuild the ring and reset the engine.
 *
 * Returns 0 on success, -EINVAL if the driver was never initialized.
 */
static int radeon_do_resume_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	/* Same bring-up sequence as the tail of radeon_do_init_cp(). */
	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	radeon_do_engine_reset(dev);
	/* Re-enable the software interrupt lost across suspend. */
	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}
1304
c153f45f 1305int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1da177e4 1306{
c153f45f 1307 drm_radeon_init_t *init = data;
1da177e4 1308
6c340eac 1309 LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 1310
c153f45f 1311 if (init->func == RADEON_INIT_R300_CP)
3d5e2c13 1312 r300_init_reg_flags(dev);
414ed537 1313
c153f45f 1314 switch (init->func) {
1da177e4
LT
1315 case RADEON_INIT_CP:
1316 case RADEON_INIT_R200_CP:
1317 case RADEON_INIT_R300_CP:
c153f45f 1318 return radeon_do_init_cp(dev, init);
1da177e4 1319 case RADEON_CLEANUP_CP:
b5e89ed5 1320 return radeon_do_cleanup_cp(dev);
1da177e4
LT
1321 }
1322
20caafa6 1323 return -EINVAL;
1da177e4
LT
1324}
1325
c153f45f 1326int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1da177e4 1327{
1da177e4 1328 drm_radeon_private_t *dev_priv = dev->dev_private;
b5e89ed5 1329 DRM_DEBUG("\n");
1da177e4 1330
6c340eac 1331 LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 1332
b5e89ed5 1333 if (dev_priv->cp_running) {
3e684eae 1334 DRM_DEBUG("while CP running\n");
1da177e4
LT
1335 return 0;
1336 }
b5e89ed5 1337 if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
3e684eae
MN
1338 DRM_DEBUG("called with bogus CP mode (%d)\n",
1339 dev_priv->cp_mode);
1da177e4
LT
1340 return 0;
1341 }
1342
b5e89ed5 1343 radeon_do_cp_start(dev_priv);
1da177e4
LT
1344
1345 return 0;
1346}
1347
1348/* Stop the CP. The engine must have been idled before calling this
1349 * routine.
1350 */
c153f45f 1351int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
1da177e4 1352{
1da177e4 1353 drm_radeon_private_t *dev_priv = dev->dev_private;
c153f45f 1354 drm_radeon_cp_stop_t *stop = data;
1da177e4 1355 int ret;
b5e89ed5 1356 DRM_DEBUG("\n");
1da177e4 1357
6c340eac 1358 LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 1359
1da177e4
LT
1360 if (!dev_priv->cp_running)
1361 return 0;
1362
1363 /* Flush any pending CP commands. This ensures any outstanding
1364 * commands are exectuted by the engine before we turn it off.
1365 */
c153f45f 1366 if (stop->flush) {
b5e89ed5 1367 radeon_do_cp_flush(dev_priv);
1da177e4
LT
1368 }
1369
1370 /* If we fail to make the engine go idle, we return an error
1371 * code so that the DRM ioctl wrapper can try again.
1372 */
c153f45f 1373 if (stop->idle) {
b5e89ed5
DA
1374 ret = radeon_do_cp_idle(dev_priv);
1375 if (ret)
1376 return ret;
1da177e4
LT
1377 }
1378
1379 /* Finally, we can turn off the CP. If the engine isn't idle,
1380 * we will get some dropped triangles as they won't be fully
1381 * rendered before the CP is shut down.
1382 */
b5e89ed5 1383 radeon_do_cp_stop(dev_priv);
1da177e4
LT
1384
1385 /* Reset the engine */
b5e89ed5 1386 radeon_do_engine_reset(dev);
1da177e4
LT
1387
1388 return 0;
1389}
1390
/* Final release of the device: idle and stop the CP, mask all interrupts,
 * clear every surface register, free the memory heaps and run the normal
 * cleanup path.  Called when the last reference to the device goes away.
 */
void radeon_do_release(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (dev_priv) {
		if (dev_priv->cp_running) {
			/* Stop the cp */
			/* Busy-wait (yielding the CPU each iteration) until
			 * the engine reports idle.
			 */
			while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
				DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
				schedule();
#else
				tsleep(&ret, PZERO, "rdnrel", 1);
#endif
			}
			radeon_do_cp_stop(dev_priv);
			radeon_do_engine_reset(dev);
		}

		/* Disable *all* interrupts */
		if (dev_priv->mmio)	/* remove this after permanent addmaps */
			RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

		if (dev_priv->mmio) {	/* remove all surfaces */
			/* Each surface occupies a 16-byte register block:
			 * INFO, LOWER_BOUND and UPPER_BOUND are zeroed.
			 */
			for (i = 0; i < RADEON_MAX_SURFACES; i++) {
				RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
					     16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
					     16 * i, 0);
			}
		}

		/* Free memory heap structures */
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));

		/* deallocate kernel resources */
		radeon_do_cleanup_cp(dev);
	}
}
1433
1434/* Just reset the CP ring. Called as part of an X Server engine reset.
1435 */
c153f45f 1436int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1da177e4 1437{
1da177e4 1438 drm_radeon_private_t *dev_priv = dev->dev_private;
b5e89ed5 1439 DRM_DEBUG("\n");
1da177e4 1440
6c340eac 1441 LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 1442
b5e89ed5 1443 if (!dev_priv) {
3e684eae 1444 DRM_DEBUG("called before init done\n");
20caafa6 1445 return -EINVAL;
1da177e4
LT
1446 }
1447
b5e89ed5 1448 radeon_do_cp_reset(dev_priv);
1da177e4
LT
1449
1450 /* The CP is no longer running after an engine reset */
1451 dev_priv->cp_running = 0;
1452
1453 return 0;
1454}
1455
c153f45f 1456int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1da177e4 1457{
1da177e4 1458 drm_radeon_private_t *dev_priv = dev->dev_private;
b5e89ed5 1459 DRM_DEBUG("\n");
1da177e4 1460
6c340eac 1461 LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 1462
b5e89ed5 1463 return radeon_do_cp_idle(dev_priv);
1da177e4
LT
1464}
1465
1466/* Added by Charl P. Botha to call radeon_do_resume_cp().
1467 */
/* RADEON_CP_RESUME ioctl: thin wrapper around radeon_do_resume_cp(). */
int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return radeon_do_resume_cp(dev);
}
1473
/* RADEON_ENGINE_RESET ioctl: reset the whole graphics engine. */
int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return radeon_do_engine_reset(dev);
}
1482
1da177e4
LT
1483/* ================================================================
1484 * Fullscreen mode
1485 */
1486
1487/* KW: Deprecated to say the least:
1488 */
/* Deprecated fullscreen ioctl: kept only for ABI compatibility with old
 * userspace; intentionally does nothing and always succeeds.
 */
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}
1493
1da177e4
LT
1494/* ================================================================
1495 * Freelist management
1496 */
1497
1498/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
1499 * bufs until freelist code is used. Note this hides a problem with
1500 * the scratch register * (used to keep track of last buffer
1501 * completed) being written to before * the last buffer has actually
b5e89ed5 1502 * completed rendering.
1da177e4
LT
1503 *
1504 * KW: It's also a good way to find free buffers quickly.
1505 *
1506 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
1507 * sleep. However, bugs in older versions of radeon_accel.c mean that
1508 * we essentially have to do this, else old clients will break.
b5e89ed5 1509 *
1da177e4
LT
1510 * However, it does leave open a potential deadlock where all the
1511 * buffers are held by other clients, which can't release them because
b5e89ed5 1512 * they can't get the lock.
1da177e4
LT
1513 */
1514
/* Grab a free DMA buffer, rotating through the buffer list starting just
 * past the last buffer handed out.  A buffer is free when it has no owner
 * (file_priv == NULL) or when it is pending and the engine's completion
 * age (scratch register 1) shows its commands have finished.  Spins up to
 * usec_timeout microseconds waiting for one to free up; the timeout clock
 * restarts whenever the engine makes progress.  Returns NULL on timeout.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;

	/* Round-robin: begin the scan after the last buffer we returned. */
	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;

	for (t = 0; t < dev_priv->usec_timeout; t++) {
		/* Latest completion age written back by the engine. */
		u32 done_age = GET_SCRATCH(1);
		DRM_DEBUG("done_age = %d\n", done_age);
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == NULL || (buf->pending &&
						       buf_priv->age <=
						       done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
			/* After the first pass, rescan from index 0 so the
			 * whole list is covered.
			 */
			start = 0;
		}

		if (t) {
			DRM_UDELAY(1);
			dev_priv->stats.freelist_loops++;
		}
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}
b5e89ed5 1554
#if 0
/* Disabled alternative implementation of radeon_freelist_get: reads the
 * completion age once via ring-rptr writeback instead of the scratch
 * register, and makes only two passes over the list with no delay loop.
 * Kept for reference; compiled out.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;
	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;
	dev_priv->stats.freelist_loops++;

	for (t = 0; t < 2; t++) {
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == 0 || (buf->pending &&
						    buf_priv->age <=
						    done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
		}
		/* Second pass covers indices below the starting point. */
		start = 0;
	}

	return NULL;
}
#endif
1590
84b1fd10 1591void radeon_freelist_reset(struct drm_device * dev)
1da177e4 1592{
cdd55a29 1593 struct drm_device_dma *dma = dev->dma;
1da177e4
LT
1594 drm_radeon_private_t *dev_priv = dev->dev_private;
1595 int i;
1596
1597 dev_priv->last_buf = 0;
b5e89ed5 1598 for (i = 0; i < dma->buf_count; i++) {
056219e2 1599 struct drm_buf *buf = dma->buflist[i];
1da177e4
LT
1600 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1601 buf_priv->age = 0;
1602 }
1603}
1604
1da177e4
LT
1605/* ================================================================
1606 * CP command submission
1607 */
1608
/* Wait until at least n bytes of space are free in the CP ring buffer.
 * Spins up to usec_timeout microseconds, but restarts the timeout whenever
 * the hardware read pointer advances (i.e. the engine is making progress).
 * Returns 0 on success, -EBUSY on timeout.
 */
int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
	int i;
	u32 last_head = GET_RING_HEAD(dev_priv);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		u32 head = GET_RING_HEAD(dev_priv);

		/* Free space is the gap between hardware head and our tail,
		 * wrapping around the ring when head is behind tail.
		 */
		ring->space = (head - ring->tail) * sizeof(u32);
		if (ring->space <= 0)
			ring->space += ring->size;
		if (ring->space > n)
			return 0;

		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

		/* Engine progressed: reset the timeout counter. */
		if (head != last_head)
			i = 0;
		last_head = head;

		DRM_UDELAY(1);
	}

	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
#if RADEON_FIFO_DEBUG
	radeon_status(dev_priv);
	DRM_ERROR("failed!\n");
#endif
	return -EBUSY;
}
1640
6c340eac
EA
/* Hand out DMA buffers to the client: pull buffers off the freelist until
 * d->request_count have been granted, recording each buffer's index and
 * size in the userspace arrays.  d->granted_count tracks progress, so a
 * partial grant is visible to the caller on error.
 *
 * Returns 0 on success, -EBUSY when the freelist runs dry (a misbehaving
 * client holding buffers), or -EFAULT on a bad userspace pointer.
 */
static int radeon_cp_get_buffers(struct drm_device *dev,
				 struct drm_file *file_priv,
				 struct drm_dma * d)
{
	int i;
	struct drm_buf *buf;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = radeon_freelist_get(dev);
		if (!buf)
			return -EBUSY;	/* NOTE: broken client */

		/* Mark the buffer as owned by this client. */
		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}
1666
/* DRM_DMA ioctl: validate the request (this driver only hands buffers out,
 * never accepts them) and delegate to radeon_cp_get_buffers().
 */
int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int ret = 0;
	struct drm_dma *d = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = radeon_cp_get_buffers(dev, file_priv, d);
	}

	return ret;
}
1699
/* Driver load hook: allocate and zero the per-device private structure,
 * record the chip-family flags passed in from the PCI ID table, mark the
 * families that support hierarchical Z, and detect the bus type (AGP,
 * PCIE or plain PCI).  Returns 0 on success, -ENOMEM on allocation failure.
 */
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_radeon_private_t *dev_priv;
	int ret = 0;

	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
	dev->dev_private = (void *)dev_priv;
	dev_priv->flags = flags;

	/* Families with a hierarchical Z buffer. */
	switch (flags & RADEON_FAMILY_MASK) {
	case CHIP_R100:
	case CHIP_RV200:
	case CHIP_R200:
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_R420:
	case CHIP_RV410:
	case CHIP_RV515:
	case CHIP_R520:
	case CHIP_RV570:
	case CHIP_R580:
		dev_priv->flags |= RADEON_HAS_HIERZ;
		break;
	default:
		/* all other chips have no hierarchical z buffer */
		break;
	}

	/* Record how the device is attached to the host. */
	if (drm_device_is_agp(dev))
		dev_priv->flags |= RADEON_IS_AGP;
	else if (drm_device_is_pcie(dev))
		dev_priv->flags |= RADEON_IS_PCIE;
	else
		dev_priv->flags |= RADEON_IS_PCI;

	DRM_DEBUG("%s card detected\n",
		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
	return ret;
}
1743
22eae947
DA
1744/* Create mappings for registers and framebuffer so userland doesn't necessarily
1745 * have to find them.
1746 */
/* First-open hook: create the register (PCI BAR 2) and framebuffer
 * (PCI BAR 0) mappings so userland doesn't have to add them itself, and
 * record the GART table size and framebuffer aperture base for later use.
 */
int radeon_driver_firstopen(struct drm_device *dev)
{
	int ret;
	drm_local_map_t *map;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

	/* MMIO registers live in PCI resource 2. */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret != 0)
		return ret;

	/* Framebuffer aperture is PCI resource 0; mapped write-combining.
	 * The map handle itself isn't kept -- only the aperture offset is
	 * needed later (for FB-resident GART tables).
	 */
	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &map);
	if (ret != 0)
		return ret;

	return 0;
}
1770
22eae947 1771int radeon_driver_unload(struct drm_device *dev)
1da177e4
LT
1772{
1773 drm_radeon_private_t *dev_priv = dev->dev_private;
1774
1775 DRM_DEBUG("\n");
1da177e4
LT
1776 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
1777
1778 dev->dev_private = NULL;
1779 return 0;
1780}
This page took 0.413757 seconds and 5 git commands to generate.