/* drivers/gpu/drm/nouveau/nv20_graph.c
 *
 * PGRAPH engine support for NV20/NV30-family chipsets, as of the commit
 * "drm/nouveau: Rework tile region handling."
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/*
 * NV20
 * -----
 * There are 3 families:
 *  NV20    is 0x10de:0x020*
 *  NV25/28 is 0x10de:0x025* / 0x10de:0x028*
 *  NV2A    is 0x10de:0x02A0
 *
 * NV30
 * -----
 * There are 3 families:
 *  NV30/31 is 0x10de:0x030* / 0x10de:0x031*
 *  NV34    is 0x10de:0x032*
 *  NV35/36 is 0x10de:0x033* / 0x10de:0x034*
 *
 * Not seen in the wild, no dumps (probably NV35):
 *  NV37 is 0x10de:0x00fc, 0x10de:0x00fd
 *  NV38 is 0x10de:0x0333, 0x10de:0x00fe
 */

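/* Per-chipset size, in bytes, of the PGRAPH context image.  These sizes are
 * presumably derived from the context layout used by the binary driver;
 * they do not appear to be documented anywhere. */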
#define NV20_GRCTX_SIZE (3580*4)
#define NV25_GRCTX_SIZE (3529*4)
#define NV2A_GRCTX_SIZE (3500*4)

#define NV30_31_GRCTX_SIZE (24392)
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)

static int nv20_graph_register(struct drm_device *);
static int nv30_graph_register(struct drm_device *);

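/*
 * The *_graph_context_init() functions below pre-fill a freshly allocated
 * channel context image with its non-zero default values.  All offsets are
 * byte offsets into the context gpuobj (nv_wo32() takes offsets in bytes);
 * the values themselves appear to be hardware defaults captured from the
 * binary driver and are mostly undocumented.
 */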
static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
        int i;

        nv_wo32(ctx, 0x033c, 0xffff0000);
        nv_wo32(ctx, 0x03a0, 0x0fff0000);
        nv_wo32(ctx, 0x03a4, 0x0fff0000);
        nv_wo32(ctx, 0x047c, 0x00000101);
        nv_wo32(ctx, 0x0490, 0x00000111);
        nv_wo32(ctx, 0x04a8, 0x44400000);
        for (i = 0x04d4; i <= 0x04e0; i += 4)
                nv_wo32(ctx, i, 0x00030303);
        for (i = 0x04f4; i <= 0x0500; i += 4)
                nv_wo32(ctx, i, 0x00080000);
        for (i = 0x050c; i <= 0x0518; i += 4)
                nv_wo32(ctx, i, 0x01012000);
        for (i = 0x051c; i <= 0x0528; i += 4)
                nv_wo32(ctx, i, 0x000105b8);
        for (i = 0x052c; i <= 0x0538; i += 4)
                nv_wo32(ctx, i, 0x00080008);
        for (i = 0x055c; i <= 0x0598; i += 4)
                nv_wo32(ctx, i, 0x07ff0000);
        nv_wo32(ctx, 0x05a4, 0x4b7fffff);
        nv_wo32(ctx, 0x05fc, 0x00000001);
        nv_wo32(ctx, 0x0604, 0x00004000);
        nv_wo32(ctx, 0x0610, 0x00000001);
        nv_wo32(ctx, 0x0618, 0x00040000);
        nv_wo32(ctx, 0x061c, 0x00010000);
        for (i = 0x1c1c; i <= 0x248c; i += 16) {
                nv_wo32(ctx, (i + 0), 0x10700ff9);
                nv_wo32(ctx, (i + 4), 0x0436086c);
                nv_wo32(ctx, (i + 8), 0x000c001b);
        }
        nv_wo32(ctx, 0x281c, 0x3f800000);
        nv_wo32(ctx, 0x2830, 0x3f800000);
        nv_wo32(ctx, 0x285c, 0x40000000);
        nv_wo32(ctx, 0x2860, 0x3f800000);
        nv_wo32(ctx, 0x2864, 0x3f000000);
        nv_wo32(ctx, 0x286c, 0x40000000);
        nv_wo32(ctx, 0x2870, 0x3f800000);
        nv_wo32(ctx, 0x2878, 0xbf800000);
        nv_wo32(ctx, 0x2880, 0xbf800000);
        nv_wo32(ctx, 0x34a4, 0x000fe000);
        nv_wo32(ctx, 0x3530, 0x000003f8);
        nv_wo32(ctx, 0x3540, 0x002fe000);
        for (i = 0x355c; i <= 0x3578; i += 4)
                nv_wo32(ctx, i, 0x001c527c);
}

static void
nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
        int i;

        nv_wo32(ctx, 0x035c, 0xffff0000);
        nv_wo32(ctx, 0x03c0, 0x0fff0000);
        nv_wo32(ctx, 0x03c4, 0x0fff0000);
        nv_wo32(ctx, 0x049c, 0x00000101);
        nv_wo32(ctx, 0x04b0, 0x00000111);
        nv_wo32(ctx, 0x04c8, 0x00000080);
        nv_wo32(ctx, 0x04cc, 0xffff0000);
        nv_wo32(ctx, 0x04d0, 0x00000001);
        nv_wo32(ctx, 0x04e4, 0x44400000);
        nv_wo32(ctx, 0x04fc, 0x4b800000);
        for (i = 0x0510; i <= 0x051c; i += 4)
                nv_wo32(ctx, i, 0x00030303);
        for (i = 0x0530; i <= 0x053c; i += 4)
                nv_wo32(ctx, i, 0x00080000);
        for (i = 0x0548; i <= 0x0554; i += 4)
                nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0558; i <= 0x0564; i += 4)
                nv_wo32(ctx, i, 0x000105b8);
        for (i = 0x0568; i <= 0x0574; i += 4)
                nv_wo32(ctx, i, 0x00080008);
        for (i = 0x0598; i <= 0x05d4; i += 4)
                nv_wo32(ctx, i, 0x07ff0000);
        nv_wo32(ctx, 0x05e0, 0x4b7fffff);
        nv_wo32(ctx, 0x0620, 0x00000080);
        nv_wo32(ctx, 0x0624, 0x30201000);
        nv_wo32(ctx, 0x0628, 0x70605040);
        nv_wo32(ctx, 0x062c, 0xb0a09080);
        nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
        nv_wo32(ctx, 0x0664, 0x00000001);
        nv_wo32(ctx, 0x066c, 0x00004000);
        nv_wo32(ctx, 0x0678, 0x00000001);
        nv_wo32(ctx, 0x0680, 0x00040000);
        nv_wo32(ctx, 0x0684, 0x00010000);
        for (i = 0x1b04; i <= 0x2374; i += 16) {
                nv_wo32(ctx, (i + 0), 0x10700ff9);
                nv_wo32(ctx, (i + 4), 0x0436086c);
                nv_wo32(ctx, (i + 8), 0x000c001b);
        }
        nv_wo32(ctx, 0x2704, 0x3f800000);
        nv_wo32(ctx, 0x2718, 0x3f800000);
        nv_wo32(ctx, 0x2744, 0x40000000);
        nv_wo32(ctx, 0x2748, 0x3f800000);
        nv_wo32(ctx, 0x274c, 0x3f000000);
        nv_wo32(ctx, 0x2754, 0x40000000);
        nv_wo32(ctx, 0x2758, 0x3f800000);
        nv_wo32(ctx, 0x2760, 0xbf800000);
        nv_wo32(ctx, 0x2768, 0xbf800000);
        nv_wo32(ctx, 0x308c, 0x000fe000);
        nv_wo32(ctx, 0x3108, 0x000003f8);
        nv_wo32(ctx, 0x3468, 0x002fe000);
        for (i = 0x3484; i <= 0x34a0; i += 4)
                nv_wo32(ctx, i, 0x001c527c);
}

static void
nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
        int i;

        nv_wo32(ctx, 0x033c, 0xffff0000);
        nv_wo32(ctx, 0x03a0, 0x0fff0000);
        nv_wo32(ctx, 0x03a4, 0x0fff0000);
        nv_wo32(ctx, 0x047c, 0x00000101);
        nv_wo32(ctx, 0x0490, 0x00000111);
        nv_wo32(ctx, 0x04a8, 0x44400000);
        for (i = 0x04d4; i <= 0x04e0; i += 4)
                nv_wo32(ctx, i, 0x00030303);
        for (i = 0x04f4; i <= 0x0500; i += 4)
                nv_wo32(ctx, i, 0x00080000);
        for (i = 0x050c; i <= 0x0518; i += 4)
                nv_wo32(ctx, i, 0x01012000);
        for (i = 0x051c; i <= 0x0528; i += 4)
                nv_wo32(ctx, i, 0x000105b8);
        for (i = 0x052c; i <= 0x0538; i += 4)
                nv_wo32(ctx, i, 0x00080008);
        for (i = 0x055c; i <= 0x0598; i += 4)
                nv_wo32(ctx, i, 0x07ff0000);
        nv_wo32(ctx, 0x05a4, 0x4b7fffff);
        nv_wo32(ctx, 0x05fc, 0x00000001);
        nv_wo32(ctx, 0x0604, 0x00004000);
        nv_wo32(ctx, 0x0610, 0x00000001);
        nv_wo32(ctx, 0x0618, 0x00040000);
        nv_wo32(ctx, 0x061c, 0x00010000);
        for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
                nv_wo32(ctx, (i + 0), 0x10700ff9);
                nv_wo32(ctx, (i + 4), 0x0436086c);
                nv_wo32(ctx, (i + 8), 0x000c001b);
        }
        nv_wo32(ctx, 0x269c, 0x3f800000);
        nv_wo32(ctx, 0x26b0, 0x3f800000);
        nv_wo32(ctx, 0x26dc, 0x40000000);
        nv_wo32(ctx, 0x26e0, 0x3f800000);
        nv_wo32(ctx, 0x26e4, 0x3f000000);
        nv_wo32(ctx, 0x26ec, 0x40000000);
        nv_wo32(ctx, 0x26f0, 0x3f800000);
        nv_wo32(ctx, 0x26f8, 0xbf800000);
        nv_wo32(ctx, 0x2700, 0xbf800000);
        nv_wo32(ctx, 0x3024, 0x000fe000);
        nv_wo32(ctx, 0x30a0, 0x000003f8);
        nv_wo32(ctx, 0x33fc, 0x002fe000);
        for (i = 0x341c; i <= 0x3438; i += 4)
                nv_wo32(ctx, i, 0x001c527c);
}

static void
nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
        int i;

        nv_wo32(ctx, 0x0410, 0x00000101);
        nv_wo32(ctx, 0x0424, 0x00000111);
        nv_wo32(ctx, 0x0428, 0x00000060);
        nv_wo32(ctx, 0x0444, 0x00000080);
        nv_wo32(ctx, 0x0448, 0xffff0000);
        nv_wo32(ctx, 0x044c, 0x00000001);
        nv_wo32(ctx, 0x0460, 0x44400000);
        nv_wo32(ctx, 0x048c, 0xffff0000);
        for (i = 0x04e0; i < 0x04e8; i += 4)
                nv_wo32(ctx, i, 0x0fff0000);
        nv_wo32(ctx, 0x04ec, 0x00011100);
        for (i = 0x0508; i < 0x0548; i += 4)
                nv_wo32(ctx, i, 0x07ff0000);
        nv_wo32(ctx, 0x0550, 0x4b7fffff);
        nv_wo32(ctx, 0x058c, 0x00000080);
        nv_wo32(ctx, 0x0590, 0x30201000);
        nv_wo32(ctx, 0x0594, 0x70605040);
        nv_wo32(ctx, 0x0598, 0xb8a89888);
        nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
        nv_wo32(ctx, 0x05b0, 0xb0000000);
        for (i = 0x0600; i < 0x0640; i += 4)
                nv_wo32(ctx, i, 0x00010588);
        for (i = 0x0640; i < 0x0680; i += 4)
                nv_wo32(ctx, i, 0x00030303);
        for (i = 0x06c0; i < 0x0700; i += 4)
                nv_wo32(ctx, i, 0x0008aae4);
        for (i = 0x0700; i < 0x0740; i += 4)
                nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0740; i < 0x0780; i += 4)
                nv_wo32(ctx, i, 0x00080008);
        nv_wo32(ctx, 0x085c, 0x00040000);
        nv_wo32(ctx, 0x0860, 0x00010000);
        for (i = 0x0864; i < 0x0874; i += 4)
                nv_wo32(ctx, i, 0x00040004);
        for (i = 0x1f18; i <= 0x3088; i += 16) {
                nv_wo32(ctx, i + 0, 0x10700ff9);
                /* these two were written at i + 1 and i + 2, overlapping
                 * the first dword; i + 4 / i + 8 matches the layout used
                 * by the other chipsets' context_init functions */
                nv_wo32(ctx, i + 4, 0x0436086c);
                nv_wo32(ctx, i + 8, 0x000c001b);
        }
        for (i = 0x30b8; i < 0x30c8; i += 4)
                nv_wo32(ctx, i, 0x0000ffff);
        nv_wo32(ctx, 0x344c, 0x3f800000);
        nv_wo32(ctx, 0x3808, 0x3f800000);
        nv_wo32(ctx, 0x381c, 0x3f800000);
        nv_wo32(ctx, 0x3848, 0x40000000);
        nv_wo32(ctx, 0x384c, 0x3f800000);
        nv_wo32(ctx, 0x3850, 0x3f000000);
        nv_wo32(ctx, 0x3858, 0x40000000);
        nv_wo32(ctx, 0x385c, 0x3f800000);
        nv_wo32(ctx, 0x3864, 0xbf800000);
        nv_wo32(ctx, 0x386c, 0xbf800000);
}

static void
nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
        int i;

        nv_wo32(ctx, 0x040c, 0x01000101);
        nv_wo32(ctx, 0x0420, 0x00000111);
        nv_wo32(ctx, 0x0424, 0x00000060);
        nv_wo32(ctx, 0x0440, 0x00000080);
        nv_wo32(ctx, 0x0444, 0xffff0000);
        nv_wo32(ctx, 0x0448, 0x00000001);
        nv_wo32(ctx, 0x045c, 0x44400000);
        nv_wo32(ctx, 0x0480, 0xffff0000);
        for (i = 0x04d4; i < 0x04dc; i += 4)
                nv_wo32(ctx, i, 0x0fff0000);
        nv_wo32(ctx, 0x04e0, 0x00011100);
        for (i = 0x04fc; i < 0x053c; i += 4)
                nv_wo32(ctx, i, 0x07ff0000);
        nv_wo32(ctx, 0x0544, 0x4b7fffff);
        nv_wo32(ctx, 0x057c, 0x00000080);
        nv_wo32(ctx, 0x0580, 0x30201000);
        nv_wo32(ctx, 0x0584, 0x70605040);
        nv_wo32(ctx, 0x0588, 0xb8a89888);
        nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
        nv_wo32(ctx, 0x05a0, 0xb0000000);
        for (i = 0x05f0; i < 0x0630; i += 4)
                nv_wo32(ctx, i, 0x00010588);
        for (i = 0x0630; i < 0x0670; i += 4)
                nv_wo32(ctx, i, 0x00030303);
        for (i = 0x06b0; i < 0x06f0; i += 4)
                nv_wo32(ctx, i, 0x0008aae4);
        for (i = 0x06f0; i < 0x0730; i += 4)
                nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0730; i < 0x0770; i += 4)
                nv_wo32(ctx, i, 0x00080008);
        nv_wo32(ctx, 0x0850, 0x00040000);
        nv_wo32(ctx, 0x0854, 0x00010000);
        for (i = 0x0858; i < 0x0868; i += 4)
                nv_wo32(ctx, i, 0x00040004);
        for (i = 0x15ac; i <= 0x271c; i += 16) {
                nv_wo32(ctx, i + 0, 0x10700ff9);
                /* as in nv30_31 above: fixed from i + 1 / i + 2 to the
                 * intended dword offsets */
                nv_wo32(ctx, i + 4, 0x0436086c);
                nv_wo32(ctx, i + 8, 0x000c001b);
        }
        for (i = 0x274c; i < 0x275c; i += 4)
                nv_wo32(ctx, i, 0x0000ffff);
        nv_wo32(ctx, 0x2ae0, 0x3f800000);
        nv_wo32(ctx, 0x2e9c, 0x3f800000);
        nv_wo32(ctx, 0x2eb0, 0x3f800000);
        nv_wo32(ctx, 0x2edc, 0x40000000);
        nv_wo32(ctx, 0x2ee0, 0x3f800000);
        nv_wo32(ctx, 0x2ee4, 0x3f000000);
        nv_wo32(ctx, 0x2eec, 0x40000000);
        nv_wo32(ctx, 0x2ef0, 0x3f800000);
        nv_wo32(ctx, 0x2ef8, 0xbf800000);
        nv_wo32(ctx, 0x2f00, 0xbf800000);
}

static void
nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
        int i;

        nv_wo32(ctx, 0x040c, 0x00000101);
        nv_wo32(ctx, 0x0420, 0x00000111);
        nv_wo32(ctx, 0x0424, 0x00000060);
        nv_wo32(ctx, 0x0440, 0x00000080);
        nv_wo32(ctx, 0x0444, 0xffff0000);
        nv_wo32(ctx, 0x0448, 0x00000001);
        nv_wo32(ctx, 0x045c, 0x44400000);
        nv_wo32(ctx, 0x0488, 0xffff0000);
        for (i = 0x04dc; i < 0x04e4; i += 4)
                nv_wo32(ctx, i, 0x0fff0000);
        nv_wo32(ctx, 0x04e8, 0x00011100);
        for (i = 0x0504; i < 0x0544; i += 4)
                nv_wo32(ctx, i, 0x07ff0000);
        nv_wo32(ctx, 0x054c, 0x4b7fffff);
        nv_wo32(ctx, 0x0588, 0x00000080);
        nv_wo32(ctx, 0x058c, 0x30201000);
        nv_wo32(ctx, 0x0590, 0x70605040);
        nv_wo32(ctx, 0x0594, 0xb8a89888);
        nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
        nv_wo32(ctx, 0x05ac, 0xb0000000);
        for (i = 0x0604; i < 0x0644; i += 4)
                nv_wo32(ctx, i, 0x00010588);
        for (i = 0x0644; i < 0x0684; i += 4)
                nv_wo32(ctx, i, 0x00030303);
        for (i = 0x06c4; i < 0x0704; i += 4)
                nv_wo32(ctx, i, 0x0008aae4);
        for (i = 0x0704; i < 0x0744; i += 4)
                nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0744; i < 0x0784; i += 4)
                nv_wo32(ctx, i, 0x00080008);
        nv_wo32(ctx, 0x0860, 0x00040000);
        nv_wo32(ctx, 0x0864, 0x00010000);
        for (i = 0x0868; i < 0x0878; i += 4)
                nv_wo32(ctx, i, 0x00040004);
        for (i = 0x1f1c; i <= 0x308c; i += 16) {
                nv_wo32(ctx, i + 0, 0x10700ff9);
                nv_wo32(ctx, i + 4, 0x0436086c);
                nv_wo32(ctx, i + 8, 0x000c001b);
        }
        for (i = 0x30bc; i < 0x30cc; i += 4)
                nv_wo32(ctx, i, 0x0000ffff);
        nv_wo32(ctx, 0x3450, 0x3f800000);
        nv_wo32(ctx, 0x380c, 0x3f800000);
        nv_wo32(ctx, 0x3820, 0x3f800000);
        nv_wo32(ctx, 0x384c, 0x40000000);
        nv_wo32(ctx, 0x3850, 0x3f800000);
        nv_wo32(ctx, 0x3854, 0x3f000000);
        nv_wo32(ctx, 0x385c, 0x40000000);
        nv_wo32(ctx, 0x3860, 0x3f800000);
        nv_wo32(ctx, 0x3868, 0xbf800000);
        nv_wo32(ctx, 0x3870, 0xbf800000);
}

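/*
 * Context creation: allocate a context image of the chipset-specific size,
 * fill in the defaults, tag the image with the owning channel's id (the
 * CTX_USER word, whose byte offset "idoffs" varies per chipset), then point
 * this channel's slot in the PGRAPH context table at the image's instance
 * address (pinst >> 4).
 */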
int
nv20_graph_create_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
        unsigned int idoffs = 0x28;
        int ret;

        switch (dev_priv->chipset) {
        case 0x20:
                ctx_init = nv20_graph_context_init;
                idoffs = 0;
                break;
        case 0x25:
        case 0x28:
                ctx_init = nv25_graph_context_init;
                break;
        case 0x2a:
                ctx_init = nv2a_graph_context_init;
                idoffs = 0;
                break;
        case 0x30:
        case 0x31:
                ctx_init = nv30_31_graph_context_init;
                break;
        case 0x34:
                ctx_init = nv34_graph_context_init;
                break;
        case 0x35:
        case 0x36:
                ctx_init = nv35_36_graph_context_init;
                break;
        default:
                BUG_ON(1);
        }

        ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
        if (ret)
                return ret;

        /* Initialise default context values */
        ctx_init(dev, chan->ramin_grctx);

        /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
        nv_wo32(chan->ramin_grctx, idoffs,
                (chan->id << 24) | 0x1); /* CTX_USER */

        nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
        return 0;
}

void
nv20_graph_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pgraph->fifo_access(dev, false);

        /* Unload the context if it's the currently active one */
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);

        pgraph->fifo_access(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the context resources */
        nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

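/*
 * Context switching: point CHANNEL_CTX_POINTER at a context image's
 * instance address and kick a hardware transfer with CHANNEL_CTX_XFER_LOAD
 * or _SAVE.  The transfer is performed by the hardware, hence the
 * wait-for-idle afterwards.
 */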
int
nv20_graph_load_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        uint32_t inst;

        if (!chan->ramin_grctx)
                return -EINVAL;
        inst = chan->ramin_grctx->pinst >> 4;

        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
                     NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
        nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);

        nouveau_wait_for_idle(dev);
        return 0;
}

int
nv20_graph_unload_context(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_channel *chan;
        uint32_t inst, tmp;

        chan = pgraph->channel(dev);
        if (!chan)
                return 0;
        inst = chan->ramin_grctx->pinst >> 4;

        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
                     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);

        nouveau_wait_for_idle(dev);

        nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
        tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
        tmp |= (pfifo->channels - 1) << 24;
        nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
        return 0;
}

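/*
 * RDI is PGRAPH's indirect register window: a write to RDI_INDEX selects an
 * internal offset, and subsequent RDI_DATA writes store to it, with the
 * index auto-incrementing.  A hypothetical helper (for illustration only;
 * this file open-codes the index/data pairs) would look like:
 *
 *      static void rdi_write(struct drm_device *dev, u32 idx, u32 val)
 *      {
 *              nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, idx);
 *              nv_wr32(dev, NV10_PGRAPH_RDI_DATA, val);
 *      }
 *
 * nv20_graph_rdi() below relies on the auto-increment to zero a block of
 * internal state; the index and write count differ between NV20 and the
 * later chipsets.
 */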
static void
nv20_graph_rdi(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int i, writecount = 32;
        uint32_t rdi_index = 0x2c80000;

        if (dev_priv->chipset == 0x20) {
                rdi_index = 0x3d0000;
                writecount = 15;
        }

        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
        for (i = 0; i < writecount; i++)
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);

        nouveau_wait_for_idle(dev);
}

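/*
 * Tile regions: mirror one PFB tile region's limit/pitch/address setup into
 * PGRAPH, both through the directly visible TLIMIT/TSIZE/TILE registers and
 * through what appears to be an RDI-accessible shadow copy of the same
 * state.
 */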
void
nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
        nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
        nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);

        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
}

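/*
 * nv20_graph_init(): bring PGRAPH out of reset, install the context pointer
 * table, register the object classes, then program the debug, tiling and
 * RAM configuration state.  Most of the magic numbers below are
 * undocumented.
 */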
int
nv20_graph_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        uint32_t tmp, vramsz;
        int ret, i;

        switch (dev_priv->chipset) {
        case 0x20:
                pgraph->grctx_size = NV20_GRCTX_SIZE;
                break;
        case 0x25:
        case 0x28:
                pgraph->grctx_size = NV25_GRCTX_SIZE;
                break;
        case 0x2a:
                pgraph->grctx_size = NV2A_GRCTX_SIZE;
                break;
        default:
                NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
                pgraph->accel_blocked = true;
                return 0;
        }

        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

        if (!pgraph->ctx_table) {
                /* Create Context Pointer Table */
                ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC,
                                         &pgraph->ctx_table);
                if (ret)
                        return ret;
        }

        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
                     pgraph->ctx_table->pinst >> 4);

        nv20_graph_rdi(dev);

        ret = nv20_graph_register(dev);
        if (ret) {
                nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
                return ret;
        }

        nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
        nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

        nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
        nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
        nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
        nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
        nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
        nv_wr32(dev, 0x40009C, 0x00000040);

        if (dev_priv->chipset >= 0x25) {
                nv_wr32(dev, 0x400890, 0x00080000);
                nv_wr32(dev, 0x400610, 0x304B1FB6);
                nv_wr32(dev, 0x400B80, 0x18B82880);
                nv_wr32(dev, 0x400B84, 0x44000000);
                nv_wr32(dev, 0x400098, 0x40000080);
                nv_wr32(dev, 0x400B88, 0x000000ff);
        } else {
                nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
                nv_wr32(dev, 0x400094, 0x00000005);
                nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
                nv_wr32(dev, 0x400B84, 0x24000000);
                nv_wr32(dev, 0x400098, 0x00000040);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0x00000030);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0x00000030);
        }

        /* Turn all the tiling regions off. */
        for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
                nv20_graph_set_tile_region(dev, i);

        for (i = 0; i < 8; i++) {
                nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
                        nv_rd32(dev, 0x100300 + i * 4));
        }
        nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));

        nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
        nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);

        tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
        nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
        tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
        nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);

        /* begin RAM config */
        vramsz = pci_resource_len(dev->pdev, 0) - 1;
        nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
        nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, NV04_PFB_CFG0));
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, NV04_PFB_CFG1));
        nv_wr32(dev, 0x400820, 0);
        nv_wr32(dev, 0x400824, 0);
        nv_wr32(dev, 0x400864, vramsz - 1);
        nv_wr32(dev, 0x400868, vramsz - 1);

        /* interesting.. the below overwrites some of the tile setup above.. */
        nv_wr32(dev, 0x400B20, 0x00000000);
        nv_wr32(dev, 0x400B04, 0xFFFFFFFF);

        nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
        nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
        nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
        nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

        return 0;
}

void
nv20_graph_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

        nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}

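/*
 * NV30-family variant of the init sequence: same overall flow as
 * nv20_graph_init(), but with NV30-specific setup values and without the
 * PGRAPH copy of the PFB 0x100300 block.
 */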
int
nv30_graph_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        int ret, i;

        switch (dev_priv->chipset) {
        case 0x30:
        case 0x31:
                pgraph->grctx_size = NV30_31_GRCTX_SIZE;
                break;
        case 0x34:
                pgraph->grctx_size = NV34_GRCTX_SIZE;
                break;
        case 0x35:
        case 0x36:
                pgraph->grctx_size = NV35_36_GRCTX_SIZE;
                break;
        default:
                NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
                pgraph->accel_blocked = true;
                return 0;
        }

        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

        if (!pgraph->ctx_table) {
                /* Create Context Pointer Table */
                ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC,
                                         &pgraph->ctx_table);
                if (ret)
                        return ret;
        }

        ret = nv30_graph_register(dev);
        if (ret) {
                nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
                return ret;
        }

        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
                     pgraph->ctx_table->pinst >> 4);

        nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
        nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

        nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
        nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
        nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
        nv_wr32(dev, 0x400890, 0x01b463ff);
        nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
        nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
        nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
        nv_wr32(dev, 0x400B80, 0x1003d888);
        nv_wr32(dev, 0x400B84, 0x0c000000);
        nv_wr32(dev, 0x400098, 0x00000000);
        nv_wr32(dev, 0x40009C, 0x0005ad00);
        nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
        nv_wr32(dev, 0x4000a0, 0x00000000);
        nv_wr32(dev, 0x4000a4, 0x00000008);
        nv_wr32(dev, 0x4008a8, 0xb784a400);
        nv_wr32(dev, 0x400ba0, 0x002f8685);
        nv_wr32(dev, 0x400ba4, 0x00231f3f);
        nv_wr32(dev, 0x4008a4, 0x40000020);

        if (dev_priv->chipset == 0x34) {
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0x00200201);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0x00000008);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0x00000032);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0x00000002);
        }

        nv_wr32(dev, 0x4000c0, 0x00000016);

        /* Turn all the tiling regions off. */
        for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
                nv20_graph_set_tile_region(dev, i);

        nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
        nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
        nv_wr32(dev, 0x0040075c, 0x00000001);

        /* begin RAM config */
        /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
        nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
        nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
        if (dev_priv->chipset != 0x34) {
                nv_wr32(dev, 0x400750, 0x00EA0000);
                nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
                nv_wr32(dev, 0x400750, 0x00EA0004);
                nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
        }

        return 0;
}

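/*
 * Object class registration: declare which object classes this chipset's
 * PGRAPH accepts.  Only the 3D class differs within a family (kelvin on
 * NV20, rankine on NV30); the rest is the common 2D set.
 */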
static int
nv20_graph_register(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->engine.graph.registered)
                return 0;

        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
        NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
        NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */

        /* kelvin */
        if (dev_priv->chipset < 0x25)
                NVOBJ_CLASS(dev, 0x0097, GR);
        else
                NVOBJ_CLASS(dev, 0x0597, GR);

        /* nvsw */
        NVOBJ_CLASS(dev, 0x506e, SW);
        NVOBJ_MTHD(dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

        dev_priv->engine.graph.registered = true;
        return 0;
}

static int
nv30_graph_register(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->engine.graph.registered)
                return 0;

        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
        NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
        NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
        NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
        NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */

        /* rankine: the bitmasks test (chipset & 0x0f), so
         * 0x30/0x31 -> 0x0397, 0x34 -> 0x0697, 0x35-0x38 -> 0x0497 */
        if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x0397, GR);
        else if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x0697, GR);
        else if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x0497, GR);

        /* nvsw */
        NVOBJ_CLASS(dev, 0x506e, SW);
        NVOBJ_MTHD(dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

        dev_priv->engine.graph.registered = true;
        return 0;
}