/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

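/*
 * Resolve the GEM handles in the relocation chunk to radeon buffer
 * objects, de-duplicating repeated handles, queue each object on the
 * parser's validation list, and validate the whole list at the end.
 */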
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each relocs use 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}

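/*
 * Map the ring id requested by userspace (plus an optional priority)
 * to the hardware ring index used internally; ring types that do not
 * exist on the current ASIC either fall back to the GFX ring or fail
 * with -EINVAL.
 */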
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_R600) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	}
	return 0;
}

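/*
 * Remember the most recent fence per ring that this IB must wait on;
 * radeon_fence_later() keeps whichever of the two fences signals last.
 */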
static void radeon_cs_sync_to(struct radeon_cs_parser *p,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = p->ib.sync_to[fence->ring];
	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
}

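/*
 * Make the command stream wait for the last user of every relocated
 * buffer object by folding each BO's sync_obj fence into the IB.
 */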
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
	}
}

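/*
 * A CS submission is an array of user pointers to chunks; each chunk
 * carries an id (IB, CONST_IB, RELOCS or FLAGS), a length in dwords
 * and a pointer to its payload.  Only the RELOCS and FLAGS payloads
 * are copied into kernel memory here; IB data is copied later, either
 * wholesale (VM path) or page by page on demand (non-VM path).
 */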
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

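/*
 * Submission path for non-VM (physical address) command streams:
 * allocate an indirect buffer, run the per-ASIC parser over the user
 * supplied packets and hand the validated IB to the scheduler.
 */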
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

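/*
 * Refresh the page table entries for the device's ring scratch BO
 * (rdev->ring_tmp_bo.bo) and for every buffer object on the validation
 * list, so the VM mappings match the current backing storage locations.
 */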
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = parser->rdev;
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
	if (r) {
		return r;
	}
	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}

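/*
 * Submission path for VM (virtual address) command streams: copy the
 * optional CONST IB and the main IB from userspace, let the ring
 * specific parser check them, then update the page tables and schedule
 * the IB(s) under the VM locks.
 */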
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	r = radeon_vm_alloc_pt(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_cs_sync_to(parser, vm->fence);
	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

	if (!r) {
		radeon_vm_fence(rdev, vm, parser->ib.fence);
	}

out:
	radeon_vm_add_to_lru(rdev, vm);
	mutex_unlock(&vm->mutex);
	mutex_unlock(&rdev->vm_manager.lock);
	return r;
}

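/*
 * If the submission failed because of a GPU lockup (-EDEADLK), reset
 * the GPU and ask userspace to retry the ioctl with -EAGAIN.
 */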
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

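/*
 * Top level entry point for the CS ioctl.  Illustrative only: from
 * userspace this path is typically reached through libdrm, roughly as
 * in the sketch below (the chunk setup is hypothetical, but
 * drmCommandWriteRead() and DRM_RADEON_CS are the real entry points):
 *
 *	struct drm_radeon_cs cs = { 0 };
 *	uint64_t chunk_ptrs[2] = { (uintptr_t)&ib_chunk,
 *				   (uintptr_t)&reloc_chunk };
 *	cs.num_chunks = 2;
 *	cs.chunks = (uintptr_t)chunk_ptrs;
 *	r = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
 */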
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

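/*
 * Copy any IB pages that were never demand-faulted by the parser from
 * userspace into the indirect buffer, taking care that the final page
 * may only be partially used.
 */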
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

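/*
 * Demand-copy IB pages up to and including pg_idx.  For AGP, where the
 * IB may be uncached, the page is staged through one of two bounce
 * pages (kpage[0]/kpage[1]) and then memcpy'd into the IB; otherwise
 * kpage simply points at the destination inside the IB and the user
 * copy lands there directly.
 */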
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
		false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}

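/*
 * Fetch one dword from the IB, faulting the containing page in through
 * radeon_cs_update_pages() if neither cached kpage holds it.
 */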
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}