/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
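
/*
 * Illustrative lifecycle sketch (not part of the original source): a
 * typical submission pairs the helpers in this file roughly as follows,
 * assuming the caller holds the ring lock:
 *
 *	struct amdgpu_fence *fence;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);  // assign seq, emit cmd
 *	// ... GPU executes and eventually writes seq back ...
 *	amdgpu_fence_process(ring);        // poll readback, wake fence_queue
 *	r = amdgpu_fence_wait(fence, false);      // block until signaled
 */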
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}
/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		&adev->fence_queue.lock, adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
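
/*
 * Example (illustrative values): each amdgpu_fence_emit() call bumps the
 * per-ring counter, so three consecutive emissions on a ring whose
 * sync_seq[idx] is 41 create fences with seq 42, 43 and 44; the fence
 * command emitted for each one makes the GPU write that value back
 * through fence_drv.gpu_addr once the preceding work has retired.
 */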
/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * this function is called with fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring, in which case the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there would
	 * need to be a continuous stream of newly signaled fences, i.e.
	 * amdgpu_fence_read would need to return a different value each
	 * time for both the currently polling process and the other
	 * process that updates last_seq between our atomic read and xchg.
	 * And the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process would have to be interrupted between amdgpu_fence_read
	 * and the atomic xchg every single time.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}
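
/*
 * Worked example of the 32->64 bit extension above (illustrative values):
 * say last_seq = 0x1fffffffe, last_emitted = 0x200000005, and the 32-bit
 * hardware readback has wrapped around to 0x00000003:
 *
 *	seq = 0x00000003;
 *	seq |= last_seq & 0xffffffff00000000LL;		// 0x100000003
 *	// seq < last_seq, so the readback wrapped: borrow the
 *	// upper 32 bits from last_emitted instead
 *	seq &= 0xffffffff;
 *	seq |= last_emitted & 0xffffffff00000000LL;	// 0x200000003
 */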
/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probe
 * the hardware if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->adev->fence_queue);
	else if (amdgpu_ring_is_lockup(ring)) {
		/* good news we believe it's a lockup */
		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
			"0x%016llx last fence id 0x%016llx on ring %d)\n",
			(uint64_t)atomic64_read(&fence_drv->last_seq),
			fence_drv->sync_seq[ring->idx], ring->idx);

		/* remember that we need a reset */
		ring->adev->needs_reset = true;
		wake_up_all(&ring->adev->fence_queue);
	}
	up_read(&ring->adev->exclusive_lock);
}
/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there would
	 * need to be a continuous stream of newly signaled fences, i.e.
	 * amdgpu_fence_read would need to return a different value each
	 * time for both the currently polling process and the other
	 * process that updates last_seq between our atomic read and xchg.
	 * And the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process would have to be interrupted between amdgpu_fence_read
	 * and the atomic xchg every single time.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (wake)
		wake_up_all(&ring->adev->fence_queue);
}
/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @fence: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}
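
/*
 * Signalling flow sketch (pieced together from this file, for
 * illustration): amdgpu_fence_enable_signaling() queues fence->fence_wake
 * on adev->fence_queue; a later wake_up_all() from amdgpu_fence_process()
 * or the lockup handler then runs amdgpu_fence_check_signaled() for every
 * armed fence, signalling and dequeuing those whose seq has passed.
 */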
/**
 * amdgpu_fence_signaled - check if a fence has signaled
 *
 * @fence: amdgpu fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
{
	if (!fence)
		return true;

	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
		if (!fence_signal(&fence->base))
			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
		return true;
	}

	return false;
}
/**
 * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @adev: amdgpu device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for amdgpu_fence_wait_seq.
 */
static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!adev->rings[i] || !seq[i])
			continue;

		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
			return true;
	}

	return false;
}
/**
 * amdgpu_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @adev: amdgpu device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for amdgpu_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	uint64_t last_seq[AMDGPU_MAX_RINGS];
	bool signaled;
	unsigned i;
	long r;

	if (timeout == 0) {
		return amdgpu_fence_any_seq_signaled(adev, target_seq);
	}

	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(adev->fence_queue, (
				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(adev->fence_queue, (
				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !target_seq[i])
				continue;

			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {

			if (adev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
				struct amdgpu_ring *ring = adev->rings[i];

				if (!ring || !target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
					break;
			}

			if (i != AMDGPU_MAX_RINGS)
				continue;

			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
				if (!adev->rings[i] || !target_seq[i])
					continue;

				if (amdgpu_ring_is_lockup(adev->rings[i]))
					break;
			}

			if (i < AMDGPU_MAX_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(adev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				adev->needs_reset = true;
				wake_up_all(&adev->fence_queue);
				return -EDEADLK;
			}

			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
				if (timeout <= 0) {
					return 0;
				}
			}
		}
	}
	return timeout;
}
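
/*
 * Illustrative caller pattern (mirrors amdgpu_fence_wait below): the
 * target_seq array is sparse and indexed by ring id, with zero meaning
 * "do not wait on this ring":
 *
 *	u64 target_seq[AMDGPU_MAX_RINGS] = {};
 *
 *	target_seq[ring->idx] = fence->seq;
 *	r = amdgpu_fence_wait_seq_timeout(adev, target_seq, true,
 *					  MAX_SCHEDULE_TIMEOUT);
 */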
/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[fence->ring->idx] = fence->seq;
	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr,
					  MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}

	r = fence_signal(&fence->base);
	if (!r)
		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return 0;
}
/**
 * amdgpu_fence_wait_any - wait for a fence to signal on any ring
 *
 * @adev: amdgpu device pointer
 * @fences: amdgpu fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  Fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr)
{
	uint64_t seq[AMDGPU_MAX_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}
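
/*
 * Illustrative use (hypothetical fence names): the suballocator hands in
 * at most one fence per ring and returns once any of them signals, e.g.
 *
 *	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS] = {};
 *
 *	fences[0] = fence_on_ring_0;
 *	fences[4] = fence_on_ring_4;
 *	r = amdgpu_fence_wait_any(adev, fences, false);
 */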
/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait the next fence on
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}
	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false,
					  MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
	if (!seq[ring->idx])
		return 0;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring->idx, r);
	}
	return 0;
}
/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}
/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
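
/*
 * Worked example (illustrative values): with sync_seq[idx] == 107 and
 * last_seq == 100 this reports 7 outstanding fences. The 0x10000000
 * clamp guards the cast to unsigned in case the two counters ever
 * diverge wildly, e.g. if the u64 subtraction underflows.
 */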
/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}
/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
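
/*
 * Worked example (illustrative values): if a fence from ring 0 with
 * src->sync_seq = { 42, 17, 0, ... } is synced to dst_ring 1 whose
 * dst->sync_seq = { 30, 99, 5, ... }, the loop above raises dst to
 * { 42, 99, 5, ... }: entry 1 (dst_ring->idx) is skipped and every
 * other entry becomes the max of the two counters.
 */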
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	if (amdgpu_enable_scheduler) {
		ring->scheduler = amd_sched_create((void *)ring->adev,
						   NULL, ring->idx, 5, 0);
		if (!ring->scheduler)
			DRM_ERROR("Failed to create scheduler on ring %d.\n",
				  ring->idx);
	}
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	init_waitqueue_head(&adev->fence_queue);
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&adev->fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		if (ring->scheduler)
			amd_sched_destroy(ring->scheduler);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * If a GPU reset fails, make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}
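
/*
 * Example of the effect (illustrative values): if ring 2 has
 * sync_seq[2] == 57 but the hardware stalled after writing 53,
 * amdgpu_fence_write(ring, 57) above makes the next
 * amdgpu_fence_process() treat 54..57 as signaled, so every waiter
 * blocked on those fences is released even though the GPU never
 * finished the work.
 */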
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;
	struct amdgpu_wait_cb cb;

	cb.task = current;

	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled(fence))
			break;

		if (adev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	fence_remove_callback(f, &cb.base);

	return t;
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
};