/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/circ_buf.h>
#include "i915_drv.h"
#include "intel_guc.h"
/**
 * DOC: GuC-based command submission
 *
 * i915_guc_client:
 * We use the term client to avoid confusion with contexts. An i915_guc_client
 * is equivalent to the GuC object guc_context_desc. This context descriptor
 * is allocated from a pool of 1024 entries. The kernel driver allocates a
 * doorbell and a workqueue for it, plus the process descriptor
 * (guc_process_desc), which is mapped to client space so that the client can
 * write a Work Item and then ring the doorbell.
 *
 * To simplify the implementation, we allocate one gem object that contains all
 * pages for doorbell, process descriptor and workqueue.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). The firmware writes a success/fail code back to the action
 * register after it has processed the request. The kernel driver polls
 * waiting for this update and then proceeds.
 * See host2guc_action()
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache line
 * (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into the Work Item.
 * See guc_add_workqueue_item()
 */
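/*
 * In outline, one request submission through the GuC (the fast path
 * implemented by i915_guc_submit() below) is a three-step sequence:
 *
 *	1. write a Work Item into the client's workqueue;
 *	2. publish the new workqueue tail in the process descriptor;
 *	3. ring the doorbell with an atomic cmpxchg on its cacheline.
 *
 * See guc_add_workqueue_item() and guc_ring_doorbell() for the details
 * of steps 1 and 3.
 */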
/*
 * Read GuC command/status register (SOFT_SCRATCH_0)
 * Return true if it contains a response rather than a command
 */
static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
					    u32 *status)
{
	u32 val = I915_READ(SOFT_SCRATCH(0));
	*status = val;
	return GUC2HOST_IS_RESPONSE(val);
}
static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	if (WARN_ON(len < 1 || len > 15))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	dev_priv->guc.action_count += 1;
	dev_priv->guc.action_cmd = data[0];

	for (i = 0; i < len; i++)
		I915_WRITE(SOFT_SCRATCH(i), data[i]);

	POSTING_READ(SOFT_SCRATCH(i - 1));

	I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);

	/* No HOST2GUC command should take longer than 10ms */
	ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
	if (status != GUC2HOST_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (which
		 * we convert to -EIO here) or no response at all was
		 * received within the timeout limit (-ETIMEDOUT)
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
			  "status=0x%08X response=0x%08X\n",
			  data[0], ret, status,
			  I915_READ(SOFT_SCRATCH(15)));

		dev_priv->guc.action_fail += 1;
		dev_priv->guc.action_err = ret;
	}
	dev_priv->guc.action_status = status;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
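/*
 * A minimal usage sketch of host2guc_action(): the caller fills data[]
 * with the action opcode followed by its parameters, then sends the
 * whole array (the doorbell helpers below follow exactly this pattern):
 *
 *	u32 data[2];
 *
 *	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;	<- opcode, lands in SOFT_SCRATCH(0)
 *	data[1] = 0;					<- first parameter
 *	ret = host2guc_action(guc, data, ARRAY_SIZE(data));
 *
 * A negative return value means the GuC rejected the action (-EIO) or
 * never answered within the 10ms budget (-ETIMEDOUT).
 */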
/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int host2guc_allocate_doorbell(struct intel_guc *guc,
				      struct i915_guc_client *client)
{
	u32 data[2];

	data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
	data[1] = client->ctx_index;

	return host2guc_action(guc, data, 2);
}
static int host2guc_release_doorbell(struct intel_guc *guc,
				     struct i915_guc_client *client)
{
	u32 data[2];

	data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
	data[1] = client->ctx_index;

	return host2guc_action(guc, data, 2);
}
static int host2guc_sample_forcewake(struct intel_guc *guc,
				     struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_device *dev = dev_priv->dev;
	u32 data[2];

	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6(dev) ||
	    NEEDS_WaRsDisableCoarsePowerGating(dev))
		data[1] = 0;
	else
		/* bits 0 and 1 are for the Render and Media domains respectively */
		data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return host2guc_action(guc, data, ARRAY_SIZE(data));
}
/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void guc_init_doorbell(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct guc_doorbell_info *doorbell;
	void *base;

	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
	doorbell = base + client->doorbell_offset;

	doorbell->db_status = 1;
	doorbell->cookie = 0;

	kunmap_atomic(base);
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	void *base;
	int attempt = 2, ret = -EAGAIN;

	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
	desc = base + gc->proc_desc_offset;

	/* Update the tail so it is visible to GuC */
	desc->tail = gc->wq_tail;

	/* current cookie */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = gc->cookie;

	/* cookie to be updated */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = gc->cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* pointer of current doorbell cacheline */
	db = base + gc->doorbell_offset;

	while (attempt--) {
		/* lets ring the doorbell */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
			db_cmp.value_qw, db_exc.value_qw);

		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			gc->cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* XXX: doorbell was lost and need to acquire it again */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
			  db_cmp.cookie, db_ret.cookie);

		/* update the cookie to newly read cookie from GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	/* Finally, update the cached copy of the GuC's WQ head */
	gc->wq_head = desc->head;

	kunmap_atomic(base);
	return ret;
}
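/*
 * Illustration of the cookie handshake above, with hypothetical values.
 * The doorbell QW holds { db_status, cookie }; suppose our cached
 * cookie is 7:
 *
 *	db_cmp = { GUC_DOORBELL_ENABLED, 7 }	<- what we expect to find
 *	db_exc = { GUC_DOORBELL_ENABLED, 8 }	<- what we want to store
 *
 * If the cmpxchg succeeds, the GuC observes the cookie change, samples
 * the new tail from the process descriptor, and we cache cookie = 8.
 * If it fails because the GuC moved the cookie underneath us, we adopt
 * the cookie just read and retry once; if it fails because the doorbell
 * was disabled, we give up and return -EAGAIN.
 */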
static void guc_disable_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct guc_doorbell_info *doorbell;
	void *base;
	i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
	int value;

	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
	doorbell = base + client->doorbell_offset;

	doorbell->db_status = 0;

	kunmap_atomic(base);

	I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);

	value = I915_READ(drbreg);
	WARN_ON((value & GEN8_DRB_VALID) != 0);

	I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
	I915_WRITE(drbreg, 0);

	/* XXX: wait for any interrupts */
	/* XXX: wait for workqueue to drain */
}
/*
 * Select, assign and release doorbell cachelines
 *
 * These functions track which doorbell cachelines are in use.
 * The data they manipulate is protected by the host2guc lock.
 */

static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
{
	const uint32_t cacheline_size = cache_line_size();
	uint32_t offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cacheline_size;

	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
			offset, guc->db_cacheline, cacheline_size);

	return offset;
}
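/*
 * For example, with (typically) 64-byte cache lines and 4KiB pages,
 * successive calls return 0x000, 0x040, 0x080, ... 0xfc0 and then wrap
 * back to 0x000, since offset_in_page() masks db_cacheline down to an
 * offset within one page. The numbers are illustrative; only
 * cache_line_size() and PAGE_SIZE matter.
 */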
static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
{
	/*
	 * The bitmap is split into two halves; the first half is used for
	 * normal priority contexts, the second half for high-priority ones.
	 * Note that logically higher priorities are numerically less than
	 * normal ones, so the test below means "is it high-priority?"
	 */
	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
	const uint16_t half = GUC_MAX_DOORBELLS / 2;
	const uint16_t start = hi_pri ? half : 0;
	const uint16_t end = start + half;
	uint16_t id;

	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
	if (id == end)
		id = GUC_INVALID_DOORBELL_ID;
	else
		bitmap_set(guc->doorbell_bitmap, id, 1);

	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
			hi_pri ? "high" : "normal", id);

	return id;
}
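/*
 * Worked example, assuming (hypothetically) GUC_MAX_DOORBELLS == 256:
 * a normal-priority client searches ids [0..127] and a high/critical
 * one searches [128..255]. find_next_zero_bit() returns 'end' when its
 * half is exhausted, which the code above maps to
 * GUC_INVALID_DOORBELL_ID.
 */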
static void release_doorbell(struct intel_guc *guc, uint16_t id)
{
	bitmap_clear(guc->doorbell_bitmap, id, 1);
}
/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_init_proc_desc(struct intel_guc *guc,
			       struct i915_guc_client *client)
{
	struct guc_process_desc *desc;
	void *base;

	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
	desc = base + client->proc_desc_offset;

	memset(desc, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->context_id = client->ctx_index;
	desc->wq_size_bytes = client->wq_size;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;

	kunmap_atomic(base);
}
/*
 * Initialise/clear the context descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */

static void guc_init_ctx_desc(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *ring;
	struct intel_context *ctx = client->owner;
	struct guc_context_desc desc;
	struct sg_table *sg;
	int i;

	memset(&desc, 0, sizeof(desc));

	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
	desc.context_id = client->ctx_index;
	desc.priority = client->priority;
	desc.db_id = client->doorbell_id;

	for_each_ring(ring, dev_priv, i) {
		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
		struct drm_i915_gem_object *obj;
		uint64_t ctx_desc;

		/* TODO: We have a design issue to be solved here. Only when we
		 * receive the first batch do we know which engine is used by
		 * the user. But here GuC expects the lrc and ring to be
		 * pinned. It is not an issue for the default context, which is
		 * the only one that owns a GuC client for now. But any future
		 * owner of a GuC client needs to make sure the lrc is pinned
		 * prior to entering here.
		 */
		obj = ctx->engine[i].state;
		if (!obj)
			break;	/* XXX: continue? */

		ctx_desc = intel_lr_context_descriptor(ctx, ring);
		lrc->context_desc = (u32)ctx_desc;

		/* The state page is after PPHWSP */
		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
				LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);

		obj = ctx->engine[i].ringbuf->obj;

		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc.engines_used |= (1 << ring->guc_id);
	}

	WARN_ON(desc.engines_used == 0);

	/*
	 * The CPU address is only needed at certain points, so kmap_atomic on
	 * demand instead of storing it in the ctx descriptor.
	 * XXX: May make debug easier to have it mapped
	 */
	desc.db_trigger_cpu = 0;
	desc.db_trigger_uk = client->doorbell_offset +
		i915_gem_obj_ggtt_offset(client->client_obj);
	desc.db_trigger_phy = client->doorbell_offset +
		sg_dma_address(client->client_obj->pages->sgl);

	desc.process_desc = client->proc_desc_offset +
		i915_gem_obj_ggtt_offset(client->client_obj);

	desc.wq_addr = client->wq_offset +
		i915_gem_obj_ggtt_offset(client->client_obj);

	desc.wq_size = client->wq_size;

	/*
	 * XXX: Take LRCs from an existing intel_context if this is not an
	 * IsKMDCreatedContext client
	 */
	desc.desc_private = (uintptr_t)client;

	/* Pool context is pinned already */
	sg = guc->ctx_pool_obj->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}
static void guc_fini_ctx_desc(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct guc_context_desc desc;
	struct sg_table *sg;

	memset(&desc, 0, sizeof(desc));

	sg = guc->ctx_pool_obj->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
	struct guc_process_desc *desc;
	void *base;
	u32 size = sizeof(struct guc_wq_item);
	int ret = -ETIMEDOUT, timeout_counter = 200;

	if (!gc)
		return 0;

	/* Quickly return if wq space is available since the last time we
	 * cached the head position. */
	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
		return 0;

	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
	desc = base + gc->proc_desc_offset;

	while (timeout_counter-- > 0) {
		gc->wq_head = desc->head;

		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
			ret = 0;
			break;
		}

		if (timeout_counter)
			usleep_range(1000, 2000);
	}

	kunmap_atomic(base);

	return ret;
}
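/*
 * CIRC_SPACE() is the standard helper from <linux/circ_buf.h>; with the
 * argument order used above it evaluates to
 *
 *	CIRC_SPACE(wq_tail, wq_head, wq_size)
 *		== (wq_head - wq_tail - 1) & (wq_size - 1)
 *
 * i.e. the bytes free between our producer index (wq_tail) and the
 * GuC's consumer index (head), always keeping one byte unused so that
 * full and empty are distinguishable. With hypothetical numbers
 * wq_size = 0x2000, wq_tail = 0x40 and head = 0x20, the space is
 * (0x20 - 0x40 - 1) & 0x1fff = 0x1fdf.
 */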
static int guc_add_workqueue_item(struct i915_guc_client *gc,
				  struct drm_i915_gem_request *rq)
{
	struct guc_wq_item *wqi;
	void *base;
	u32 tail, wq_len, wq_off, space;

	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
	if (WARN_ON(space < sizeof(struct guc_wq_item)))
		return -ENOSPC; /* shouldn't happen */

	/* postincrement WQ tail for next time */
	wq_off = gc->wq_tail;
	gc->wq_tail += sizeof(struct guc_wq_item);
	gc->wq_tail &= gc->wq_size - 1;

	/* For now a workqueue item is 4 DWs and the workqueue buffer is 2
	 * pages, so the wqi structure can neither cross a page boundary nor
	 * wrap past the end of the buffer. This simplifies the implementation
	 * below.
	 *
	 * XXX: if that ceases to hold, we need to save the data to a temp wqi
	 * and copy it into the workqueue buffer dw by dw.
	 */
	WARN_ON(sizeof(struct guc_wq_item) != 16);
	WARN_ON(wq_off & 3);

	/* wq starts from the page after doorbell / process_desc */
	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
			(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
	wq_off &= PAGE_SIZE - 1;
	wqi = (struct guc_wq_item *)((char *)base + wq_off);

	/* len does not include the header */
	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
	wqi->header = WQ_TYPE_INORDER |
			(wq_len << WQ_LEN_SHIFT) |
			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
			WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low-order word of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);

	/* The GuC firmware wants the tail index in QWords, not bytes */
	tail = rq->ringbuf->tail >> 3;
	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = 0; /*XXX: what fence to be here */

	kunmap_atomic(base);

	return 0;
}
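/*
 * The resulting in-order work item, fully packed (wq_len is
 * sizeof(struct guc_wq_item)/sizeof(u32) - 1 == 3):
 *
 *	DW0: WQ_TYPE_INORDER | (3 << WQ_LEN_SHIFT) |
 *	     (engine guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT
 *	DW1: low 32 bits of the LRC descriptor
 *	DW2: ring tail in QWords, shifted by WQ_RING_TAIL_SHIFT
 *	DW3: fence id (currently always 0)
 */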
/**
 * i915_guc_submit() - Submit commands through GuC
 * @client:	the guc client that the commands will go through
 * @rq:		request associated with the commands
 *
 * Return:	0 on success
 */
int i915_guc_submit(struct i915_guc_client *client,
		    struct drm_i915_gem_request *rq)
{
	struct intel_guc *guc = client->guc;
	unsigned int engine_id = rq->ring->guc_id;
	int q_ret, b_ret;

	q_ret = guc_add_workqueue_item(client, rq);
	if (q_ret == 0)
		b_ret = guc_ring_doorbell(client);

	client->submissions[engine_id] += 1;
	if (q_ret) {
		client->q_fail += 1;
		client->retcode = q_ret;
	} else if (b_ret) {
		client->b_fail += 1;
		client->retcode = q_ret = b_ret;
	} else {
		client->retcode = 0;
	}
	guc->submissions[engine_id] += 1;
	guc->last_seqno[engine_id] = rq->seqno;

	return q_ret;
}
/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of i915_guc_submit() above.
 */

/**
 * gem_allocate_guc_obj() - Allocate gem object for GuC usage
 * @dev:	drm device
 * @size:	size of object
 *
 * This is a wrapper to create a gem obj. In order to use it inside GuC, the
 * object needs to be pinned for its whole lifetime. We must also pin it into
 * GTT space other than [0, GUC_WOPCM_TOP) because that range is reserved
 * inside GuC.
 *
 * Return:	A drm_i915_gem_object if successful, otherwise NULL.
 */
static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
							u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, size);
	if (!obj)
		return NULL;

	if (i915_gem_object_get_pages(obj)) {
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}

	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return obj;
}
/**
 * gem_release_guc_obj() - Release gem object allocated for GuC usage
 * @obj:	gem obj to be released
 */
static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
{
	if (!obj)
		return;

	if (i915_gem_obj_is_pinned(obj))
		i915_gem_object_ggtt_unpin(obj);

	drm_gem_object_unreference(&obj->base);
}
static void guc_client_free(struct drm_device *dev,
			    struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;

	if (!client)
		return;

	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
		/*
		 * First disable the doorbell, then tell the GuC we've
		 * finished with it, finally deallocate it in our bitmap
		 */
		guc_disable_doorbell(guc, client);
		host2guc_release_doorbell(guc, client);
		release_doorbell(guc, client->doorbell_id);
	}

	/*
	 * XXX: wait for any outstanding submissions before freeing memory.
	 * Be sure to drop any locks
	 */

	gem_release_guc_obj(client->client_obj);

	if (client->ctx_index != GUC_INVALID_CTX_ID) {
		guc_fini_ctx_desc(guc, client);
		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
	}

	kfree(client);
}
/**
 * guc_client_alloc() - Allocate an i915_guc_client
 * @dev:	drm device
 * @priority:	four levels of priority: _CRITICAL, _HIGH, _NORMAL and _LOW.
 * 		The kernel client to replace ExecList submission is created with
 * 		NORMAL priority. The priority of a client for the scheduler can
 * 		be HIGH, while a preemption context can use CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 * 		context)
 *
 * Return:	An i915_guc_client object on success, NULL on failure.
 */
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
						uint32_t priority,
						struct intel_context *ctx)
{
	struct i915_guc_client *client;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct drm_i915_gem_object *obj;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
	client->priority = priority;
	client->owner = ctx;
	client->guc = guc;

	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* The first page is doorbell/proc_desc. The two following pages are wq. */
	obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (!obj)
		goto err;

	client->client_obj = obj;
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	client->doorbell_id = assign_doorbell(guc, client->priority);
	if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
		/* XXX: evict a doorbell instead */
		goto err;

	guc_init_proc_desc(guc, client);
	guc_init_ctx_desc(guc, client);
	guc_init_doorbell(guc, client);

	/* XXX: Any cache flushes needed? General domain mgmt calls? */

	if (host2guc_allocate_doorbell(guc, client))
		goto err;

	DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
		priority, client, client->ctx_index, client->doorbell_id);

	return client;

err:
	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);

	guc_client_free(dev, client);
	return NULL;
}
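/*
 * Resulting layout of the client object, assuming 4KiB pages (so
 * GUC_DB_SIZE is one page) and a doorbell cacheline selected in the
 * first half of that page:
 *
 *	+0x0000	doorbell cacheline	(client->doorbell_offset)
 *	+0x0800	process descriptor	(client->proc_desc_offset)
 *	+0x1000	workqueue, GUC_WQ_SIZE bytes	(client->wq_offset)
 */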
static void guc_create_log(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	unsigned long offset;
	uint32_t size, flags;

	if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
		return;

	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;

	/* The first page is to save log buffer state. Allocate one
	 * extra page for others in case of overlap */
	size = (1 + GUC_LOG_DPC_PAGES + 1 +
		GUC_LOG_ISR_PAGES + 1 +
		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

	obj = guc->log_obj;
	if (!obj) {
		obj = gem_allocate_guc_obj(dev_priv->dev, size);
		if (!obj) {
			/* logging will be off */
			i915.guc_log_level = -1;
			return;
		}

		guc->log_obj = obj;
	}

	/* each allocated unit is a page */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
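/*
 * A sketch of the resulting descriptor, with hypothetical page counts
 * (say DPC = 7, ISR = 7, CRASH = 1): the buffer would then be
 * 1+7+1+7+1+1+1 = 19 pages, and guc->log_flags packs the buffer's GGTT
 * page frame number above GUC_LOG_BUF_ADDR_SHIFT, with the VALID bit
 * and the per-region size fields below it.
 */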
static void init_guc_policies(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 1000000;
			policy->preemption_time = 500000;
			policy->fault_time = 250000;
			policy->policy_flags = 0;
		}
	}

	policies->is_valid = 1;
}
static void guc_create_ads(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct guc_ads *ads;
	struct guc_policies *policies;
	struct guc_mmio_reg_state *reg_state;
	struct intel_engine_cs *ring;
	struct page *page;
	u32 size, i;

	/* The ads obj includes the struct itself and buffers passed to GuC */
	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
			sizeof(struct guc_mmio_reg_state) +
			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;

	obj = guc->ads_obj;
	if (!obj) {
		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
		if (!obj)
			return;

		guc->ads_obj = obj;
	}

	page = i915_gem_object_get_page(obj, 0);
	ads = kmap(page);

	/*
	 * The GuC requires a "Golden Context" when it reinitialises
	 * engines after a reset. Here we use the Render ring default
	 * context, which must already exist and be pinned in the GGTT,
	 * so its address won't change after we've told the GuC where
	 * to find it.
	 */
	ring = &dev_priv->ring[RCS];
	ads->golden_context_lrca = ring->status_page.gfx_addr;

	for_each_ring(ring, dev_priv, i)
		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);

	/* GuC scheduling policies */
	policies = (void *)ads + sizeof(struct guc_ads);
	init_guc_policies(policies);

	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
			sizeof(struct guc_ads);

	/* MMIO reg state */
	reg_state = (void *)policies + sizeof(struct guc_policies);

	for_each_ring(ring, dev_priv, i) {
		reg_state->mmio_white_list[ring->guc_id].mmio_start =
			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		reg_state->mmio_white_list[ring->guc_id].count = 0;
	}

	ads->reg_state_addr = ads->scheduler_policies +
			sizeof(struct guc_policies);

	ads->reg_state_buffer = ads->reg_state_addr +
			sizeof(struct guc_mmio_reg_state);

	kunmap(page);
}
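/*
 * Layout of the ADS object built above:
 *
 *	+---------------------------+ <-- i915_gem_obj_ggtt_offset(obj)
 *	| struct guc_ads            |
 *	+---------------------------+ <-- ads->scheduler_policies
 *	| struct guc_policies       |
 *	+---------------------------+ <-- ads->reg_state_addr
 *	| struct guc_mmio_reg_state |
 *	+---------------------------+ <-- ads->reg_state_buffer
 *	| GUC_S3_SAVE_SPACE_PAGES   |
 *	+---------------------------+
 */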
/*
 * Set up the memory resources to be shared with the GuC. At this point,
 * we require just one object that can be mapped through the GGTT.
 */
int i915_guc_submission_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const size_t ctxsize = sizeof(struct guc_context_desc);
	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
	struct intel_guc *guc = &dev_priv->guc;

	if (!i915.enable_guc_submission)
		return 0; /* not enabled */

	if (guc->ctx_pool_obj)
		return 0; /* already allocated */

	guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
	if (!guc->ctx_pool_obj)
		return -ENOMEM;

	ida_init(&guc->ctx_ids);

	guc_create_log(guc);

	guc_create_ads(guc);

	return 0;
}
int i915_guc_submission_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct intel_context *ctx = dev_priv->kernel_context;
	struct i915_guc_client *client;

	/* client for execbuf submission */
	client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
	if (!client) {
		DRM_ERROR("Failed to create execbuf guc_client\n");
		return -ENOMEM;
	}

	guc->execbuf_client = client;

	host2guc_sample_forcewake(guc, client);

	return 0;
}
*dev
)
957 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
958 struct intel_guc
*guc
= &dev_priv
->guc
;
960 guc_client_free(dev
, guc
->execbuf_client
);
961 guc
->execbuf_client
= NULL
;
void i915_guc_submission_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;

	gem_release_guc_obj(dev_priv->guc.ads_obj);
	guc->ads_obj = NULL;

	gem_release_guc_obj(dev_priv->guc.log_obj);
	guc->log_obj = NULL;

	if (guc->ctx_pool_obj)
		ida_destroy(&guc->ctx_ids);
	gem_release_guc_obj(guc->ctx_pool_obj);
	guc->ctx_pool_obj = NULL;
}
/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev:	drm device
 */
int intel_guc_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct intel_context *ctx;
	u32 data[3];

	if (!i915.enable_guc_submission)
		return 0;

	ctx = dev_priv->kernel_context;

	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);

	return host2guc_action(guc, data, ARRAY_SIZE(data));
}
/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev:	drm device
 */
int intel_guc_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct intel_context *ctx;
	u32 data[3];

	if (!i915.enable_guc_submission)
		return 0;

	ctx = dev_priv->kernel_context;

	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);

	return host2guc_action(guc, data, ARRAY_SIZE(data));
}