#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

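/*
 * Illustrative sketch (not defined in this header): the ring free-space
 * computation is expected to honour the reservation above, so head and
 * tail can never meet on the same cacheline:
 *
 *	space = head - (tail + I915_RING_FREE_SPACE);
 *	if (space < 0)
 *		space += size;		-- wrap around the end of the ring
 */
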
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

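	/*
	 * Illustrative sketch (an assumption about how this field is
	 * consumed, not code from this header): when waiting for ring
	 * space, the head may be advanced to the last retired position
	 * and the marker reset so new retirements can be detected:
	 *
	 *	ring->head = ring->last_retired_head;
	 *	ring->last_retired_head = -1;
	 */
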
	unsigned irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);

	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32	wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32	signal[I915_NUM_RINGS];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);
		int	(*signal)(struct intel_ring_buffer *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;
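
	/*
	 * Illustrative note (a sketch, not the exact per-generation
	 * encoding): to make one ring wait on another, the driver calls
	 *
	 *	waiter->semaphore.sync_to(waiter, signaller, seqno);
	 *
	 * which emits a semaphore-wait command (e.g. MI_SEMAPHORE_MBOX on
	 * gen6+) that polls the mbox.wait register the signaller writes.
	 */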

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	/*
	 * Tables of commands the command parser needs to know about
	 * for this ring.
	 */
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

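/*
 * Illustrative sketch of a get_cmd_length_mask() hook (hypothetical
 * values; the real per-ring tables live in the command parser, not in
 * this header). Bits 31:29 of a command header hold the instruction
 * client; for a ring whose MI commands keep their length in bits 5:0:
 *
 *	static u32 example_get_cmd_length_mask(u32 cmd_header)
 *	{
 *		u32 client = cmd_header >> 29;
 *
 *		if (client == 0)	-- MI client
 *			return 0x3f;	-- length lives in bits 5:0
 *		return 0;		-- unrecognized/invalid command
 *	}
 */
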
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

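/*
 * Illustrative sketch (an assumption mirroring the get_seqno() hook
 * documented in struct intel_ring_buffer): the current seqno typically
 * lives in the driver-owned area of the status page and is read back
 * with intel_read_status_page():
 *
 *	static u32 example_get_seqno(struct intel_ring_buffer *ring,
 *				     bool lazy_coherency)
 *	{
 *		-- if !lazy_coherency, kick coherency first, e.g. with a
 *		-- posting read of a ring register
 *		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *	}
 */
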
void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);

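/*
 * Typical emission pattern (a sketch; MI_FLUSH/MI_NOOP chosen only for
 * illustration): reserve space with intel_ring_begin(), write dwords
 * with intel_ring_emit(), then let intel_ring_advance() mask the tail:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */
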
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */