/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

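/*
 * Example (illustrative only; "crtc" is a crtc pointer assumed to be in
 * scope): counting the encoders currently wired to one CRTC.
 *
 *	struct intel_encoder *encoder;
 *	int count = 0;
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		count++;
 */
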
struct intel_pch_pll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	int pll_reg;
	int fp0_reg;
	int fp1_reg;
};
#define I915_NUM_PLLS 2

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct intel_crtc_config;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size);
	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
				   struct drm_display_mode *mode);
	void (*modeset_global_resources)(struct drm_device *dev);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct drm_i915_gt_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};

#define DEV_INFO_FLAGS \
	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_llc)

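/*
 * DEV_INFO_FLAGS is an X-macro list: callers define DEV_INFO_FLAG() and
 * DEV_INFO_SEP, expand the list, then undefine them. A minimal sketch
 * (the debug-printing context here is an illustrative assumption):
 *
 *	#define DEV_INFO_FLAG(name) info->name ? " +" #name : ""
 *	#define DEV_INFO_SEP ,
 *	DRM_DEBUG_DRIVER("i915 device info:%s%s%s ...", DEV_INFO_FLAGS);
 *	#undef DEV_INFO_FLAG
 *	#undef DEV_INFO_SEP
 */
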
struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	u8 is_mobile:1;
	u8 is_i85x:1;
	u8 is_i915g:1;
	u8 is_i945gm:1;
	u8 is_g33:1;
	u8 need_gfx_hws:1;
	u8 is_g4x:1;
	u8 is_pineview:1;
	u8 is_broadwater:1;
	u8 is_crestline:1;
	u8 is_ivybridge:1;
	u8 is_valleyview:1;
	u8 has_force_wake:1;
	u8 is_haswell:1;
	u8 has_fbc:1;
	u8 has_pipe_cxsr:1;
	u8 has_hotplug:1;
	u8 cursor_needs_physical:1;
	u8 has_overlay:1;
	u8 overlay_needs_physical:1;
	u8 supports_tv:1;
	u8 has_bsd_ring:1;
	u8 has_blt_ring:1;
	u8 has_llc:1;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC,
	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	unsigned long start;		/* Start offset of used GTT */
	size_t total;			/* Total size GTT can map */
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;
	dma_addr_t scratch_page_dma;
	struct page *scratch_page;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
	void (*gtt_remove)(struct drm_device *dev);
	void (*gtt_clear_range)(struct drm_device *dev,
				unsigned int first_entry,
				unsigned int num_entries);
	void (*gtt_insert_entries)(struct drm_device *dev,
				   struct sg_table *st,
				   unsigned int pg_start,
				   enum i915_cache_level cache_level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
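/*
 * Worked example (assumes 4 KiB pages, i.e. PAGE_SHIFT == 12): a GTT with
 * gtt.total == 2 GiB holds 0x80000000 >> 12 == 524288 global PTEs.
 */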

#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
	struct drm_device *dev;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;
	dma_addr_t scratch_page_dma_addr;

	/* pte functions, mirroring the interface of the global gtt. */
	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
			       struct sg_table *st,
			       unsigned int pg_start,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
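/*
 * Size check: 512 page-directory entries, each covering 1024 PTEs of 4 KiB
 * pages, give the PPGTT a 512 * 1024 * 4 KiB == 2 GiB address space.
 */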

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
};

enum no_fbc_reason {
	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
	FBC_MODULE_PARAM,
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	struct work_struct work;
	u32 pm_iir;
	/* lock - irqsave spinlock that protects the work_struct and
	 * pm_iir. */
	spinlock_t lock;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};
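/*
 * Lock-ordering sketch (illustrative only): when both locks are needed,
 * struct_mutex must be taken before rps.hw_lock, never the other way round.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	... touch RPS/RC6 registers, talk to the PCU ...
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *	mutex_unlock(&dev->struct_mutex);
 */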

/* defined intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Memory allocator for GTT */
	struct drm_mm gtt_space;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	int gtt_mtrr;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int suspended;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	size_t object_memory;
	u32 object_count;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	int hangcheck_count;
	uint32_t last_acthd[I915_NUM_RINGS];
	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, free of races, that a reset event
	 * happened and that it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};
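/*
 * Reading reset_counter, a sketch (the i915_reset_in_progress() and
 * i915_terminally_wedged() helpers near the end of this header do exactly
 * this):
 *
 *	u32 v = atomic_read(&error->reset_counter);
 *	bool resetting = v & I915_RESET_IN_PROGRESS_FLAG;  (lowest bit)
 *	bool wedged = (v == I915_WEDGED);                  (all bits set)
 */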

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	spinlock_t gt_lock;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;
	u32 gt_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;

	int num_pch_pll;

	unsigned long cfb_size;
	unsigned int cfb_fb;
	enum plane cfb_plane;
	int cfb_y;
	struct intel_fbc_work *fbc_work;

	struct intel_opregion opregion;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* LVDS info */
	int backlight_level;  /* restore backlight to this value */
	bool backlight_enabled;
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;

		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;
	bool no_aux_handshake;

	int crt_ddc_pin;
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct i915_gtt gtt;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicates whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
	/* Panel fitter placement and size for Ironlake+ */
	u32 pch_pf_pos, pch_pf_size;

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;
	int child_dev_num;
	struct child_device_config *child_dev;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	enum no_fbc_reason no_fbc_reason;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct i915_gpu_error gpu_error;

	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct backlight_device *backlight;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
} drm_i915_private_t;

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

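/*
 * Example (illustrative only): touching every ring that was actually
 * initialised on this device, here to retire completed requests with
 * i915_gem_retire_requests_ring(), declared later in this header.
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 */
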
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};
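/*
 * Sketch of a backing-storage implementation (the example_* names,
 * including the two sg_table helpers, are hypothetical and not driver
 * code): get_pages() must leave obj->pages pointing at a populated
 * sg_table, and put_pages() tears it down again.
 *
 *	static int example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		obj->pages = example_alloc_sg_table(obj->base.size);
 *		return obj->pages ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_put_pages(struct drm_i915_gem_object *obj)
 *	{
 *		example_free_sg_table(obj->pages);
 *		obj->pages = NULL;
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */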

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head gtt_list;

	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;
	struct idr context_idr;
};

#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
				 (dev)->pci_device == 0x0152 || \
				 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
				 (dev)->pci_device == 0x0106 || \
				 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0A00)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
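/* Arithmetic note: 32 rows of 128 bytes is exactly one 4 KiB page per tile. */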
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)

#define HAS_DDI(dev)		(IS_HASWELL(dev))
#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
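/*
 * Example (illustrative only): a policy that allows plain RC6 and deep RC6
 * but not the deepest state combines the flags as a mask:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *	(rc6_mask & INTEL_RC6pp_ENABLE) == 0, so RC6pp stays disabled.
 */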
1397
c153f45f 1398extern struct drm_ioctl_desc i915_ioctls[];
b3a83639 1399extern int i915_max_ioctl;
a35d9d3c
BW
1400extern unsigned int i915_fbpercrtc __always_unused;
1401extern int i915_panel_ignore_lid __read_mostly;
1402extern unsigned int i915_powersave __read_mostly;
f45b5557 1403extern int i915_semaphores __read_mostly;
a35d9d3c 1404extern unsigned int i915_lvds_downclock __read_mostly;
121d527a 1405extern int i915_lvds_channel_mode __read_mostly;
4415e63b 1406extern int i915_panel_use_ssc __read_mostly;
a35d9d3c 1407extern int i915_vbt_sdvo_panel_type __read_mostly;
c0f372b3 1408extern int i915_enable_rc6 __read_mostly;
4415e63b 1409extern int i915_enable_fbc __read_mostly;
a35d9d3c 1410extern bool i915_enable_hangcheck __read_mostly;
650dc07e 1411extern int i915_enable_ppgtt __read_mostly;
0a3af268 1412extern unsigned int i915_preliminary_hw_support __read_mostly;
b3a83639 1413
6a9ee8af
DA
1414extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1415extern int i915_resume(struct drm_device *dev);
7c1c2871
DA
1416extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1417extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1418
1da177e4 1419 /* i915_dma.c */
d05c617e 1420void i915_update_dri1_breadcrumb(struct drm_device *dev);
84b1fd10 1421extern void i915_kernel_lost_context(struct drm_device * dev);
22eae947 1422extern int i915_driver_load(struct drm_device *, unsigned long flags);
ba8bbcf6 1423extern int i915_driver_unload(struct drm_device *);
673a394b 1424extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
84b1fd10 1425extern void i915_driver_lastclose(struct drm_device * dev);
6c340eac
EA
1426extern void i915_driver_preclose(struct drm_device *dev,
1427 struct drm_file *file_priv);
673a394b
EA
1428extern void i915_driver_postclose(struct drm_device *dev,
1429 struct drm_file *file_priv);
84b1fd10 1430extern int i915_driver_device_is_agp(struct drm_device * dev);
c43b5634 1431#ifdef CONFIG_COMPAT
0d6aa60b
DA
1432extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1433 unsigned long arg);
c43b5634 1434#endif
673a394b 1435extern int i915_emit_box(struct drm_device *dev,
c4e7a414
CW
1436 struct drm_clip_rect *box,
1437 int DR1, int DR4);
8e96d9c4 1438extern int intel_gpu_reset(struct drm_device *dev);
d4b8bb2a 1439extern int i915_reset(struct drm_device *dev);
7648fa99
JB
1440extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1441extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1442extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1443extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1444
073f34d9 1445extern void intel_console_resume(struct work_struct *work);
af6061af 1446
1da177e4 1447/* i915_irq.c */
f65d9421 1448void i915_hangcheck_elapsed(unsigned long data);
527f9e90 1449void i915_handle_error(struct drm_device *dev, bool wedged);
1da177e4 1450
f71d4af4 1451extern void intel_irq_init(struct drm_device *dev);
20afbda2 1452extern void intel_hpd_init(struct drm_device *dev);
990bbdad 1453extern void intel_gt_init(struct drm_device *dev);
16995a9f 1454extern void intel_gt_reset(struct drm_device *dev);
b1f14ad0 1455
742cbee8
DV
1456void i915_error_state_free(struct kref *error_ref);
1457
7c463586
KP
1458void
1459i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1460
1461void
1462i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1463
0206e353 1464void intel_enable_asle(struct drm_device *dev);
01c66889 1465
3bd3c932
CW
1466#ifdef CONFIG_DEBUG_FS
1467extern void i915_destroy_error_state(struct drm_device *dev);
1468#else
1469#define i915_destroy_error_state(x)
1470#endif
1471
7c463586 1472
673a394b
EA
1473/* i915_gem.c */
1474int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1475 struct drm_file *file_priv);
1476int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1477 struct drm_file *file_priv);
1478int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1479 struct drm_file *file_priv);
1480int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1481 struct drm_file *file_priv);
1482int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1483 struct drm_file *file_priv);
de151cf6
JB
1484int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1485 struct drm_file *file_priv);
673a394b
EA
1486int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1487 struct drm_file *file_priv);
1488int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1489 struct drm_file *file_priv);
1490int i915_gem_execbuffer(struct drm_device *dev, void *data,
1491 struct drm_file *file_priv);
76446cac
JB
1492int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1493 struct drm_file *file_priv);
673a394b
EA
1494int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1495 struct drm_file *file_priv);
1496int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1497 struct drm_file *file_priv);
1498int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1499 struct drm_file *file_priv);
199adf40
BW
1500int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1501 struct drm_file *file);
1502int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1503 struct drm_file *file);
673a394b
EA
1504int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1505 struct drm_file *file_priv);
3ef94daa
CW
1506int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1507 struct drm_file *file_priv);
673a394b
EA
1508int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1509 struct drm_file *file_priv);
1510int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1511 struct drm_file *file_priv);
1512int i915_gem_set_tiling(struct drm_device *dev, void *data,
1513 struct drm_file *file_priv);
1514int i915_gem_get_tiling(struct drm_device *dev, void *data,
1515 struct drm_file *file_priv);
5a125c3c
EA
1516int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1517 struct drm_file *file_priv);
23ba4fd0
BW
1518int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1519 struct drm_file *file_priv);
673a394b 1520void i915_gem_load(struct drm_device *dev);
42dcedd4
CW
1521void *i915_gem_object_alloc(struct drm_device *dev);
1522void i915_gem_object_free(struct drm_i915_gem_object *obj);
673a394b 1523int i915_gem_init_object(struct drm_gem_object *obj);
37e680a1
CW
1524void i915_gem_object_init(struct drm_i915_gem_object *obj,
1525 const struct drm_i915_gem_object_ops *ops);
05394f39
CW
1526struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1527 size_t size);
673a394b 1528void i915_gem_free_object(struct drm_gem_object *obj);
42dcedd4 1529
2021746e
CW
1530int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1531 uint32_t alignment,
86a1ee26
CW
1532 bool map_and_fenceable,
1533 bool nonblocking);
05394f39 1534void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
2021746e 1535int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
dd624afd 1536int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
05394f39 1537void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
673a394b 1538void i915_gem_lastclose(struct drm_device *dev);
f787a5f5 1539
37e680a1 1540int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
9da3da66
CW
1541static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1542{
67d5a50c
ID
1543 struct sg_page_iter sg_iter;
1544
1545 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
2db76d7c 1546 return sg_page_iter_page(&sg_iter);
67d5a50c
ID
1547
1548 return NULL;
9da3da66 1549}
a5570178
CW
1550static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1551{
1552 BUG_ON(obj->pages == NULL);
1553 obj->pages_pin_count++;
1554}
1555static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1556{
1557 BUG_ON(obj->pages_pin_count == 0);
1558 obj->pages_pin_count--;
1559}
1560
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
			  uint32_t handle);
/**
 * Returns true if seq1 is at or after seq2, i.e. seq1 has passed seq2,
 * using signed 32-bit arithmetic so the comparison survives seqno
 * wraparound.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
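
/*
 * Worked example: with seq1 = 2 and seq2 = 0xfffffffe, seq1 - seq2 = 4
 * and (int32_t)4 >= 0, so seq1 is correctly treated as later even
 * though it is numerically smaller. The test only misjudges seqnos more
 * than 2^31 apart, which request retirement keeps from happening.
 */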

int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	}

	return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
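
/*
 * Usage sketch: a caller that must keep an object's fence register from
 * being stolen while it is in use, e.g. around a fenced GTT access:
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... use the fenced mapping ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 *
 * pin_fence() returns false when the object owns no fence register, in
 * which case there is nothing to protect.
 */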

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}
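
/*
 * Usage sketch: code about to block on the GPU typically samples these
 * predicates first, roughly:
 *
 *	if (i915_terminally_wedged(&dev_priv->gpu_error))
 *		return -EIO;
 *	if (i915_reset_in_progress(&dev_priv->gpu_error))
 *		return -EAGAIN;	(or wait for the reset to finish)
 *
 * i915_gem_check_wedge() above wraps this policy, including the
 * -EAGAIN vs. -EIO choice for interruptible waiters.
 */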

void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
					    uint32_t read_domains,
					    uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring,
		     struct drm_file *file,
		     u32 *seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
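
/*
 * Usage sketch: on pre-gen6 parts, CPU writes into GTT-visible memory
 * can sit in chipset write buffers, so writers flush before expecting
 * the GPU to see the data (vaddr/batch/len are illustrative only):
 *
 *	memcpy(vaddr, batch, len);
 *	i915_gem_chipset_flush(dev);
 *
 * On gen6+ the helper is a no-op.
 */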

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
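
/*
 * Usage sketch: where swizzling depends on physical address bit 17, a
 * page's contents depend on where it lands in memory, so paths that
 * drop and re-acquire backing pages bracket them like this:
 *
 *	if (i915_gem_object_needs_bit17_swizzle(obj))
 *		i915_gem_object_save_bit_17_swizzle(obj);	(before release)
 *	...
 *	if (i915_gem_object_needs_bit17_swizzle(obj))
 *		i915_gem_object_do_bit_17_swizzle(obj);		(after re-acquire)
 */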

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
			  const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
				     int handle);

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev);
void i915_teardown_sysfs(struct drm_device *dev);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void intel_opregion_enable_asle(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct seq_file *m,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
#endif

/*
 * On SNB, the forcewake bit must be set before reading ring registers,
 * to keep the GT core from powering down and returning stale values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
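
/*
 * Usage sketch: a diagnostic read of a GT register on SNB takes an
 * explicit forcewake reference so several accesses can share one wake
 * (the register chosen here is illustrative):
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(GEN6_RP_STATE_CAP);
 *	... more GT register reads ...
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * The i915_read* accessors below take the reference automatically for
 * single reads of registers that need it.
 */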

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

#define __i915_read(x, y) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
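
/*
 * Each invocation above expands to the declaration of one sized MMIO
 * accessor; __i915_read(32, l), for instance, declares
 *
 *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);
 *
 * The matching definitions live in i915_drv.c, where the second macro
 * argument selects the underlying readb/readw/readl/readq (and write
 * equivalents).
 */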

#define I915_READ8(reg) i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg) i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))

#define I915_READ(reg) i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))

#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg) i915_read64(dev_priv, (reg))

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
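
/*
 * Usage sketch: POSTING_READ is the usual idiom for flushing a posted
 * register write, reading the register back (untraced, so tracepoints
 * are not polluted) before relying on its effect:
 *
 *	I915_WRITE(DEIMR, dev_priv->irq_mask);
 *	POSTING_READ(DEIMR);
 */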

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
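
/*
 * Usage sketch: ioctl structs carry user pointers as u64 so the layout
 * matches for 32- and 64-bit userspace; to_user_ptr() recovers a
 * dereferenceable pointer (args here is illustrative):
 *
 *	if (copy_from_user(dst, to_user_ptr(args->data_ptr), args->size))
 *		return -EFAULT;
 *
 * The intermediate uintptr_t cast keeps the conversion well-defined on
 * 32-bit kernels and quiets sparse's address-space checking.
 */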

#endif