drm/i915: introduce i915_queue_hangcheck
drivers/gpu/drm/i915/i915_drv.h
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
/* General customization:
 */

#define DRIVER_AUTHOR	"Tungsten Graphics, Inc."

#define DRIVER_NAME	"i915"
#define DRIVER_DESC	"Intel Graphics"
#define DRIVER_DATE	"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
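
/*
 * Illustrative addition (not part of the original header): because the enum
 * values above are laid out contiguously, the conversion macros are plain
 * offset arithmetic. For example:
 *
 *	POWER_DOMAIN_PIPE(PIPE_B)             == POWER_DOMAIN_PIPE_B
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_C) == POWER_DOMAIN_TRANSCODER_C
 */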
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
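
/*
 * Illustrative addition (not part of the original header): a minimal sketch
 * of how for_each_pipe() is used. It assumes a "dev" pointer is in scope,
 * since the macro expands to a lookup of INTEL_INFO(dev)->num_pipes:
 *
 *	enum pipe pipe;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));
 */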
131#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
132 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
133 if ((intel_encoder)->base.crtc == (__crtc))
134
135struct drm_i915_private;
136
137enum intel_dpll_id {
138 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
139 /* real shared dpll ids must be >= 0 */
140 DPLL_ID_PCH_PLL_A,
141 DPLL_ID_PCH_PLL_B,
142};
143#define I915_NUM_PLLS 2
144
5358901f 145struct intel_dpll_hw_state {
66e985c0 146 uint32_t dpll;
8bcc2795 147 uint32_t dpll_md;
148 uint32_t fp0;
149 uint32_t fp1;
150};
151
e72f9fbf 152struct intel_shared_dpll {
153 int refcount; /* count of number of CRTCs sharing this PLL */
154 int active; /* count of number of active CRTCs (i.e. DPMS on) */
155 bool on; /* is the PLL actually active? Disabled during modeset */
156 const char *name;
157 /* should match the index in the dev_priv->shared_dplls array */
158 enum intel_dpll_id id;
5358901f 159 struct intel_dpll_hw_state hw_state;
160 void (*mode_set)(struct drm_i915_private *dev_priv,
161 struct intel_shared_dpll *pll);
162 void (*enable)(struct drm_i915_private *dev_priv,
163 struct intel_shared_dpll *pll);
164 void (*disable)(struct drm_i915_private *dev_priv,
165 struct intel_shared_dpll *pll);
166 bool (*get_hw_state)(struct drm_i915_private *dev_priv,
167 struct intel_shared_dpll *pll,
168 struct intel_dpll_hw_state *hw_state);
ee7b9f93 169};
ee7b9f93 170
171/* Used by dp and fdi links */
172struct intel_link_m_n {
173 uint32_t tu;
174 uint32_t gmch_m;
175 uint32_t gmch_n;
176 uint32_t link_m;
177 uint32_t link_n;
178};
179
180void intel_link_compute_m_n(int bpp, int nlanes,
181 int pixel_clock, int link_clock,
182 struct intel_link_m_n *m_n);
183
6441ab5f
PZ
184struct intel_ddi_plls {
185 int spll_refcount;
186 int wrpll1_refcount;
187 int wrpll2_refcount;
188};
189
1da177e4
LT
190/* Interface history:
191 *
192 * 1.1: Original.
0d6aa60b
DA
193 * 1.2: Add Power Management
194 * 1.3: Add vblank support
de227f5f 195 * 1.4: Fix cmdbuffer path, add heap destroy
702880f2 196 * 1.5: Add vblank pipe configuration
2228ed67
MCA
197 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
198 * - Support vertical blank on secondary display pipe
1da177e4
LT
199 */
200#define DRIVER_MAJOR 1
2228ed67 201#define DRIVER_MINOR 6
1da177e4
LT
202#define DRIVER_PATCHLEVEL 0
203
673a394b 204#define WATCH_COHERENCY 0
23bc5982 205#define WATCH_LISTS 0
42d6ab48 206#define WATCH_GTT 0
673a394b 207
71acb5eb
DA
208#define I915_GEM_PHYS_CURSOR_0 1
209#define I915_GEM_PHYS_CURSOR_1 2
210#define I915_GEM_PHYS_OVERLAY_REGS 3
211#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
212
213struct drm_i915_gem_phys_object {
214 int id;
215 struct page **page_list;
216 drm_dma_handle_t *handle;
05394f39 217 struct drm_i915_gem_object *cur_obj;
71acb5eb
DA
218};
219
0a3e67a4
JB
220struct opregion_header;
221struct opregion_acpi;
222struct opregion_swsci;
223struct opregion_asle;
224
8ee1c3db 225struct intel_opregion {
5bc4418b
BW
226 struct opregion_header __iomem *header;
227 struct opregion_acpi __iomem *acpi;
228 struct opregion_swsci __iomem *swsci;
229 struct opregion_asle __iomem *asle;
230 void __iomem *vbt;
01fe9dbd 231 u32 __iomem *lid_state;
8ee1c3db 232};
44834a67 233#define OPREGION_SIZE (8*1024)
8ee1c3db 234
6ef3d427
CW
235struct intel_overlay;
236struct intel_overlay_error_state;
237
7c1c2871
DA
238struct drm_i915_master_private {
239 drm_local_map_t *sarea;
240 struct _drm_i915_sarea *sarea_priv;
241};
de151cf6 242#define I915_FENCE_REG_NONE -1
42b5aeab
VS
243#define I915_MAX_NUM_FENCES 32
244/* 32 fences + sign bit for FENCE_REG_NONE */
245#define I915_MAX_NUM_FENCE_BITS 6
de151cf6
JB
246
247struct drm_i915_fence_reg {
007cc8ac 248 struct list_head lru_list;
caea7476 249 struct drm_i915_gem_object *obj;
1690e1eb 250 int pin_count;
de151cf6 251};
7c1c2871 252
9b9d172d 253struct sdvo_device_mapping {
e957d772 254 u8 initialized;
9b9d172d 255 u8 dvo_port;
256 u8 slave_addr;
257 u8 dvo_wiring;
e957d772 258 u8 i2c_pin;
b1083333 259 u8 ddc_pin;
9b9d172d 260};
261
c4a1d9e4
CW
262struct intel_display_error_state;
263
63eeaf38 264struct drm_i915_error_state {
742cbee8 265 struct kref ref;
63eeaf38
JB
266 u32 eir;
267 u32 pgtbl_er;
be998e2e 268 u32 ier;
b9a3906b 269 u32 ccid;
0f3b6849
CW
270 u32 derrmr;
271 u32 forcewake;
9574b3fe 272 bool waiting[I915_NUM_RINGS];
9db4a9c7 273 u32 pipestat[I915_MAX_PIPES];
c1cd90ed
DV
274 u32 tail[I915_NUM_RINGS];
275 u32 head[I915_NUM_RINGS];
0f3b6849 276 u32 ctl[I915_NUM_RINGS];
d27b1e0e
DV
277 u32 ipeir[I915_NUM_RINGS];
278 u32 ipehr[I915_NUM_RINGS];
279 u32 instdone[I915_NUM_RINGS];
280 u32 acthd[I915_NUM_RINGS];
7e3b8737 281 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
df2b23d9 282 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
12f55818 283 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
7e3b8737
DV
284 /* our own tracking of ring head and tail */
285 u32 cpu_ring_head[I915_NUM_RINGS];
286 u32 cpu_ring_tail[I915_NUM_RINGS];
1d8f38f4 287 u32 error; /* gen6+ */
71e172e8 288 u32 err_int; /* gen7 */
c1cd90ed
DV
289 u32 instpm[I915_NUM_RINGS];
290 u32 instps[I915_NUM_RINGS];
050ee91f 291 u32 extra_instdone[I915_NUM_INSTDONE_REG];
d27b1e0e 292 u32 seqno[I915_NUM_RINGS];
9df30794 293 u64 bbaddr;
33f3f518
DV
294 u32 fault_reg[I915_NUM_RINGS];
295 u32 done_reg;
c1cd90ed 296 u32 faddr[I915_NUM_RINGS];
4b9de737 297 u64 fence[I915_MAX_NUM_FENCES];
63eeaf38 298 struct timeval time;
52d39a21
CW
299 struct drm_i915_error_ring {
300 struct drm_i915_error_object {
301 int page_count;
302 u32 gtt_offset;
303 u32 *pages[0];
8c123e54 304 } *ringbuffer, *batchbuffer, *ctx;
52d39a21
CW
305 struct drm_i915_error_request {
306 long jiffies;
307 u32 seqno;
ee4f42b1 308 u32 tail;
52d39a21
CW
309 } *requests;
310 int num_requests;
311 } ring[I915_NUM_RINGS];
9df30794 312 struct drm_i915_error_buffer {
a779e5ab 313 u32 size;
9df30794 314 u32 name;
0201f1ec 315 u32 rseqno, wseqno;
9df30794
CW
316 u32 gtt_offset;
317 u32 read_domains;
318 u32 write_domain;
4b9de737 319 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
9df30794
CW
320 s32 pinned:2;
321 u32 tiling:2;
322 u32 dirty:1;
323 u32 purgeable:1;
5d1333fc 324 s32 ring:4;
93dfb40c 325 u32 cache_level:2;
c724e8a9
CW
326 } *active_bo, *pinned_bo;
327 u32 active_bo_count, pinned_bo_count;
6ef3d427 328 struct intel_overlay_error_state *overlay;
c4a1d9e4 329 struct intel_display_error_state *display;
63eeaf38
JB
330};
331
b8cecdf5 332struct intel_crtc_config;
0e8ffe1b 333struct intel_crtc;
ee9300bb
DV
334struct intel_limit;
335struct dpll;
b8cecdf5 336
e70236a8 337struct drm_i915_display_funcs {
ee5382ae 338 bool (*fbc_enabled)(struct drm_device *dev);
e70236a8
JB
339 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
340 void (*disable_fbc)(struct drm_device *dev);
341 int (*get_display_clock_speed)(struct drm_device *dev);
342 int (*get_fifo_size)(struct drm_device *dev, int plane);
ee9300bb
DV
343 /**
344 * find_dpll() - Find the best values for the PLL
345 * @limit: limits for the PLL
346 * @crtc: current CRTC
347 * @target: target frequency in kHz
348 * @refclk: reference clock frequency in kHz
349 * @match_clock: if provided, @best_clock P divider must
350 * match the P divider from @match_clock
351 * used for LVDS downclocking
352 * @best_clock: best PLL values found
353 *
354 * Returns true on success, false on failure.
355 */
356 bool (*find_dpll)(const struct intel_limit *limit,
357 struct drm_crtc *crtc,
358 int target, int refclk,
359 struct dpll *match_clock,
360 struct dpll *best_clock);
d210246a 361 void (*update_wm)(struct drm_device *dev);
b840d907 362 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
4c4ff43a
PZ
363 uint32_t sprite_width, int pixel_size,
364 bool enable);
47fab737 365 void (*modeset_global_resources)(struct drm_device *dev);
0e8ffe1b
DV
366 /* Returns the active state of the crtc, and if the crtc is active,
367 * fills out the pipe-config with the hw state. */
368 bool (*get_pipe_config)(struct intel_crtc *,
369 struct intel_crtc_config *);
f1f644dc 370 void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
f564048e 371 int (*crtc_mode_set)(struct drm_crtc *crtc,
f564048e
EA
372 int x, int y,
373 struct drm_framebuffer *old_fb);
76e5a89c
DV
374 void (*crtc_enable)(struct drm_crtc *crtc);
375 void (*crtc_disable)(struct drm_crtc *crtc);
ee7b9f93 376 void (*off)(struct drm_crtc *crtc);
e0dac65e
WF
377 void (*write_eld)(struct drm_connector *connector,
378 struct drm_crtc *crtc);
674cf967 379 void (*fdi_link_train)(struct drm_crtc *crtc);
6067aaea 380 void (*init_clock_gating)(struct drm_device *dev);
8c9f3aaf
JB
381 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
382 struct drm_framebuffer *fb,
383 struct drm_i915_gem_object *obj);
17638cd6
JB
384 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
385 int x, int y);
20afbda2 386 void (*hpd_irq_setup)(struct drm_device *dev);
e70236a8
JB
387 /* clock updates for mode set */
388 /* cursor updates */
389 /* render clock increase/decrease */
390 /* display clock increase/decrease */
391 /* pll clock increase/decrease */
e70236a8
JB
392};
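
/*
 * Illustrative addition (not part of the original header): a sketch of how
 * the find_dpll() hook documented above is meant to be called. The locals
 * (limit, crtc, target_khz, refclk) are hypothetical; passing NULL for
 * match_clock skips the LVDS-downclock P-divider constraint:
 *
 *	struct dpll clock;
 *
 *	if (!dev_priv->display.find_dpll(limit, crtc, target_khz, refclk,
 *					 NULL, &clock))
 *		return false;
 */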
393
990bbdad
CW
394struct drm_i915_gt_funcs {
395 void (*force_wake_get)(struct drm_i915_private *dev_priv);
396 void (*force_wake_put)(struct drm_i915_private *dev_priv);
397};
398
79fc46df
DL
399#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
400 func(is_mobile) sep \
401 func(is_i85x) sep \
402 func(is_i915g) sep \
403 func(is_i945gm) sep \
404 func(is_g33) sep \
405 func(need_gfx_hws) sep \
406 func(is_g4x) sep \
407 func(is_pineview) sep \
408 func(is_broadwater) sep \
409 func(is_crestline) sep \
410 func(is_ivybridge) sep \
411 func(is_valleyview) sep \
412 func(is_haswell) sep \
413 func(has_force_wake) sep \
414 func(has_fbc) sep \
415 func(has_pipe_cxsr) sep \
416 func(has_hotplug) sep \
417 func(cursor_needs_physical) sep \
418 func(has_overlay) sep \
419 func(overlay_needs_physical) sep \
420 func(supports_tv) sep \
421 func(has_bsd_ring) sep \
422 func(has_blt_ring) sep \
f72a1183 423 func(has_vebox_ring) sep \
dd93be58 424 func(has_llc) sep \
30568c45
DL
425 func(has_ddi) sep \
426 func(has_fpga_dbg)
c96ea64e 427
a587f779
DL
428#define DEFINE_FLAG(name) u8 name:1
429#define SEP_SEMICOLON ;
c96ea64e 430
cfdf1fa2 431struct intel_device_info {
10fce67a 432 u32 display_mmio_offset;
7eb552ae 433 u8 num_pipes:3;
c96c3a8c 434 u8 gen;
a587f779 435 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
cfdf1fa2
KH
436};
437
a587f779
DL
438#undef DEFINE_FLAG
439#undef SEP_SEMICOLON
440
7faf1ab2
DV
441enum i915_cache_level {
442 I915_CACHE_NONE = 0,
443 I915_CACHE_LLC,
444 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
445};
446
2d04befb
KG
447typedef uint32_t gen6_gtt_pte_t;
448
5d4545ae
BW
449/* The Graphics Translation Table is the way in which GEN hardware translates a
450 * Graphics Virtual Address into a Physical Address. In addition to the normal
451 * collateral associated with any va->pa translations GEN hardware also has a
452 * portion of the GTT which can be mapped by the CPU and remain both coherent
453 * and correct (in cases like swizzling). That region is referred to as GMADR in
454 * the spec.
455 */
456struct i915_gtt {
457 unsigned long start; /* Start offset of used GTT */
458 size_t total; /* Total size GTT can map */
baa09f5f 459 size_t stolen_size; /* Total size of stolen memory */
5d4545ae
BW
460
461 unsigned long mappable_end; /* End offset that we can CPU map */
462 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
463 phys_addr_t mappable_base; /* PA of our GMADR */
464
465 /** "Graphics Stolen Memory" holds the global PTEs */
466 void __iomem *gsm;
a81cc00c
BW
467
468 bool do_idle_maps;
67167240
BW
469 struct {
470 dma_addr_t addr;
471 struct page *page;
472 } scratch;
7faf1ab2 473
911bdf0a
BW
474 int mtrr;
475
7faf1ab2 476 /* global gtt ops */
baa09f5f 477 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
41907ddc
BW
478 size_t *stolen, phys_addr_t *mappable_base,
479 unsigned long *mappable_end);
baa09f5f 480 void (*gtt_remove)(struct drm_device *dev);
7faf1ab2
DV
481 void (*gtt_clear_range)(struct drm_device *dev,
482 unsigned int first_entry,
483 unsigned int num_entries);
484 void (*gtt_insert_entries)(struct drm_device *dev,
485 struct sg_table *st,
486 unsigned int pg_start,
487 enum i915_cache_level cache_level);
80a74f7f 488 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
2d04befb 489 enum i915_cache_level level);
5d4545ae 490};
a54c0c27 491#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
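/*
 * Illustrative addition (not part of the original header): gtt_total_entries()
 * simply converts the addressable GTT size into a count of global PTEs.
 * Assuming the usual 4 KiB pages (PAGE_SHIFT == 12), a 2 GiB GTT yields
 * (2UL << 30) >> 12 == 524288 entries.
 */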
5d4545ae 492
1d2a314c 493struct i915_hw_ppgtt {
8f2c59f0 494 struct drm_device *dev;
1d2a314c
DV
495 unsigned num_pd_entries;
496 struct page **pt_pages;
497 uint32_t pd_offset;
498 dma_addr_t *pt_dma_addr;
def886c3
DV
499
500 /* pte functions, mirroring the interface of the global gtt. */
501 void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
502 unsigned int first_entry,
503 unsigned int num_entries);
504 void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
505 struct sg_table *st,
506 unsigned int pg_start,
507 enum i915_cache_level cache_level);
80a74f7f 508 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
2d04befb 509 enum i915_cache_level level);
b7c36d25 510 int (*enable)(struct drm_device *dev);
3440d265 511 void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
1d2a314c
DV
512};
513
e59ec13d
MK
514struct i915_ctx_hang_stats {
515 /* This context had batch pending when hang was declared */
516 unsigned batch_pending;
517
518 /* This context had batch active when hang was declared */
519 unsigned batch_active;
520};
40521054
BW
521
522/* This must match up with the value previously used for execbuf2.rsvd1. */
523#define DEFAULT_CONTEXT_ID 0
524struct i915_hw_context {
dce3271b 525 struct kref ref;
40521054 526 int id;
e0556841 527 bool is_initialized;
40521054
BW
528 struct drm_i915_file_private *file_priv;
529 struct intel_ring_buffer *ring;
530 struct drm_i915_gem_object *obj;
e59ec13d 531 struct i915_ctx_hang_stats hang_stats;
40521054
BW
532};
533
5c3fe8b0
BW
534struct i915_fbc {
535 unsigned long size;
536 unsigned int fb_id;
537 enum plane plane;
538 int y;
539
540 struct drm_mm_node *compressed_fb;
541 struct drm_mm_node *compressed_llb;
542
543 struct intel_fbc_work {
544 struct delayed_work work;
545 struct drm_crtc *crtc;
546 struct drm_framebuffer *fb;
547 int interval;
548 } *fbc_work;
549
550 enum {
551 FBC_NO_OUTPUT, /* no outputs enabled to compress */
552 FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
553 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
554 FBC_MODE_TOO_LARGE, /* mode too large for compression */
555 FBC_BAD_PLANE, /* fbc not supported on plane */
556 FBC_NOT_TILED, /* buffer not tiled */
557 FBC_MULTIPLE_PIPES, /* more than one pipe active */
558 FBC_MODULE_PARAM,
559 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
560 } no_fbc_reason;
b5e50c3f
JB
561};
562
5c3fe8b0 563
3bad0781 564enum intel_pch {
f0350830 565 PCH_NONE = 0, /* No PCH present */
3bad0781
ZW
566 PCH_IBX, /* Ibexpeak PCH */
567 PCH_CPT, /* Cougarpoint PCH */
eb877ebf 568 PCH_LPT, /* Lynxpoint PCH */
40c7ead9 569 PCH_NOP,
3bad0781
ZW
570};
571
988d6ee8
PZ
572enum intel_sbi_destination {
573 SBI_ICLK,
574 SBI_MPHY,
575};
576
b690e96c 577#define QUIRK_PIPEA_FORCE (1<<0)
435793df 578#define QUIRK_LVDS_SSC_DISABLE (1<<1)
4dca20ef 579#define QUIRK_INVERT_BRIGHTNESS (1<<2)
b690e96c 580
8be48d92 581struct intel_fbdev;
1630fe75 582struct intel_fbc_work;
38651674 583
c2b9152f
DV
584struct intel_gmbus {
585 struct i2c_adapter adapter;
f2ce9faf 586 u32 force_bit;
c2b9152f 587 u32 reg0;
36c785f0 588 u32 gpio_reg;
c167a6fc 589 struct i2c_algo_bit_data bit_algo;
c2b9152f
DV
590 struct drm_i915_private *dev_priv;
591};
592
f4c956ad 593struct i915_suspend_saved_registers {
ba8bbcf6
JB
594 u8 saveLBB;
595 u32 saveDSPACNTR;
596 u32 saveDSPBCNTR;
e948e994 597 u32 saveDSPARB;
ba8bbcf6
JB
598 u32 savePIPEACONF;
599 u32 savePIPEBCONF;
600 u32 savePIPEASRC;
601 u32 savePIPEBSRC;
602 u32 saveFPA0;
603 u32 saveFPA1;
604 u32 saveDPLL_A;
605 u32 saveDPLL_A_MD;
606 u32 saveHTOTAL_A;
607 u32 saveHBLANK_A;
608 u32 saveHSYNC_A;
609 u32 saveVTOTAL_A;
610 u32 saveVBLANK_A;
611 u32 saveVSYNC_A;
612 u32 saveBCLRPAT_A;
5586c8bc 613 u32 saveTRANSACONF;
42048781
ZW
614 u32 saveTRANS_HTOTAL_A;
615 u32 saveTRANS_HBLANK_A;
616 u32 saveTRANS_HSYNC_A;
617 u32 saveTRANS_VTOTAL_A;
618 u32 saveTRANS_VBLANK_A;
619 u32 saveTRANS_VSYNC_A;
0da3ea12 620 u32 savePIPEASTAT;
ba8bbcf6
JB
621 u32 saveDSPASTRIDE;
622 u32 saveDSPASIZE;
623 u32 saveDSPAPOS;
585fb111 624 u32 saveDSPAADDR;
ba8bbcf6
JB
625 u32 saveDSPASURF;
626 u32 saveDSPATILEOFF;
627 u32 savePFIT_PGM_RATIOS;
0eb96d6e 628 u32 saveBLC_HIST_CTL;
ba8bbcf6
JB
629 u32 saveBLC_PWM_CTL;
630 u32 saveBLC_PWM_CTL2;
42048781
ZW
631 u32 saveBLC_CPU_PWM_CTL;
632 u32 saveBLC_CPU_PWM_CTL2;
ba8bbcf6
JB
633 u32 saveFPB0;
634 u32 saveFPB1;
635 u32 saveDPLL_B;
636 u32 saveDPLL_B_MD;
637 u32 saveHTOTAL_B;
638 u32 saveHBLANK_B;
639 u32 saveHSYNC_B;
640 u32 saveVTOTAL_B;
641 u32 saveVBLANK_B;
642 u32 saveVSYNC_B;
643 u32 saveBCLRPAT_B;
5586c8bc 644 u32 saveTRANSBCONF;
42048781
ZW
645 u32 saveTRANS_HTOTAL_B;
646 u32 saveTRANS_HBLANK_B;
647 u32 saveTRANS_HSYNC_B;
648 u32 saveTRANS_VTOTAL_B;
649 u32 saveTRANS_VBLANK_B;
650 u32 saveTRANS_VSYNC_B;
0da3ea12 651 u32 savePIPEBSTAT;
ba8bbcf6
JB
652 u32 saveDSPBSTRIDE;
653 u32 saveDSPBSIZE;
654 u32 saveDSPBPOS;
585fb111 655 u32 saveDSPBADDR;
ba8bbcf6
JB
656 u32 saveDSPBSURF;
657 u32 saveDSPBTILEOFF;
585fb111
JB
658 u32 saveVGA0;
659 u32 saveVGA1;
660 u32 saveVGA_PD;
ba8bbcf6
JB
661 u32 saveVGACNTRL;
662 u32 saveADPA;
663 u32 saveLVDS;
585fb111
JB
664 u32 savePP_ON_DELAYS;
665 u32 savePP_OFF_DELAYS;
ba8bbcf6
JB
666 u32 saveDVOA;
667 u32 saveDVOB;
668 u32 saveDVOC;
669 u32 savePP_ON;
670 u32 savePP_OFF;
671 u32 savePP_CONTROL;
585fb111 672 u32 savePP_DIVISOR;
ba8bbcf6
JB
673 u32 savePFIT_CONTROL;
674 u32 save_palette_a[256];
675 u32 save_palette_b[256];
06027f91 676 u32 saveDPFC_CB_BASE;
ba8bbcf6
JB
677 u32 saveFBC_CFB_BASE;
678 u32 saveFBC_LL_BASE;
679 u32 saveFBC_CONTROL;
680 u32 saveFBC_CONTROL2;
0da3ea12
JB
681 u32 saveIER;
682 u32 saveIIR;
683 u32 saveIMR;
42048781
ZW
684 u32 saveDEIER;
685 u32 saveDEIMR;
686 u32 saveGTIER;
687 u32 saveGTIMR;
688 u32 saveFDI_RXA_IMR;
689 u32 saveFDI_RXB_IMR;
1f84e550 690 u32 saveCACHE_MODE_0;
1f84e550 691 u32 saveMI_ARB_STATE;
ba8bbcf6
JB
692 u32 saveSWF0[16];
693 u32 saveSWF1[16];
694 u32 saveSWF2[3];
695 u8 saveMSR;
696 u8 saveSR[8];
123f794f 697 u8 saveGR[25];
ba8bbcf6 698 u8 saveAR_INDEX;
a59e122a 699 u8 saveAR[21];
ba8bbcf6 700 u8 saveDACMASK;
a59e122a 701 u8 saveCR[37];
4b9de737 702 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
1fd1c624
EA
703 u32 saveCURACNTR;
704 u32 saveCURAPOS;
705 u32 saveCURABASE;
706 u32 saveCURBCNTR;
707 u32 saveCURBPOS;
708 u32 saveCURBBASE;
709 u32 saveCURSIZE;
a4fc5ed6
KP
710 u32 saveDP_B;
711 u32 saveDP_C;
712 u32 saveDP_D;
713 u32 savePIPEA_GMCH_DATA_M;
714 u32 savePIPEB_GMCH_DATA_M;
715 u32 savePIPEA_GMCH_DATA_N;
716 u32 savePIPEB_GMCH_DATA_N;
717 u32 savePIPEA_DP_LINK_M;
718 u32 savePIPEB_DP_LINK_M;
719 u32 savePIPEA_DP_LINK_N;
720 u32 savePIPEB_DP_LINK_N;
42048781
ZW
721 u32 saveFDI_RXA_CTL;
722 u32 saveFDI_TXA_CTL;
723 u32 saveFDI_RXB_CTL;
724 u32 saveFDI_TXB_CTL;
725 u32 savePFA_CTL_1;
726 u32 savePFB_CTL_1;
727 u32 savePFA_WIN_SZ;
728 u32 savePFB_WIN_SZ;
729 u32 savePFA_WIN_POS;
730 u32 savePFB_WIN_POS;
5586c8bc
ZW
731 u32 savePCH_DREF_CONTROL;
732 u32 saveDISP_ARB_CTL;
733 u32 savePIPEA_DATA_M1;
734 u32 savePIPEA_DATA_N1;
735 u32 savePIPEA_LINK_M1;
736 u32 savePIPEA_LINK_N1;
737 u32 savePIPEB_DATA_M1;
738 u32 savePIPEB_DATA_N1;
739 u32 savePIPEB_LINK_M1;
740 u32 savePIPEB_LINK_N1;
b5b72e89 741 u32 saveMCHBAR_RENDER_STANDBY;
cda2bb78 742 u32 savePCH_PORT_HOTPLUG;
f4c956ad 743};
c85aa885
DV
744
745struct intel_gen6_power_mgmt {
59cdb63d 746 /* work and pm_iir are protected by dev_priv->irq_lock */
c85aa885
DV
747 struct work_struct work;
748 u32 pm_iir;
59cdb63d
DV
749
750 /* On vlv we need to manually drop to Vmin with a delayed work. */
751 struct delayed_work vlv_work;
c85aa885
DV
752
 753 /* The below variables and all the rps hw state are protected by
 754 * dev->struct_mutex. */
755 u8 cur_delay;
756 u8 min_delay;
757 u8 max_delay;
52ceb908 758 u8 rpe_delay;
31c77388 759 u8 hw_max;
1a01ab3b
JB
760
761 struct delayed_work delayed_resume_work;
4fc688ce
JB
762
763 /*
764 * Protects RPS/RC6 register access and PCU communication.
765 * Must be taken after struct_mutex if nested.
766 */
767 struct mutex hw_lock;
c85aa885
DV
768};
769
1a240d4d
DV
770/* defined intel_pm.c */
771extern spinlock_t mchdev_lock;
772
c85aa885
DV
773struct intel_ilk_power_mgmt {
774 u8 cur_delay;
775 u8 min_delay;
776 u8 max_delay;
777 u8 fmax;
778 u8 fstart;
779
780 u64 last_count1;
781 unsigned long last_time1;
782 unsigned long chipset_power;
783 u64 last_count2;
784 struct timespec last_time2;
785 unsigned long gfx_power;
786 u8 corr;
787
788 int c_m;
789 int r_t;
3e373948
DV
790
791 struct drm_i915_gem_object *pwrctx;
792 struct drm_i915_gem_object *renderctx;
c85aa885
DV
793};
794
a38911a3
WX
795/* Power well structure for haswell */
796struct i915_power_well {
797 struct drm_device *device;
798 spinlock_t lock;
799 /* power well enable/disable usage count */
800 int count;
801 int i915_request;
802};
803
231f42a4
DV
804struct i915_dri1_state {
805 unsigned allow_batchbuffer : 1;
806 u32 __iomem *gfx_hws_cpu_addr;
807
808 unsigned int cpp;
809 int back_offset;
810 int front_offset;
811 int current_page;
812 int page_flipping;
813
814 uint32_t counter;
815};
816
db1b76ca
DV
817struct i915_ums_state {
818 /**
819 * Flag if the X Server, and thus DRM, is not currently in
820 * control of the device.
821 *
822 * This is set between LeaveVT and EnterVT. It needs to be
823 * replaced with a semaphore. It also needs to be
824 * transitioned away from for kernel modesetting.
825 */
826 int mm_suspended;
827};
828
a4da4fa4
DV
829struct intel_l3_parity {
830 u32 *remap_info;
831 struct work_struct error_work;
832};
833
4b5aed62 834struct i915_gem_mm {
4b5aed62
DV
835 /** Memory allocator for GTT stolen memory */
836 struct drm_mm stolen;
837 /** Memory allocator for GTT */
838 struct drm_mm gtt_space;
839 /** List of all objects in gtt_space. Used to restore gtt
840 * mappings on resume */
841 struct list_head bound_list;
842 /**
843 * List of objects which are not bound to the GTT (thus
844 * are idle and not used by the GPU) but still have
845 * (presumably uncached) pages still attached.
846 */
847 struct list_head unbound_list;
848
849 /** Usable portion of the GTT for GEM */
850 unsigned long stolen_base; /* limited to low memory (32-bit) */
851
4b5aed62
DV
852 /** PPGTT used for aliasing the PPGTT with the GTT */
853 struct i915_hw_ppgtt *aliasing_ppgtt;
854
855 struct shrinker inactive_shrinker;
856 bool shrinker_no_lock_stealing;
857
858 /**
859 * List of objects currently involved in rendering.
860 *
861 * Includes buffers having the contents of their GPU caches
862 * flushed, not necessarily primitives. last_rendering_seqno
863 * represents when the rendering involved will be completed.
864 *
865 * A reference is held on the buffer while on this list.
866 */
867 struct list_head active_list;
868
869 /**
870 * LRU list of objects which are not in the ringbuffer and
871 * are ready to unbind, but are still in the GTT.
872 *
873 * last_rendering_seqno is 0 while an object is in this list.
874 *
875 * A reference is not held on the buffer while on this list,
876 * as merely being GTT-bound shouldn't prevent its being
877 * freed, and we'll pull it off the list in the free path.
878 */
879 struct list_head inactive_list;
880
881 /** LRU list of objects with fence regs on them. */
882 struct list_head fence_list;
883
884 /**
885 * We leave the user IRQ off as much as possible,
886 * but this means that requests will finish and never
887 * be retired once the system goes idle. Set a timer to
888 * fire periodically while the ring is running. When it
889 * fires, go retire requests.
890 */
891 struct delayed_work retire_work;
892
893 /**
894 * Are we in a non-interruptible section of code like
895 * modesetting?
896 */
897 bool interruptible;
898
4b5aed62
DV
899 /** Bit 6 swizzling required for X tiling */
900 uint32_t bit_6_swizzle_x;
901 /** Bit 6 swizzling required for Y tiling */
902 uint32_t bit_6_swizzle_y;
903
904 /* storage for physical objects */
905 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
906
907 /* accounting, useful for userland debugging */
908 size_t object_memory;
909 u32 object_count;
910};
911
edc3d884
MK
912struct drm_i915_error_state_buf {
913 unsigned bytes;
914 unsigned size;
915 int err;
916 u8 *buf;
917 loff_t start;
918 loff_t pos;
919};
920
fc16b48b
MK
921struct i915_error_state_file_priv {
922 struct drm_device *dev;
923 struct drm_i915_error_state *error;
924};
925
99584db3
DV
926struct i915_gpu_error {
927 /* For hangcheck timer */
928#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
929#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
930 struct timer_list hangcheck_timer;
99584db3
DV
931
932 /* For reset and error_state handling. */
933 spinlock_t lock;
934 /* Protected by the above dev->gpu_error.lock. */
935 struct drm_i915_error_state *first_error;
936 struct work_struct work;
99584db3
DV
937
938 unsigned long last_reset;
939
1f83fee0 940 /**
f69061be 941 * State variable and reset counter controlling the reset flow
1f83fee0 942 *
f69061be
DV
943 * Upper bits are for the reset counter. This counter is used by the
 944 * wait_seqno code to notice, race-free, that a reset event happened and
945 * that it needs to restart the entire ioctl (since most likely the
946 * seqno it waited for won't ever signal anytime soon).
947 *
948 * This is important for lock-free wait paths, where no contended lock
949 * naturally enforces the correct ordering between the bail-out of the
950 * waiter and the gpu reset work code.
1f83fee0
DV
951 *
952 * Lowest bit controls the reset state machine: Set means a reset is in
953 * progress. This state will (presuming we don't have any bugs) decay
954 * into either unset (successful reset) or the special WEDGED value (hw
955 * terminally sour). All waiters on the reset_queue will be woken when
956 * that happens.
957 */
958 atomic_t reset_counter;
959
960 /**
961 * Special values/flags for reset_counter
962 *
963 * Note that the code relies on
964 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
965 * being true.
966 */
967#define I915_RESET_IN_PROGRESS_FLAG 1
968#define I915_WEDGED 0xffffffff
969
970 /**
971 * Waitqueue to signal when the reset has completed. Used by clients
972 * that wait for dev_priv->mm.wedged to settle.
973 */
974 wait_queue_head_t reset_queue;
33196ded 975
99584db3
DV
976 /* For gpu hang simulation. */
977 unsigned int stop_rings;
978};
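
/*
 * Illustrative addition (not part of the original header): a minimal sketch
 * of the lock-free check described for reset_counter above. A waiter samples
 * the counter before sleeping and bails out if a reset has since started or
 * completed (the helper name is hypothetical):
 *
 *	static inline bool i915_reset_intervened(struct i915_gpu_error *error,
 *						 unsigned pre_wait_count)
 *	{
 *		unsigned count = atomic_read(&error->reset_counter);
 *
 *		return (count & I915_RESET_IN_PROGRESS_FLAG) ||
 *		       count != pre_wait_count;
 *	}
 */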
979
b8efb17b
ZR
980enum modeset_restore {
981 MODESET_ON_LID_OPEN,
982 MODESET_DONE,
983 MODESET_SUSPENDED,
984};
985
41aa3448
RV
986struct intel_vbt_data {
987 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
988 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
989
990 /* Feature bits */
991 unsigned int int_tv_support:1;
992 unsigned int lvds_dither:1;
993 unsigned int lvds_vbt:1;
994 unsigned int int_crt_support:1;
995 unsigned int lvds_use_ssc:1;
996 unsigned int display_clock_mode:1;
997 unsigned int fdi_rx_polarity_inverted:1;
998 int lvds_ssc_freq;
999 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1000
1001 /* eDP */
1002 int edp_rate;
1003 int edp_lanes;
1004 int edp_preemphasis;
1005 int edp_vswing;
1006 bool edp_initialized;
1007 bool edp_support;
1008 int edp_bpp;
1009 struct edp_power_seq edp_pps;
1010
1011 int crt_ddc_pin;
1012
1013 int child_dev_num;
1014 struct child_device_config *child_dev;
1015};
1016
f4c956ad
DV
1017typedef struct drm_i915_private {
1018 struct drm_device *dev;
42dcedd4 1019 struct kmem_cache *slab;
f4c956ad
DV
1020
1021 const struct intel_device_info *info;
1022
1023 int relative_constants_mode;
1024
1025 void __iomem *regs;
1026
1027 struct drm_i915_gt_funcs gt;
1028 /** gt_fifo_count and the subsequent register write are synchronized
1029 * with dev->struct_mutex. */
1030 unsigned gt_fifo_count;
1031 /** forcewake_count is protected by gt_lock */
1032 unsigned forcewake_count;
1033 /** gt_lock is also taken in irq contexts. */
99057c81 1034 spinlock_t gt_lock;
f4c956ad
DV
1035
1036 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1037
28c70f16 1038
f4c956ad
DV
1039 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1040 * controller on different i2c buses. */
1041 struct mutex gmbus_mutex;
1042
1043 /**
1044 * Base address of the gmbus and gpio block.
1045 */
1046 uint32_t gpio_mmio_base;
1047
28c70f16
DV
1048 wait_queue_head_t gmbus_wait_queue;
1049
f4c956ad
DV
1050 struct pci_dev *bridge_dev;
1051 struct intel_ring_buffer ring[I915_NUM_RINGS];
f72b3435 1052 uint32_t last_seqno, next_seqno;
f4c956ad
DV
1053
1054 drm_dma_handle_t *status_page_dmah;
f4c956ad
DV
1055 struct resource mch_res;
1056
1057 atomic_t irq_received;
1058
1059 /* protects the irq masks */
1060 spinlock_t irq_lock;
1061
9ee32fea
DV
1062 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1063 struct pm_qos_request pm_qos;
1064
f4c956ad 1065 /* DPIO indirect register protection */
09153000 1066 struct mutex dpio_lock;
f4c956ad
DV
1067
1068 /** Cached value of IMR to avoid reads in updating the bitfield */
f4c956ad
DV
1069 u32 irq_mask;
1070 u32 gt_irq_mask;
f4c956ad 1071
f4c956ad 1072 struct work_struct hotplug_work;
52d7eced 1073 bool enable_hotplug_processing;
b543fb04
EE
1074 struct {
1075 unsigned long hpd_last_jiffies;
1076 int hpd_cnt;
1077 enum {
1078 HPD_ENABLED = 0,
1079 HPD_DISABLED = 1,
1080 HPD_MARK_DISABLED = 2
1081 } hpd_mark;
1082 } hpd_stats[HPD_NUM_PINS];
142e2398 1083 u32 hpd_event_bits;
ac4c16c5 1084 struct timer_list hotplug_reenable_timer;
f4c956ad 1085
7f1f3851 1086 int num_plane;
f4c956ad 1087
5c3fe8b0 1088 struct i915_fbc fbc;
f4c956ad 1089 struct intel_opregion opregion;
41aa3448 1090 struct intel_vbt_data vbt;
f4c956ad
DV
1091
1092 /* overlay */
1093 struct intel_overlay *overlay;
2c6602df 1094 unsigned int sprite_scaling_enabled;
f4c956ad 1095
31ad8ec6
JN
1096 /* backlight */
1097 struct {
1098 int level;
1099 bool enabled;
8ba2d185 1100 spinlock_t lock; /* bl registers and the above bl fields */
31ad8ec6
JN
1101 struct backlight_device *device;
1102 } backlight;
1103
f4c956ad 1104 /* LVDS info */
f4c956ad
DV
1105 bool no_aux_handshake;
1106
f4c956ad
DV
1107 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1108 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1109 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1110
1111 unsigned int fsb_freq, mem_freq, is_ddr3;
1112
f4c956ad
DV
1113 struct workqueue_struct *wq;
1114
1115 /* Display functions */
1116 struct drm_i915_display_funcs display;
1117
1118 /* PCH chipset type */
1119 enum intel_pch pch_type;
17a303ec 1120 unsigned short pch_id;
f4c956ad
DV
1121
1122 unsigned long quirks;
1123
b8efb17b
ZR
1124 enum modeset_restore modeset_restore;
1125 struct mutex modeset_restore_lock;
673a394b 1126
5d4545ae
BW
1127 struct i915_gtt gtt;
1128
4b5aed62 1129 struct i915_gem_mm mm;
8781342d 1130
8781342d
DV
1131 /* Kernel Modesetting */
1132
9b9d172d 1133 struct sdvo_device_mapping sdvo_mappings[2];
652c393a 1134
27f8227b
JB
1135 struct drm_crtc *plane_to_crtc_mapping[3];
1136 struct drm_crtc *pipe_to_crtc_mapping[3];
6b95a207
KH
1137 wait_queue_head_t pending_flip_queue;
1138
e72f9fbf
DV
1139 int num_shared_dpll;
1140 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
6441ab5f 1141 struct intel_ddi_plls ddi_plls;
ee7b9f93 1142
652c393a
JB
1143 /* Reclocking support */
1144 bool render_reclock_avail;
1145 bool lvds_downclock_avail;
18f9ed12
ZY
1146 /* indicates the reduced downclock for LVDS*/
1147 int lvds_downclock;
652c393a 1148 u16 orig_clock;
f97108d1 1149
c4804411 1150 bool mchbar_need_disable;
f97108d1 1151
a4da4fa4
DV
1152 struct intel_l3_parity l3_parity;
1153
59124506
BW
1154 /* Cannot be determined by PCIID. You must always read a register. */
1155 size_t ellc_size;
1156
c6a828d3 1157 /* gen6+ rps state */
c85aa885 1158 struct intel_gen6_power_mgmt rps;
c6a828d3 1159
20e4d407
DV
1160 /* ilk-only ips/rps state. Everything in here is protected by the global
1161 * mchdev_lock in intel_pm.c */
c85aa885 1162 struct intel_ilk_power_mgmt ips;
b5e50c3f 1163
a38911a3
WX
1164 /* Haswell power well */
1165 struct i915_power_well power_well;
1166
99584db3 1167 struct i915_gpu_error gpu_error;
ae681d96 1168
c9cddffc
JB
1169 struct drm_i915_gem_object *vlv_pctx;
1170
8be48d92
DA
1171 /* list of fbdev register on this device */
1172 struct intel_fbdev *fbdev;
e953fd7b 1173
073f34d9
JB
1174 /*
1175 * The console may be contended at resume, but we don't
1176 * want it to block on it.
1177 */
1178 struct work_struct console_resume_work;
1179
e953fd7b 1180 struct drm_property *broadcast_rgb_property;
3f43c48d 1181 struct drm_property *force_audio_property;
e3689190 1182
254f965c
BW
1183 bool hw_contexts_disabled;
1184 uint32_t hw_context_size;
f4c956ad 1185
3e68320e 1186 u32 fdi_rx_config;
68d18ad7 1187
f4c956ad 1188 struct i915_suspend_saved_registers regfile;
231f42a4
DV
1189
1190 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1191 * here! */
1192 struct i915_dri1_state dri1;
db1b76ca
DV
1193 /* Old ums support infrastructure, same warning applies. */
1194 struct i915_ums_state ums;
1da177e4
LT
1195} drm_i915_private_t;
1196
b4519513
CW
1197/* Iterate over initialised rings */
1198#define for_each_ring(ring__, dev_priv__, i__) \
1199 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1200 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
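
/*
 * Illustrative addition (not part of the original header): for_each_ring()
 * skips rings that were never initialised on the running platform, so callers
 * can iterate unconditionally. Assuming "dev_priv" is in scope:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d initialised\n", i);
 */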
1201
b1d7e4b4
WF
1202enum hdmi_force_audio {
1203 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
1204 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
1205 HDMI_AUDIO_AUTO, /* trust EDID */
1206 HDMI_AUDIO_ON, /* force turn on HDMI audio */
1207};
1208
190d6cd5 1209#define I915_GTT_OFFSET_NONE ((u32)-1)
ed2f3452 1210
37e680a1
CW
1211struct drm_i915_gem_object_ops {
1212 /* Interface between the GEM object and its backing storage.
1213 * get_pages() is called once prior to the use of the associated set
 1214 * of pages before binding them into the GTT, and put_pages() is
1215 * called after we no longer need them. As we expect there to be
1216 * associated cost with migrating pages between the backing storage
1217 * and making them available for the GPU (e.g. clflush), we may hold
1218 * onto the pages after they are no longer referenced by the GPU
1219 * in case they may be used again shortly (for example migrating the
1220 * pages to a different memory domain within the GTT). put_pages()
1221 * will therefore most likely be called when the object itself is
1222 * being released or under memory pressure (where we attempt to
1223 * reap pages for the shrinker).
1224 */
1225 int (*get_pages)(struct drm_i915_gem_object *);
1226 void (*put_pages)(struct drm_i915_gem_object *);
1227};
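
/*
 * Illustrative addition (not part of the original header): the rough shape of
 * a backing-storage implementation of these hooks. All names here are
 * hypothetical; the real shmem, stolen and dma-buf backends differ:
 *
 *	static int example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		obj->pages = example_pin_backing_store(obj);
 *		return obj->pages ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_put_pages(struct drm_i915_gem_object *obj)
 *	{
 *		example_release_backing_store(obj->pages);
 *		obj->pages = NULL;
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */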
1228
673a394b 1229struct drm_i915_gem_object {
c397b908 1230 struct drm_gem_object base;
673a394b 1231
37e680a1
CW
1232 const struct drm_i915_gem_object_ops *ops;
1233
673a394b 1234 /** Current space allocated to this object in the GTT, if any. */
c6cfb325 1235 struct drm_mm_node gtt_space;
c1ad11fc
CW
1236 /** Stolen memory for this object, instead of being backed by shmem. */
1237 struct drm_mm_node *stolen;
35c20a60 1238 struct list_head global_list;
673a394b 1239
65ce3027 1240 /** This object's place on the active/inactive lists */
69dc4987
CW
1241 struct list_head ring_list;
1242 struct list_head mm_list;
432e58ed
CW
1243 /** This object's place in the batchbuffer or on the eviction list */
1244 struct list_head exec_list;
673a394b
EA
1245
1246 /**
65ce3027
CW
1247 * This is set if the object is on the active lists (has pending
 1248 * rendering and so a non-zero seqno), and is not set if it is on the
1249 * inactive (ready to be unbound) list.
673a394b 1250 */
0206e353 1251 unsigned int active:1;
673a394b
EA
1252
1253 /**
1254 * This is set if the object has been written to since last bound
1255 * to the GTT
1256 */
0206e353 1257 unsigned int dirty:1;
778c3544
DV
1258
1259 /**
1260 * Fence register bits (if any) for this object. Will be set
1261 * as needed when mapped into the GTT.
1262 * Protected by dev->struct_mutex.
778c3544 1263 */
4b9de737 1264 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
778c3544 1265
778c3544
DV
1266 /**
1267 * Advice: are the backing pages purgeable?
1268 */
0206e353 1269 unsigned int madv:2;
778c3544 1270
778c3544
DV
1271 /**
1272 * Current tiling mode for the object.
1273 */
0206e353 1274 unsigned int tiling_mode:2;
5d82e3e6
CW
1275 /**
1276 * Whether the tiling parameters for the currently associated fence
1277 * register have changed. Note that for the purposes of tracking
1278 * tiling changes we also treat the unfenced register, the register
1279 * slot that the object occupies whilst it executes a fenced
1280 * command (such as BLT on gen2/3), as a "fence".
1281 */
1282 unsigned int fence_dirty:1;
778c3544
DV
1283
1284 /** How many users have pinned this object in GTT space. The following
1285 * users can each hold at most one reference: pwrite/pread, pin_ioctl
1286 * (via user_pin_count), execbuffer (objects are not allowed multiple
1287 * times for the same batchbuffer), and the framebuffer code. When
1288 * switching/pageflipping, the framebuffer code has at most two buffers
1289 * pinned per crtc.
1290 *
1291 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
1292 * bits with absolutely no headroom. So use 4 bits. */
0206e353 1293 unsigned int pin_count:4;
778c3544 1294#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
673a394b 1295
75e9e915
DV
1296 /**
1297 * Is the object at the current location in the gtt mappable and
1298 * fenceable? Used to avoid costly recalculations.
1299 */
0206e353 1300 unsigned int map_and_fenceable:1;
75e9e915 1301
fb7d516a
DV
1302 /**
1303 * Whether the current gtt mapping needs to be mappable (and isn't just
 1304 * mappable by accident). Track pin and fault separately for a more
1305 * accurate mappable working set.
1306 */
0206e353
AJ
1307 unsigned int fault_mappable:1;
1308 unsigned int pin_mappable:1;
fb7d516a 1309
caea7476
CW
1310 /*
 1311 * Is the GPU currently using a fence to access this buffer?
1312 */
1313 unsigned int pending_fenced_gpu_access:1;
1314 unsigned int fenced_gpu_access:1;
1315
93dfb40c
CW
1316 unsigned int cache_level:2;
1317
7bddb01f 1318 unsigned int has_aliasing_ppgtt_mapping:1;
74898d7e 1319 unsigned int has_global_gtt_mapping:1;
9da3da66 1320 unsigned int has_dma_mapping:1;
7bddb01f 1321
9da3da66 1322 struct sg_table *pages;
a5570178 1323 int pages_pin_count;
673a394b 1324
1286ff73 1325 /* prime dma-buf support */
9a70cc2a
DA
1326 void *dma_buf_vmapping;
1327 int vmapping_count;
1328
67731b87
CW
1329 /**
1330 * Used for performing relocations during execbuffer insertion.
1331 */
1332 struct hlist_node exec_node;
1333 unsigned long exec_handle;
6fe4f140 1334 struct drm_i915_gem_exec_object2 *exec_entry;
67731b87 1335
caea7476
CW
1336 struct intel_ring_buffer *ring;
1337
1c293ea3 1338 /** Breadcrumb of last rendering to the buffer. */
0201f1ec
CW
1339 uint32_t last_read_seqno;
1340 uint32_t last_write_seqno;
caea7476
CW
1341 /** Breadcrumb of last fenced GPU access to the buffer. */
1342 uint32_t last_fenced_seqno;
673a394b 1343
778c3544 1344 /** Current tiling stride for the object, if it's tiled. */
de151cf6 1345 uint32_t stride;
673a394b 1346
280b713b 1347 /** Record of address bit 17 of each page at last unbind. */
d312ec25 1348 unsigned long *bit_17;
280b713b 1349
79e53945
JB
1350 /** User space pin count and filp owning the pin */
1351 uint32_t user_pin_count;
1352 struct drm_file *pin_filp;
71acb5eb
DA
1353
1354 /** for phy allocated objects */
1355 struct drm_i915_gem_phys_object *phys_obj;
673a394b 1356};
b45305fc 1357#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
673a394b 1358
62b8b215 1359#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
23010e43 1360
f343c5f6
BW
1361/* Offset of the first PTE pointing to this object */
1362static inline unsigned long
1363i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
1364{
c6cfb325 1365 return o->gtt_space.start;
f343c5f6
BW
1366}
1367
1368/* Whether or not this object is currently mapped by the translation tables */
1369static inline bool
1370i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
1371{
c6cfb325 1372 return drm_mm_node_allocated(&o->gtt_space);
f343c5f6
BW
1373}
1374
1375/* The size used in the translation tables may be larger than the actual size of
1376 * the object on GEN2/GEN3 because of the way tiling is handled. See
1377 * i915_gem_get_gtt_size() for more details.
1378 */
1379static inline unsigned long
1380i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
1381{
c6cfb325 1382 return o->gtt_space.size;
f343c5f6
BW
1383}
1384
1385static inline void
1386i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
1387 enum i915_cache_level color)
1388{
c6cfb325 1389 o->gtt_space.color = color;
f343c5f6
BW
1390}
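
/*
 * Illustrative addition (not part of the original header): typical use of the
 * helpers above, e.g. when emitting a relocation or programming a fence:
 *
 *	if (i915_gem_obj_ggtt_bound(obj)) {
 *		unsigned long offset = i915_gem_obj_ggtt_offset(obj);
 *		unsigned long size   = i915_gem_obj_ggtt_size(obj);
 *		...
 *	}
 */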
1391
673a394b
EA
1392/**
1393 * Request queue structure.
1394 *
1395 * The request queue allows us to note sequence numbers that have been emitted
1396 * and may be associated with active buffers to be retired.
1397 *
1398 * By keeping this list, we can avoid having to do questionable
1399 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1400 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1401 */
1402struct drm_i915_gem_request {
852835f3
ZN
 1403 /** On which ring this request was generated */
1404 struct intel_ring_buffer *ring;
1405
673a394b
EA
1406 /** GEM sequence number associated with this request. */
1407 uint32_t seqno;
1408
7d736f4f
MK
1409 /** Position in the ringbuffer of the start of the request */
1410 u32 head;
1411
1412 /** Position in the ringbuffer of the end of the request */
a71d8d94
CW
1413 u32 tail;
1414
0e50e96b
MK
1415 /** Context related to this request */
1416 struct i915_hw_context *ctx;
1417
7d736f4f
MK
1418 /** Batch buffer related to this request if any */
1419 struct drm_i915_gem_object *batch_obj;
1420
673a394b
EA
1421 /** Time at which this request was emitted, in jiffies. */
1422 unsigned long emitted_jiffies;
1423
b962442e 1424 /** global list entry for this request */
673a394b 1425 struct list_head list;
b962442e 1426
f787a5f5 1427 struct drm_i915_file_private *file_priv;
b962442e
EA
1428 /** file_priv list entry for this request */
1429 struct list_head client_list;
673a394b
EA
1430};
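
/*
 * Illustrative addition (not part of the original header): a sketch of the
 * bookkeeping described in the comment above. When a request is emitted its
 * seqno and jiffies timestamp are recorded so that retirement and hangcheck
 * code can reason about how far behind the GPU is (helper name hypothetical):
 *
 *	request->seqno = example_next_seqno(ring);
 *	request->emitted_jiffies = jiffies;
 *	list_add_tail(&request->list, &ring->request_list);
 */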
1431
1432struct drm_i915_file_private {
1433 struct {
99057c81 1434 spinlock_t lock;
b962442e 1435 struct list_head request_list;
673a394b 1436 } mm;
40521054 1437 struct idr context_idr;
e59ec13d
MK
1438
1439 struct i915_ctx_hang_stats hang_stats;
673a394b
EA
1440};
1441
cae5852d
ZN
1442#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
1443
1444#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1445#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1446#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1447#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1448#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1449#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1450#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1451#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1452#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1453#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1454#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1455#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1456#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1457#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1458#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1459#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1460#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1461#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
4b65177b 1462#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
8ab43976
JB
1463#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1464 (dev)->pci_device == 0x0152 || \
1465 (dev)->pci_device == 0x015a)
6547fbdb
DV
1466#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1467 (dev)->pci_device == 0x0106 || \
1468 (dev)->pci_device == 0x010A)
70a3eb7a 1469#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
4cae9ae0 1470#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
cae5852d 1471#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
d567b07f
PZ
1472#define IS_ULT(dev) (IS_HASWELL(dev) && \
1473 ((dev)->pci_device & 0xFF00) == 0x0A00)
cae5852d 1474
85436696
JB
1475/*
1476 * The genX designation typically refers to the render engine, so render
1477 * capability related checks should use IS_GEN, while display and other checks
1478 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
1479 * chips, etc.).
1480 */
cae5852d
ZN
1481#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1482#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1483#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1484#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1485#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
85436696 1486#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
cae5852d
ZN
1487
1488#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1489#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
f72a1183 1490#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
3d29b842 1491#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
cae5852d
ZN
1492#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1493
254f965c 1494#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
93553609 1495#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
1d2a314c 1496
05394f39 1497#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
cae5852d
ZN
1498#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1499
b45305fc
DV
1500/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1501#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1502
cae5852d
ZN
1503/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1504 * rows, which changed the alignment requirements and fence programming.
1505 */
1506#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1507 IS_I915GM(dev)))
1508#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1509#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1510#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1511#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1512#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1513#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1514/* dsparb controlled by hw only */
1515#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1516
1517#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1518#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1519#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
cae5852d 1520
f5adf94e
DL
1521#define HAS_IPS(dev) (IS_ULT(dev))
1522
eceae481 1523#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
cae5852d 1524
dd93be58 1525#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
86d52df6 1526#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
30568c45 1527#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
affa9354 1528
17a303ec
PZ
1529#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1530#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1531#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1532#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1533#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1534#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1535
cae5852d 1536#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
eb877ebf 1537#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
cae5852d
ZN
1538#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1539#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
40c7ead9 1540#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
45e6e3a1 1541#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
cae5852d 1542
b7884eb4
DV
1543#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1544
f27b9265 1545#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
e1ef7cc2 1546
c8735b0c
BW
1547#define GT_FREQUENCY_MULTIPLIER 50
1548
05394f39
CW
1549#include "i915_trace.h"
1550
83b7f9ac
ED
1551/**
 1552 * RC6 is a special power stage which allows the GPU to enter a very
 1553 * low-voltage mode when idle, dropping down to 0V while in this stage. This
 1554 * stage is entered automatically when the GPU is idle and RC6 support is
 1555 * enabled; as soon as a new workload arises, the GPU wakes up automatically as well.
1556 *
1557 * There are different RC6 modes available in Intel GPU, which differentiate
1558 * among each other with the latency required to enter and leave RC6 and
1559 * voltage consumed by the GPU in different states.
1560 *
1561 * The combination of the following flags define which states GPU is allowed
1562 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
1563 * RC6pp is deepest RC6. Their support by hardware varies according to the
1564 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1565 * which brings the most power savings; deeper states save more power, but
1566 * require higher latency to switch to and wake up.
1567 */
1568#define INTEL_RC6_ENABLE (1<<0)
1569#define INTEL_RC6p_ENABLE (1<<1)
1570#define INTEL_RC6pp_ENABLE (1<<2)
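/*
 * Illustrative addition (not part of the original header): the flags above
 * combine into a bitmask describing which RC6 states may be entered, e.g.
 * allowing RC6 and deep RC6 but not the deepest state:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 */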
1571
c153f45f 1572extern struct drm_ioctl_desc i915_ioctls[];
b3a83639 1573extern int i915_max_ioctl;
a35d9d3c
BW
1574extern unsigned int i915_fbpercrtc __always_unused;
1575extern int i915_panel_ignore_lid __read_mostly;
1576extern unsigned int i915_powersave __read_mostly;
f45b5557 1577extern int i915_semaphores __read_mostly;
a35d9d3c 1578extern unsigned int i915_lvds_downclock __read_mostly;
121d527a 1579extern int i915_lvds_channel_mode __read_mostly;
4415e63b 1580extern int i915_panel_use_ssc __read_mostly;
a35d9d3c 1581extern int i915_vbt_sdvo_panel_type __read_mostly;
c0f372b3 1582extern int i915_enable_rc6 __read_mostly;
4415e63b 1583extern int i915_enable_fbc __read_mostly;
a35d9d3c 1584extern bool i915_enable_hangcheck __read_mostly;
650dc07e 1585extern int i915_enable_ppgtt __read_mostly;
0a3af268 1586extern unsigned int i915_preliminary_hw_support __read_mostly;
2124b72e 1587extern int i915_disable_power_well __read_mostly;
3c4ca58c 1588extern int i915_enable_ips __read_mostly;
2385bdf0 1589extern bool i915_fastboot __read_mostly;
b3a83639 1590
6a9ee8af
DA
1591extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1592extern int i915_resume(struct drm_device *dev);
7c1c2871
DA
1593extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1594extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1595
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
        struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
        struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
        unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
        struct drm_clip_rect *box,
        int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
        const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
        uint32_t alignment,
        bool map_and_fenceable,
        bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
        struct sg_page_iter sg_iter;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
                return sg_page_iter_page(&sg_iter);

        return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
}
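/*
 * Editorial note on the two helpers above (not from the original header):
 * pin_pages/unpin_pages calls are expected to be balanced, and the pin is
 * intended to keep i915_gem_object_put_pages() from releasing the object's
 * backing storage while pages_pin_count is non-zero.
 */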

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
        struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        struct intel_ring_buffer *ring);

int i915_gem_dumb_create(struct drm_file *file_priv,
        struct drm_device *dev,
        struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
        uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
        uint32_t handle);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}
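/*
 * Editorial note on i915_seqno_passed() above: the signed subtraction makes
 * the check safe across 32-bit seqno wrap-around.  For example,
 * i915_seqno_passed(0x00000002, 0xfffffffe) is true, because
 * (int32_t)(0x00000002 - 0xfffffffe) == 4 >= 0.
 */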

int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                dev_priv->fence_regs[obj->fence_reg].pin_count++;
                return true;
        } else {
                return false;
        }
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
}
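/*
 * Illustrative sketch (editorial addition, not from the original header) of
 * how the two helpers above pair up at a call site; "do_something_fenced" is
 * a placeholder for the caller's own work:
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		do_something_fenced(obj);
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */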

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
        bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
        return unlikely(atomic_read(&error->reset_counter)
                        & I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
        return atomic_read(&error->reset_counter) == I915_WEDGED;
}

void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
        uint32_t read_domains,
        uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
        struct drm_file *file,
        struct drm_i915_gem_object *batch_obj,
        u32 *seqno);
#define i915_add_request(ring, seqno) \
        __i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
        uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
        bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        u32 alignment,
        struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
        struct drm_i915_gem_object *obj,
        int id,
        int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
        struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
        int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
        enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
        struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
        struct drm_gem_object *gem_obj, int flags);

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
        struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
        kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
        kref_put(&ctx->ref, i915_gem_context_free);
}

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
        struct drm_file *file,
        u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
        struct drm_i915_gem_object *obj,
        enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
        struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
        enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
        unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
}

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
        unsigned alignment,
        unsigned cache_level,
        bool mappable,
        bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        u32 stolen_offset,
        u32 gtt_offset,
        u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
        const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
        int handle);
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
        const char *where, uint32_t mark);

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
        const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
        size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
        struct drm_i915_error_state_buf *eb)
{
        kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
        struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
        struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
        return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
        bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
        struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
        struct drm_device *dev,
        struct intel_display_error_state *error);

/* On the SNB platform, the forcewake bit must be set before reading ring
 * registers, to keep the GT core from powering down and returning stale
 * values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
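/*
 * Illustrative sketch (editorial addition, not from the original header) of
 * the intended get/read/put pairing around a raw register read; "RING_REG"
 * is a placeholder for a real register offset:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ_NOTRACE(RING_REG);
 *	gen6_gt_force_wake_put(dev_priv);
 */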

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
        enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
        enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);

#define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
        void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write

#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)	readw(dev_priv->regs + (reg))
#define I915_WRITE16_NOTRACE(reg, val)	writew(val, dev_priv->regs + (reg))

#define I915_READ(reg)		i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)		readl(dev_priv->regs + (reg))
#define I915_WRITE_NOTRACE(reg, val)	writel(val, dev_priv->regs + (reg))

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)	i915_read64(dev_priv, (reg))

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
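/*
 * Editorial note: POSTING_READ() is the usual idiom for flushing a preceding
 * register write whose ordering matters but whose read-back value is not
 * needed, e.g. (illustrative only):
 *
 *	I915_WRITE(reg, val);
 *	POSTING_READ(reg);
 */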

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO	0
#define INTEL_BROADCAST_RGB_FULL	1
#define INTEL_BROADCAST_RGB_LIMITED	2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
        if (HAS_PCH_SPLIT(dev))
                return CPU_VGACNTRL;
        else if (IS_VALLEYVIEW(dev))
                return VLV_VGACNTRL;
        else
                return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
        return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
        unsigned long j = msecs_to_jiffies(m);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
        unsigned long j = timespec_to_jiffies(value);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
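/*
 * Editorial note on the two *_to_jiffies_timeout() helpers above: they add
 * one extra jiffy on top of the converted value (and clamp the result to
 * MAX_JIFFY_OFFSET) so the timeout remains a guaranteed minimum even though
 * the current tick may already be partially elapsed.  Illustrative use,
 * where the wait queue and condition are placeholders:
 *
 *	ret = wait_event_timeout(some_wait_queue, some_condition,
 *				 msecs_to_jiffies_timeout(10));
 */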

#endif