drm/i915: Update less state during modeset.
drivers/gpu/drm/i915/intel_atomic.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
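
/*
 * The entry points in this file are not called directly by userspace; the
 * DRM core reaches them through function tables registered at driver init
 * time.  As a rough sketch (the authoritative tables live in intel_display.c
 * and the connector code, and carry additional callbacks):
 *
 *	static const struct drm_mode_config_funcs intel_mode_funcs = {
 *		.atomic_check = intel_atomic_check,
 *		.atomic_commit = intel_atomic_commit,
 *		.atomic_state_alloc = intel_atomic_state_alloc,
 *		.atomic_state_clear = intel_atomic_state_clear,
 *		...
 *	};
 *
 * intel_crtc_duplicate_state()/intel_crtc_destroy_state() are wired up as
 * the .atomic_duplicate_state/.atomic_destroy_state hooks of the CRTC's
 * drm_crtc_funcs, and intel_connector_atomic_get_property() as the
 * connectors' .atomic_get_property hook.
 */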

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
                       struct drm_atomic_state *state)
{
        int nplanes = dev->mode_config.num_total_plane;
        int ncrtcs = dev->mode_config.num_crtc;
        int nconnectors = dev->mode_config.num_connector;
        enum pipe nuclear_pipe = INVALID_PIPE;
        struct intel_crtc *nuclear_crtc = NULL;
        struct intel_crtc_state *crtc_state = NULL;
        int ret;
        int i;
        bool not_nuclear = false;

        /*
         * FIXME: At the moment, we only support "nuclear pageflip" on a
         * single CRTC.  Cross-crtc updates will be added later.
         */
        for (i = 0; i < nplanes; i++) {
                struct intel_plane *plane = to_intel_plane(state->planes[i]);
                if (!plane)
                        continue;

                if (nuclear_pipe == INVALID_PIPE) {
                        nuclear_pipe = plane->pipe;
                } else if (nuclear_pipe != plane->pipe) {
                        DRM_DEBUG_KMS("i915 only supports atomic plane operations on a single CRTC at the moment\n");
                        return -EINVAL;
                }
        }

        /*
         * FIXME: We only handle planes for now; make sure there are no CRTCs
         * or connectors involved.
         */
        state->allow_modeset = false;
        for (i = 0; i < ncrtcs; i++) {
                struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
                if (crtc)
                        memset(&crtc->atomic, 0, sizeof(crtc->atomic));
                if (crtc && crtc->pipe != nuclear_pipe)
                        not_nuclear = true;
                if (crtc && crtc->pipe == nuclear_pipe) {
                        nuclear_crtc = crtc;
                        crtc_state = to_intel_crtc_state(state->crtc_states[i]);
                }
        }
        for (i = 0; i < nconnectors; i++)
                if (state->connectors[i] != NULL)
                        not_nuclear = true;

        if (not_nuclear) {
                DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
                return -EINVAL;
        }

        if (crtc_state &&
            crtc_state->quirks & PIPE_CONFIG_QUIRK_INITIAL_PLANES) {
                ret = drm_atomic_add_affected_planes(state, &nuclear_crtc->base);
                if (ret)
                        return ret;
        }

        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;

        return 0;
}
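
/*
 * Example of the single-CRTC restriction above: an atomic update that
 * touches a sprite plane on pipe A and the primary plane on pipe B spans
 * two pipes and is rejected with -EINVAL, while any number of planes that
 * all sit on the same pipe may be updated in one transaction.
 */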

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete.  At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state,
                        bool async)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int ret;
        int i;

        if (async) {
                DRM_DEBUG_KMS("i915 does not yet support async commit\n");
                return -EINVAL;
        }

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;

        /* Point of no return */
        drm_atomic_helper_swap_state(dev, state);

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

                drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }

        /* FIXME: This function should eventually call __intel_set_mode when needed */

        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_state_free(state);

        return 0;
}
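
/*
 * The commit path above follows the standard atomic-helper sequence:
 *
 *   1. drm_atomic_helper_prepare_planes()  - pin/prepare the new framebuffers
 *   2. drm_atomic_helper_swap_state()      - point of no return, the new
 *                                            state becomes the current state
 *   3. per CRTC: update intel_crtc->config and call
 *      drm_atomic_helper_commit_planes_on_crtc() to program the planes
 *   4. drm_atomic_helper_wait_for_vblanks() - wait for the update to latch
 *   5. drm_atomic_helper_cleanup_planes()   - unpin the old framebuffers
 *
 * Only plane updates are handled here; full modesets still go through the
 * legacy i915 modeset code (see the __intel_set_mode FIXME above).
 */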

/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers.  This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
                                    const struct drm_connector_state *state,
                                    struct drm_property *property,
                                    uint64_t *val)
{
        int i;

        /*
         * TODO: We only have atomic modeset for planes at the moment, so the
         * crtc/connector code isn't quite ready yet.  Until it's ready,
         * continue to look up all property values in the DRM's shadow copy
         * in obj->properties->values[].
         *
         * When the crtc/connector state work matures, this function should
         * be updated to read the values out of the state structure instead.
         */
        for (i = 0; i < connector->base.properties->count; i++) {
                if (connector->base.properties->properties[i] == property) {
                        *val = connector->base.properties->values[i];
                        return 0;
                }
        }

        return -EINVAL;
}
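
/*
 * This helper is shared by the i915 connector types as their
 * drm_connector_funcs.atomic_get_property callback; the DRM core invokes it
 * when userspace queries a driver-specific property on an atomic-capable
 * connector.  The linear scan over connector->base.properties mirrors the
 * legacy property code, which keeps one value slot per attached property.
 */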

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *crtc_state;

        if (WARN_ON(!intel_crtc->config))
                crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        else
                crtc_state = kmemdup(intel_crtc->config,
                                     sizeof(*intel_crtc->config), GFP_KERNEL);

        if (!crtc_state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

        crtc_state->base.crtc = crtc;

        return &crtc_state->base;
}
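
/*
 * Note the ordering above: kmemdup() copies the whole intel_crtc_state,
 * Intel-specific fields included, and
 * __drm_atomic_helper_crtc_duplicate_state() then rewrites the embedded
 * struct drm_crtc_state from crtc->state and clears the per-commit flags
 * (planes_changed, mode_changed, the pending event pointer, ...), so the
 * duplicate starts out "clean" for the new transaction.
 */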

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
                         struct drm_crtc_state *state)
{
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on staged scaling requests for
 * @intel_crtc and its planes.  It is called from the crtc level check path.
 * If the request is supportable, it attaches scalers to the requested planes
 * and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * that are not part of this atomic state.
 *
 * Returns:
 *         0 - scalers were setup successfully
 *         error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
        struct intel_crtc *intel_crtc,
        struct intel_crtc_state *crtc_state)
{
        struct drm_plane *plane = NULL;
        struct intel_plane *intel_plane;
        struct intel_plane_state *plane_state = NULL;
        struct intel_crtc_scaler_state *scaler_state;
        struct drm_atomic_state *drm_state;
        int num_scalers_need;
        int i, j;

        if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
                return 0;

        scaler_state = &crtc_state->scaler_state;
        drm_state = crtc_state->base.state;

        num_scalers_need = hweight32(scaler_state->scaler_users);
        DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
                crtc_state, num_scalers_need, intel_crtc->num_scalers,
                scaler_state->scaler_users);

        /*
         * High level flow:
         * - staged scaler requests are already in scaler_state->scaler_users
         * - check whether staged scaling requests can be supported
         * - add planes using scalers that aren't in current transaction
         * - assign scalers to requested users
         * - as part of plane commit, scalers will be committed
         *   (i.e., either attached or detached) to respective planes in hw
         * - as part of crtc_commit, scaler will be either attached or detached
         *   to crtc in hw
         */

        /* fail if required scalers > available scalers */
        if (num_scalers_need > intel_crtc->num_scalers) {
                DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
                        num_scalers_need, intel_crtc->num_scalers);
                return -EINVAL;
        }

        /* walk through scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
                int *scaler_id;
                const char *name;
                int idx;

                /* skip if scaler not required */
                if (!(scaler_state->scaler_users & (1 << i)))
                        continue;

                if (i == SKL_CRTC_INDEX) {
                        name = "CRTC";
                        idx = intel_crtc->base.base.id;

                        /* panel fitter case: assign as a crtc scaler */
                        scaler_id = &scaler_state->scaler_id;
                } else {
                        name = "PLANE";

                        if (!drm_state)
                                continue;

                        /* plane scaler case: assign as a plane scaler */
                        /* find the plane that set the bit as scaler_user */
                        plane = drm_state->planes[i];

                        /*
                         * to enable/disable hq mode, add planes that are using scaler
                         * into this transaction
                         */
                        if (!plane) {
                                struct drm_plane_state *state;
                                plane = drm_plane_from_index(dev, i);
                                state = drm_atomic_get_plane_state(drm_state, plane);
                                if (IS_ERR(state)) {
                                        DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
                                                plane->base.id);
                                        return PTR_ERR(state);
                                }

                                /*
                                 * the plane is added after plane checks are run,
                                 * but since this plane is unchanged just do the
                                 * minimum required validation.
                                 */
                                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                                        intel_crtc->atomic.wait_for_flips = true;
                                crtc_state->base.planes_changed = true;
                        }

                        intel_plane = to_intel_plane(plane);
                        /* only look up the id once the plane is known to be non-NULL */
                        idx = plane->base.id;

                        /* plane on different crtc cannot be a scaler user of this crtc */
                        if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
                                continue;
                        }

                        plane_state = to_intel_plane_state(drm_state->plane_states[i]);
                        scaler_id = &plane_state->scaler_id;
                }

                if (*scaler_id < 0) {
                        /* find a free scaler */
                        for (j = 0; j < intel_crtc->num_scalers; j++) {
                                if (!scaler_state->scalers[j].in_use) {
                                        scaler_state->scalers[j].in_use = 1;
                                        *scaler_id = j;
                                        DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
                                                intel_crtc->pipe, *scaler_id, name, idx);
                                        break;
                                }
                        }
                }

                if (WARN_ON(*scaler_id < 0)) {
                        DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
                        continue;
                }

                /* set scaler mode */
                if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
                        /*
                         * when only 1 scaler is in use on either pipe A or B,
                         * scaler 0 operates in high quality (HQ) mode.
                         * In this case use scaler 0 to take advantage of HQ mode
                         */
                        *scaler_id = 0;
                        scaler_state->scalers[0].in_use = 1;
                        scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
                        scaler_state->scalers[1].in_use = 0;
                } else {
                        scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
                }
        }

        return 0;
}
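
/*
 * Worked example of the assignment above (assuming a SKL-style pipe A with
 * two scalers): if scaler_users has the SKL_CRTC_INDEX bit set for panel
 * fitting and one plane bit set for plane scaling, num_scalers_need is 2,
 * which fits the two available scalers, and both users run in
 * PS_SCALER_MODE_DYN.  If only the panel fitter had requested scaling,
 * num_scalers_need would be 1 and the single user would be forced onto
 * scaler 0 in PS_SCALER_MODE_HQ.  Pipe C, with its single scaler, never
 * takes the HQ branch per the check above.
 */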

static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
                                  struct intel_shared_dpll_config *shared_dpll)
{
        enum intel_dpll_id i;

        /* Copy shared dpll state */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                shared_dpll[i] = pll->config;
        }
}

struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

        if (!state->dpll_set) {
                state->dpll_set = true;

                intel_atomic_duplicate_dpll_state(to_i915(s->dev),
                                                  state->shared_dpll);
        }

        return state->shared_dpll;
}
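
/*
 * A rough usage sketch from the modeset check path (the exact caller lives
 * in the shared-DPLL selection code and may differ in detail; dpll_id here
 * stands for whichever DPLL was picked):
 *
 *	struct intel_shared_dpll_config *shared_dpll =
 *		intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 *
 *	shared_dpll[dpll_id].crtc_mask |= 1 << crtc->pipe;
 *
 * The first call in a given transaction snapshots the current DPLL
 * configuration into the intel_atomic_state; later calls return the same
 * copy, so all DPLL bookkeeping for the transaction is staged there and
 * only written back when the state is committed.
 */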

struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
        struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
                kfree(state);
                return NULL;
        }

        return &state->base;
}

void intel_atomic_state_clear(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);
        drm_atomic_state_default_clear(&state->base);
        state->dpll_set = false;
}
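
/*
 * intel_atomic_state_alloc() and intel_atomic_state_clear() exist as a pair:
 * because i915 subclasses drm_atomic_state (struct intel_atomic_state adds
 * dpll_set and shared_dpll[]), the driver must both allocate the larger
 * object and reset its private members when the core recycles a state after
 * a modeset-lock backoff.  Without the clear hook, a retried transaction
 * would keep the stale dpll_set flag and skip re-snapshotting the DPLL
 * configuration.
 */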