drivers/gpu/drm/drm_atomic.c
1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28
29 #include <drm/drmP.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_plane_helper.h>
32
33 /**
34 * drm_atomic_state_default_release -
35 * release memory initialized by drm_atomic_state_init
36 * @state: atomic state
37 *
38 * Free all the memory allocated by drm_atomic_state_init.
39 * This is useful for drivers that subclass the atomic state.
40 */
41 void drm_atomic_state_default_release(struct drm_atomic_state *state)
42 {
43 kfree(state->connectors);
44 kfree(state->connector_states);
45 kfree(state->crtcs);
46 kfree(state->crtc_states);
47 kfree(state->planes);
48 kfree(state->plane_states);
49 }
50 EXPORT_SYMBOL(drm_atomic_state_default_release);
51
52 /**
53 * drm_atomic_state_init - init new atomic state
54 * @dev: DRM device
55 * @state: atomic state
56 *
57 * Default implementation for filling in a new atomic state.
58 * This is useful for drivers that subclass the atomic state.
59 */
60 int
61 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
62 {
63 /* TODO legacy paths should maybe do a better job about
64 * setting this appropriately?
65 */
66 state->allow_modeset = true;
67
68 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
69
70 state->crtcs = kcalloc(dev->mode_config.num_crtc,
71 sizeof(*state->crtcs), GFP_KERNEL);
72 if (!state->crtcs)
73 goto fail;
74 state->crtc_states = kcalloc(dev->mode_config.num_crtc,
75 sizeof(*state->crtc_states), GFP_KERNEL);
76 if (!state->crtc_states)
77 goto fail;
78 state->planes = kcalloc(dev->mode_config.num_total_plane,
79 sizeof(*state->planes), GFP_KERNEL);
80 if (!state->planes)
81 goto fail;
82 state->plane_states = kcalloc(dev->mode_config.num_total_plane,
83 sizeof(*state->plane_states), GFP_KERNEL);
84 if (!state->plane_states)
85 goto fail;
86 state->connectors = kcalloc(state->num_connector,
87 sizeof(*state->connectors),
88 GFP_KERNEL);
89 if (!state->connectors)
90 goto fail;
91 state->connector_states = kcalloc(state->num_connector,
92 sizeof(*state->connector_states),
93 GFP_KERNEL);
94 if (!state->connector_states)
95 goto fail;
96
97 state->dev = dev;
98
99 DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
100
101 return 0;
102 fail:
103 drm_atomic_state_default_release(state);
104 return -ENOMEM;
105 }
106 EXPORT_SYMBOL(drm_atomic_state_init);
107
108 /**
109 * drm_atomic_state_alloc - allocate atomic state
110 * @dev: DRM device
111 *
112 * This allocates an empty atomic state to track updates.
113 */
114 struct drm_atomic_state *
115 drm_atomic_state_alloc(struct drm_device *dev)
116 {
117 struct drm_mode_config *config = &dev->mode_config;
118 struct drm_atomic_state *state;
119
120 if (!config->funcs->atomic_state_alloc) {
121 state = kzalloc(sizeof(*state), GFP_KERNEL);
122 if (!state)
123 return NULL;
124 if (drm_atomic_state_init(dev, state) < 0) {
125 kfree(state);
126 return NULL;
127 }
128 return state;
129 }
130
131 return config->funcs->atomic_state_alloc(dev);
132 }
133 EXPORT_SYMBOL(drm_atomic_state_alloc);
134
135 /**
136 * drm_atomic_state_default_clear - clear base atomic state
137 * @state: atomic state
138 *
139 * Default implementation for clearing atomic state.
140 * This is useful for drivers that subclass the atomic state.
141 */
142 void drm_atomic_state_default_clear(struct drm_atomic_state *state)
143 {
144 struct drm_device *dev = state->dev;
145 struct drm_mode_config *config = &dev->mode_config;
146 int i;
147
148 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
149
150 for (i = 0; i < state->num_connector; i++) {
151 struct drm_connector *connector = state->connectors[i];
152
153 if (!connector)
154 continue;
155
156 /*
157 * FIXME: Async commits can race with connector unplugging and
158 * there's currently nothing that prevents cleaning up state for
159 * deleted connectors. As long as the callback doesn't look at
160 * the connector we'll be fine though, so make sure that's the
161 * case by setting all connector pointers to NULL.
162 */
163 state->connector_states[i]->connector = NULL;
164 connector->funcs->atomic_destroy_state(NULL,
165 state->connector_states[i]);
166 state->connectors[i] = NULL;
167 state->connector_states[i] = NULL;
168 }
169
170 for (i = 0; i < config->num_crtc; i++) {
171 struct drm_crtc *crtc = state->crtcs[i];
172
173 if (!crtc)
174 continue;
175
176 crtc->funcs->atomic_destroy_state(crtc,
177 state->crtc_states[i]);
178 state->crtcs[i] = NULL;
179 state->crtc_states[i] = NULL;
180 }
181
182 for (i = 0; i < config->num_total_plane; i++) {
183 struct drm_plane *plane = state->planes[i];
184
185 if (!plane)
186 continue;
187
188 plane->funcs->atomic_destroy_state(plane,
189 state->plane_states[i]);
190 state->planes[i] = NULL;
191 state->plane_states[i] = NULL;
192 }
193 }
194 EXPORT_SYMBOL(drm_atomic_state_default_clear);
195
196 /**
197 * drm_atomic_state_clear - clear state object
198 * @state: atomic state
199 *
200 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
201 * all locks. Someone else could then sneak in and change the current modeset
202 * configuration, which means that all the state assembled in @state is no
203 * longer an atomic update to the current state but to some arbitrary earlier
204 * state, which could break assumptions the driver's ->atomic_check likely
205 * relies on.
206 *
207 * Hence we must clear all cached state and completely start over, using this
208 * function.
209 */
210 void drm_atomic_state_clear(struct drm_atomic_state *state)
211 {
212 struct drm_device *dev = state->dev;
213 struct drm_mode_config *config = &dev->mode_config;
214
215 if (config->funcs->atomic_state_clear)
216 config->funcs->atomic_state_clear(state);
217 else
218 drm_atomic_state_default_clear(state);
219 }
220 EXPORT_SYMBOL(drm_atomic_state_clear);
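/*
 * Example (editorial sketch, not part of the original file): the backoff
 * dance described above usually takes the following shape in code that
 * drives an atomic update by hand. Everything used here is defined either
 * in this file or in drm_modeset_lock.c; the actual state updates between
 * retry: and the commit are elided.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = &ctx;
 *
 * retry:
 *	(build up the new configuration in state here)
 *
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	if (ret)
 *		drm_atomic_state_free(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */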
221
222 /**
223 * drm_atomic_state_free - free all memory for an atomic state
224 * @state: atomic state to deallocate
225 *
226 * This frees all memory associated with an atomic state, including all the
227 * per-object state for planes, crtcs and connectors.
228 */
229 void drm_atomic_state_free(struct drm_atomic_state *state)
230 {
231 struct drm_device *dev;
232 struct drm_mode_config *config;
233
234 if (!state)
235 return;
236
237 dev = state->dev;
238 config = &dev->mode_config;
239
240 drm_atomic_state_clear(state);
241
242 DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
243
244 if (config->funcs->atomic_state_free) {
245 config->funcs->atomic_state_free(state);
246 } else {
247 drm_atomic_state_default_release(state);
248 kfree(state);
249 }
250 }
251 EXPORT_SYMBOL(drm_atomic_state_free);
252
253 /**
254 * drm_atomic_get_crtc_state - get crtc state
255 * @state: global atomic state object
256 * @crtc: crtc to get state object for
257 *
258 * This function returns the crtc state for the given crtc, allocating it if
259 * needed. It will also grab the relevant crtc lock to make sure that the state
260 * is consistent.
261 *
262 * Returns:
263 *
264 * Either the allocated state or the error code encoded into the pointer. When
265 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
266 * entire atomic sequence must be restarted. All other errors are fatal.
267 */
268 struct drm_crtc_state *
269 drm_atomic_get_crtc_state(struct drm_atomic_state *state,
270 struct drm_crtc *crtc)
271 {
272 int ret, index = drm_crtc_index(crtc);
273 struct drm_crtc_state *crtc_state;
274
275 crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
276 if (crtc_state)
277 return crtc_state;
278
279 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
280 if (ret)
281 return ERR_PTR(ret);
282
283 crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
284 if (!crtc_state)
285 return ERR_PTR(-ENOMEM);
286
287 state->crtc_states[index] = crtc_state;
288 state->crtcs[index] = crtc;
289 crtc_state->state = state;
290
291 DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
292 crtc->base.id, crtc_state, state);
293
294 return crtc_state;
295 }
296 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
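/*
 * Usage sketch (editorial addition, not from the original file): callers
 * must check the return value with IS_ERR() and simply propagate -EDEADLK
 * so the top-level retry loop can back off and restart. A minimal update
 * that deactivates a CRTC could look like this:
 *
 *	struct drm_crtc_state *crtc_state;
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *
 *	crtc_state->active = false;
 */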
297
298 /**
299 * drm_atomic_set_mode_for_crtc - set mode for CRTC
300 * @state: the CRTC whose incoming state to update
301 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
302 *
303 * Set a mode (originating from the kernel) on the desired CRTC state, and
304 * update ->enable to match. Other state properties, such as active or
305 * mode_changed, are not touched.
306 *
307 * RETURNS:
308 * Zero on success, error code on failure. Cannot return -EDEADLK.
309 */
310 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
311 struct drm_display_mode *mode)
312 {
313 struct drm_mode_modeinfo umode;
314
315 /* Early return for no change. */
316 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
317 return 0;
318
319 if (state->mode_blob)
320 drm_property_unreference_blob(state->mode_blob);
321 state->mode_blob = NULL;
322
323 if (mode) {
324 drm_mode_convert_to_umode(&umode, mode);
325 state->mode_blob =
326 drm_property_create_blob(state->crtc->dev,
327 sizeof(umode),
328 &umode);
329 if (IS_ERR(state->mode_blob))
330 return PTR_ERR(state->mode_blob);
331
332 drm_mode_copy(&state->mode, mode);
333 state->enable = true;
334 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
335 mode->name, state);
336 } else {
337 memset(&state->mode, 0, sizeof(state->mode));
338 state->enable = false;
339 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
340 state);
341 }
342
343 return 0;
344 }
345 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
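/*
 * Example (editorial sketch): enabling a CRTC with a kernel-internal mode.
 * The helper fills in ->mode, ->mode_blob and ->enable, but ->active still
 * has to be set separately; crtc_state and mode are assumed to have been
 * obtained by the caller.
 *
 *	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
 *	if (ret)
 *		return ret;
 *	crtc_state->active = true;
 */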
346
347 /**
348 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
349 * @state: the CRTC whose incoming state to update
350 * @blob: pointer to blob property to use for mode
351 *
352 * Set a mode (originating from a blob property) on the desired CRTC state.
353 * This function will take a reference on the blob property for the CRTC state,
354 * and release the reference held on the state's existing mode property, if any
355 * was set.
356 *
357 * RETURNS:
358 * Zero on success, error code on failure. Cannot return -EDEADLK.
359 */
360 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
361 struct drm_property_blob *blob)
362 {
363 if (blob == state->mode_blob)
364 return 0;
365
366 if (state->mode_blob)
367 drm_property_unreference_blob(state->mode_blob);
368 state->mode_blob = NULL;
369
370 if (blob) {
371 if (blob->length != sizeof(struct drm_mode_modeinfo) ||
372 drm_mode_convert_umode(&state->mode,
373 (const struct drm_mode_modeinfo *)
374 blob->data))
375 return -EINVAL;
376
377 state->mode_blob = drm_property_reference_blob(blob);
378 state->enable = true;
379 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
380 state->mode.name, state);
381 } else {
382 memset(&state->mode, 0, sizeof(state->mode));
383 state->enable = false;
384 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
385 state);
386 }
387
388 return 0;
389 }
390 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
391
392 /**
393 * drm_atomic_crtc_set_property - set property on CRTC
394 * @crtc: the drm CRTC to set a property on
395 * @state: the state object to update with the new property value
396 * @property: the property to set
397 * @val: the new property value
398 *
399 * Use this instead of calling crtc->atomic_set_property directly.
400 * This function handles generic/core properties and calls out to
401 * driver's ->atomic_set_property() for driver properties. To ensure
402 * consistent behavior you must call this function rather than the
403 * driver hook directly.
404 *
405 * RETURNS:
406 * Zero on success, error code on failure
407 */
408 int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
409 struct drm_crtc_state *state, struct drm_property *property,
410 uint64_t val)
411 {
412 struct drm_device *dev = crtc->dev;
413 struct drm_mode_config *config = &dev->mode_config;
414 int ret;
415
416 if (property == config->prop_active)
417 state->active = val;
418 else if (property == config->prop_mode_id) {
419 struct drm_property_blob *mode =
420 drm_property_lookup_blob(dev, val);
421 ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
422 if (mode)
423 drm_property_unreference_blob(mode);
424 return ret;
425 }
426 else if (crtc->funcs->atomic_set_property)
427 return crtc->funcs->atomic_set_property(crtc, state, property, val);
428 else
429 return -EINVAL;
430
431 return 0;
432 }
433 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
434
435 /*
436 * This function handles generic/core properties and calls out to
437 * driver's ->atomic_get_property() for driver properties. To ensure
438 * consistent behavior you must call this function rather than the
439 * driver hook directly.
440 */
441 static int
442 drm_atomic_crtc_get_property(struct drm_crtc *crtc,
443 const struct drm_crtc_state *state,
444 struct drm_property *property, uint64_t *val)
445 {
446 struct drm_device *dev = crtc->dev;
447 struct drm_mode_config *config = &dev->mode_config;
448
449 if (property == config->prop_active)
450 *val = state->active;
451 else if (property == config->prop_mode_id)
452 *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
453 else if (crtc->funcs->atomic_get_property)
454 return crtc->funcs->atomic_get_property(crtc, state, property, val);
455 else
456 return -EINVAL;
457
458 return 0;
459 }
460
461 /**
462 * drm_atomic_crtc_check - check crtc state
463 * @crtc: crtc to check
464 * @state: crtc state to check
465 *
466 * Provides core sanity checks for crtc state.
467 *
468 * RETURNS:
469 * Zero on success, error code on failure
470 */
471 static int drm_atomic_crtc_check(struct drm_crtc *crtc,
472 struct drm_crtc_state *state)
473 {
474 /* NOTE: we explicitly don't enforce constraints such as primary
475 * layer covering entire screen, since that is something we want
476 * to allow (on hw that supports it). For hw that does not, it
477 * should be checked in driver's crtc->atomic_check() vfunc.
478 *
479 * TODO: Add generic modeset state checks once we support those.
480 */
481
482 if (state->active && !state->enable) {
483 DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
484 crtc->base.id);
485 return -EINVAL;
486 }
487
488 /* The state->enable vs. state->mode_blob checks can be WARN_ON,
489 * as this is a kernel-internal detail that userspace should never
490 * be able to trigger. */
491 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
492 WARN_ON(state->enable && !state->mode_blob)) {
493 DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n",
494 crtc->base.id);
495 return -EINVAL;
496 }
497
498 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
499 WARN_ON(!state->enable && state->mode_blob)) {
500 DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n",
501 crtc->base.id);
502 return -EINVAL;
503 }
504
505 return 0;
506 }
507
508 /**
509 * drm_atomic_get_plane_state - get plane state
510 * @state: global atomic state object
511 * @plane: plane to get state object for
512 *
513 * This function returns the plane state for the given plane, allocating it if
514 * needed. It will also grab the relevant plane lock to make sure that the state
515 * is consistent.
516 *
517 * Returns:
518 *
519 * Either the allocated state or the error code encoded into the pointer. When
520 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
521 * entire atomic sequence must be restarted. All other errors are fatal.
522 */
523 struct drm_plane_state *
524 drm_atomic_get_plane_state(struct drm_atomic_state *state,
525 struct drm_plane *plane)
526 {
527 int ret, index = drm_plane_index(plane);
528 struct drm_plane_state *plane_state;
529
530 plane_state = drm_atomic_get_existing_plane_state(state, plane);
531 if (plane_state)
532 return plane_state;
533
534 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
535 if (ret)
536 return ERR_PTR(ret);
537
538 plane_state = plane->funcs->atomic_duplicate_state(plane);
539 if (!plane_state)
540 return ERR_PTR(-ENOMEM);
541
542 state->plane_states[index] = plane_state;
543 state->planes[index] = plane;
544 plane_state->state = state;
545
546 DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
547 plane->base.id, plane_state, state);
548
549 if (plane_state->crtc) {
550 struct drm_crtc_state *crtc_state;
551
552 crtc_state = drm_atomic_get_crtc_state(state,
553 plane_state->crtc);
554 if (IS_ERR(crtc_state))
555 return ERR_CAST(crtc_state);
556 }
557
558 return plane_state;
559 }
560 EXPORT_SYMBOL(drm_atomic_get_plane_state);
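/*
 * Example (editorial sketch, not from the original file): a full-screen
 * plane update built on top of this function. Source coordinates are in
 * 16.16 fixed point, hence the << 16 on the framebuffer dimensions; crtc,
 * fb and mode are assumed to be provided by the caller.
 *
 *	struct drm_plane_state *plane_state;
 *	int ret;
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *
 *	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *	if (ret)
 *		return ret;
 *	drm_atomic_set_fb_for_plane(plane_state, fb);
 *
 *	plane_state->crtc_x = 0;
 *	plane_state->crtc_y = 0;
 *	plane_state->crtc_w = mode->hdisplay;
 *	plane_state->crtc_h = mode->vdisplay;
 *	plane_state->src_x = 0;
 *	plane_state->src_y = 0;
 *	plane_state->src_w = fb->width << 16;
 *	plane_state->src_h = fb->height << 16;
 */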
561
562 /**
563 * drm_atomic_plane_set_property - set property on plane
564 * @plane: the drm plane to set a property on
565 * @state: the state object to update with the new property value
566 * @property: the property to set
567 * @val: the new property value
568 *
569 * Use this instead of calling plane->atomic_set_property directly.
570 * This function handles generic/core properties and calls out to
571 * driver's ->atomic_set_property() for driver properties. To ensure
572 * consistent behavior you must call this function rather than the
573 * driver hook directly.
574 *
575 * RETURNS:
576 * Zero on success, error code on failure
577 */
578 int drm_atomic_plane_set_property(struct drm_plane *plane,
579 struct drm_plane_state *state, struct drm_property *property,
580 uint64_t val)
581 {
582 struct drm_device *dev = plane->dev;
583 struct drm_mode_config *config = &dev->mode_config;
584
585 if (property == config->prop_fb_id) {
586 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
587 drm_atomic_set_fb_for_plane(state, fb);
588 if (fb)
589 drm_framebuffer_unreference(fb);
590 } else if (property == config->prop_crtc_id) {
591 struct drm_crtc *crtc = drm_crtc_find(dev, val);
592 return drm_atomic_set_crtc_for_plane(state, crtc);
593 } else if (property == config->prop_crtc_x) {
594 state->crtc_x = U642I64(val);
595 } else if (property == config->prop_crtc_y) {
596 state->crtc_y = U642I64(val);
597 } else if (property == config->prop_crtc_w) {
598 state->crtc_w = val;
599 } else if (property == config->prop_crtc_h) {
600 state->crtc_h = val;
601 } else if (property == config->prop_src_x) {
602 state->src_x = val;
603 } else if (property == config->prop_src_y) {
604 state->src_y = val;
605 } else if (property == config->prop_src_w) {
606 state->src_w = val;
607 } else if (property == config->prop_src_h) {
608 state->src_h = val;
609 } else if (property == config->rotation_property) {
610 state->rotation = val;
611 } else if (plane->funcs->atomic_set_property) {
612 return plane->funcs->atomic_set_property(plane, state,
613 property, val);
614 } else {
615 return -EINVAL;
616 }
617
618 return 0;
619 }
620 EXPORT_SYMBOL(drm_atomic_plane_set_property);
621
622 /*
623 * This function handles generic/core properties and calls out to
624 * driver's ->atomic_get_property() for driver properties. To ensure
625 * consistent behavior you must call this function rather than the
626 * driver hook directly.
627 */
628 static int
629 drm_atomic_plane_get_property(struct drm_plane *plane,
630 const struct drm_plane_state *state,
631 struct drm_property *property, uint64_t *val)
632 {
633 struct drm_device *dev = plane->dev;
634 struct drm_mode_config *config = &dev->mode_config;
635
636 if (property == config->prop_fb_id) {
637 *val = (state->fb) ? state->fb->base.id : 0;
638 } else if (property == config->prop_crtc_id) {
639 *val = (state->crtc) ? state->crtc->base.id : 0;
640 } else if (property == config->prop_crtc_x) {
641 *val = I642U64(state->crtc_x);
642 } else if (property == config->prop_crtc_y) {
643 *val = I642U64(state->crtc_y);
644 } else if (property == config->prop_crtc_w) {
645 *val = state->crtc_w;
646 } else if (property == config->prop_crtc_h) {
647 *val = state->crtc_h;
648 } else if (property == config->prop_src_x) {
649 *val = state->src_x;
650 } else if (property == config->prop_src_y) {
651 *val = state->src_y;
652 } else if (property == config->prop_src_w) {
653 *val = state->src_w;
654 } else if (property == config->prop_src_h) {
655 *val = state->src_h;
656 } else if (property == config->rotation_property) {
657 *val = state->rotation;
658 } else if (plane->funcs->atomic_get_property) {
659 return plane->funcs->atomic_get_property(plane, state, property, val);
660 } else {
661 return -EINVAL;
662 }
663
664 return 0;
665 }
666
667 static bool
668 plane_switching_crtc(struct drm_atomic_state *state,
669 struct drm_plane *plane,
670 struct drm_plane_state *plane_state)
671 {
672 if (!plane->state->crtc || !plane_state->crtc)
673 return false;
674
675 if (plane->state->crtc == plane_state->crtc)
676 return false;
677
678 /* This could be refined, but currently there's no helper or driver code
679 * to implement direct switching of active planes nor userspace to take
680 * advantage of more direct plane switching without the intermediate
681 * full OFF state.
682 */
683 return true;
684 }
685
686 /**
687 * drm_atomic_plane_check - check plane state
688 * @plane: plane to check
689 * @state: plane state to check
690 *
691 * Provides core sanity checks for plane state.
692 *
693 * RETURNS:
694 * Zero on success, error code on failure
695 */
696 static int drm_atomic_plane_check(struct drm_plane *plane,
697 struct drm_plane_state *state)
698 {
699 unsigned int fb_width, fb_height;
700 int ret;
701
702 /* either *both* CRTC and FB must be set, or neither */
703 if (WARN_ON(state->crtc && !state->fb)) {
704 DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
705 return -EINVAL;
706 } else if (WARN_ON(state->fb && !state->crtc)) {
707 DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
708 return -EINVAL;
709 }
710
711 /* if disabled, we don't care about the rest of the state: */
712 if (!state->crtc)
713 return 0;
714
715 /* Check whether this plane is usable on this CRTC */
716 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
717 DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
718 return -EINVAL;
719 }
720
721 /* Check whether this plane supports the fb pixel format. */
722 ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
723 if (ret) {
724 DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
725 drm_get_format_name(state->fb->pixel_format));
726 return ret;
727 }
728
729 /* Give drivers some help against integer overflows */
730 if (state->crtc_w > INT_MAX ||
731 state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
732 state->crtc_h > INT_MAX ||
733 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
734 DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
735 state->crtc_w, state->crtc_h,
736 state->crtc_x, state->crtc_y);
737 return -ERANGE;
738 }
739
740 fb_width = state->fb->width << 16;
741 fb_height = state->fb->height << 16;
742
743 /* Make sure source coordinates are inside the fb. */
744 if (state->src_w > fb_width ||
745 state->src_x > fb_width - state->src_w ||
746 state->src_h > fb_height ||
747 state->src_y > fb_height - state->src_h) {
748 DRM_DEBUG_ATOMIC("Invalid source coordinates "
749 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
750 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
751 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
752 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
753 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
754 return -ENOSPC;
755 }
756
757 if (plane_switching_crtc(state->state, plane, state)) {
758 DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
759 plane->base.id);
760 return -EINVAL;
761 }
762
763 return 0;
764 }
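/*
 * Note on the debug output above (editorial addition): src coordinates are
 * in 16.16 fixed point, so the fractional part ranges over 0..65535.
 * Multiplying it by 15625 and shifting right by 10 is exactly the same as
 * multiplying by 1000000/65536, i.e. it expresses the fraction in
 * millionths. For example src_x == 0x00018000 (1.5) prints as "1.500000".
 */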
765
766 /**
767 * drm_atomic_get_connector_state - get connector state
768 * @state: global atomic state object
769 * @connector: connector to get state object for
770 *
771 * This function returns the connector state for the given connector,
772 * allocating it if needed. It will also grab the relevant connector lock to
773 * make sure that the state is consistent.
774 *
775 * Returns:
776 *
777 * Either the allocated state or the error code encoded into the pointer. When
778 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
779 * entire atomic sequence must be restarted. All other errors are fatal.
780 */
781 struct drm_connector_state *
782 drm_atomic_get_connector_state(struct drm_atomic_state *state,
783 struct drm_connector *connector)
784 {
785 int ret, index;
786 struct drm_mode_config *config = &connector->dev->mode_config;
787 struct drm_connector_state *connector_state;
788
789 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
790 if (ret)
791 return ERR_PTR(ret);
792
793 index = drm_connector_index(connector);
794
795 /*
796 * Construction of atomic state updates can race with a connector
797 * hot-add which might overflow. In this case flip the table and just
798 * restart the entire ioctl - no one is fast enough to livelock a cpu
799 * with physical hotplug events anyway.
800 *
801 * Note that we only grab the indexes once we have the right lock to
802 * prevent hotplug/unplugging of connectors. So removal is no problem,
803 * at most the array is a bit too large.
804 */
805 if (index >= state->num_connector) {
806 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
807 return ERR_PTR(-EAGAIN);
808 }
809
810 if (state->connector_states[index])
811 return state->connector_states[index];
812
813 connector_state = connector->funcs->atomic_duplicate_state(connector);
814 if (!connector_state)
815 return ERR_PTR(-ENOMEM);
816
817 state->connector_states[index] = connector_state;
818 state->connectors[index] = connector;
819 connector_state->state = state;
820
821 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
822 connector->base.id, connector_state, state);
823
824 if (connector_state->crtc) {
825 struct drm_crtc_state *crtc_state;
826
827 crtc_state = drm_atomic_get_crtc_state(state,
828 connector_state->crtc);
829 if (IS_ERR(crtc_state))
830 return ERR_CAST(crtc_state);
831 }
832
833 return connector_state;
834 }
835 EXPORT_SYMBOL(drm_atomic_get_connector_state);
836
837 /**
838 * drm_atomic_connector_set_property - set property on connector.
839 * @connector: the drm connector to set a property on
840 * @state: the state object to update with the new property value
841 * @property: the property to set
842 * @val: the new property value
843 *
844 * Use this instead of calling connector->atomic_set_property directly.
845 * This function handles generic/core properties and calls out to
846 * driver's ->atomic_set_property() for driver properties. To ensure
847 * consistent behavior you must call this function rather than the
848 * driver hook directly.
849 *
850 * RETURNS:
851 * Zero on success, error code on failure
852 */
853 int drm_atomic_connector_set_property(struct drm_connector *connector,
854 struct drm_connector_state *state, struct drm_property *property,
855 uint64_t val)
856 {
857 struct drm_device *dev = connector->dev;
858 struct drm_mode_config *config = &dev->mode_config;
859
860 if (property == config->prop_crtc_id) {
861 struct drm_crtc *crtc = drm_crtc_find(dev, val);
862 return drm_atomic_set_crtc_for_connector(state, crtc);
863 } else if (property == config->dpms_property) {
864 /* setting DPMS property requires special handling, which
865 * is done in legacy setprop path for us. Disallow (for
866 * now?) atomic writes to DPMS property:
867 */
868 return -EINVAL;
869 } else if (connector->funcs->atomic_set_property) {
870 return connector->funcs->atomic_set_property(connector,
871 state, property, val);
872 } else {
873 return -EINVAL;
874 }
875 }
876 EXPORT_SYMBOL(drm_atomic_connector_set_property);
877
878 /*
879 * This function handles generic/core properties and calls out to
880 * driver's ->atomic_get_property() for driver properties. To ensure
881 * consistent behavior you must call this function rather than the
882 * driver hook directly.
883 */
884 static int
885 drm_atomic_connector_get_property(struct drm_connector *connector,
886 const struct drm_connector_state *state,
887 struct drm_property *property, uint64_t *val)
888 {
889 struct drm_device *dev = connector->dev;
890 struct drm_mode_config *config = &dev->mode_config;
891
892 if (property == config->prop_crtc_id) {
893 *val = (state->crtc) ? state->crtc->base.id : 0;
894 } else if (property == config->dpms_property) {
895 *val = connector->dpms;
896 } else if (connector->funcs->atomic_get_property) {
897 return connector->funcs->atomic_get_property(connector,
898 state, property, val);
899 } else {
900 return -EINVAL;
901 }
902
903 return 0;
904 }
905
906 int drm_atomic_get_property(struct drm_mode_object *obj,
907 struct drm_property *property, uint64_t *val)
908 {
909 struct drm_device *dev = property->dev;
910 int ret;
911
912 switch (obj->type) {
913 case DRM_MODE_OBJECT_CONNECTOR: {
914 struct drm_connector *connector = obj_to_connector(obj);
915 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
916 ret = drm_atomic_connector_get_property(connector,
917 connector->state, property, val);
918 break;
919 }
920 case DRM_MODE_OBJECT_CRTC: {
921 struct drm_crtc *crtc = obj_to_crtc(obj);
922 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
923 ret = drm_atomic_crtc_get_property(crtc,
924 crtc->state, property, val);
925 break;
926 }
927 case DRM_MODE_OBJECT_PLANE: {
928 struct drm_plane *plane = obj_to_plane(obj);
929 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
930 ret = drm_atomic_plane_get_property(plane,
931 plane->state, property, val);
932 break;
933 }
934 default:
935 ret = -EINVAL;
936 break;
937 }
938
939 return ret;
940 }
941
942 /**
943 * drm_atomic_set_crtc_for_plane - set crtc for plane
944 * @plane_state: the plane whose incoming state to update
945 * @crtc: crtc to use for the plane
946 *
947 * Changing the assigned crtc for a plane requires us to grab the lock and state
948 * for the new crtc, as needed. This function takes care of all these details
949 * besides updating the pointer in the state object itself.
950 *
951 * Returns:
952 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
953 * then the w/w mutex code has detected a deadlock and the entire atomic
954 * sequence must be restarted. All other errors are fatal.
955 */
956 int
957 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
958 struct drm_crtc *crtc)
959 {
960 struct drm_plane *plane = plane_state->plane;
961 struct drm_crtc_state *crtc_state;
962
963 if (plane_state->crtc) {
964 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
965 plane_state->crtc);
966 if (WARN_ON(IS_ERR(crtc_state)))
967 return PTR_ERR(crtc_state);
968
969 crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
970 }
971
972 plane_state->crtc = crtc;
973
974 if (crtc) {
975 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
976 crtc);
977 if (IS_ERR(crtc_state))
978 return PTR_ERR(crtc_state);
979 crtc_state->plane_mask |= (1 << drm_plane_index(plane));
980 }
981
982 if (crtc)
983 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
984 plane_state, crtc->base.id);
985 else
986 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
987 plane_state);
988
989 return 0;
990 }
991 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
992
993 /**
994 * drm_atomic_set_fb_for_plane - set framebuffer for plane
995 * @plane_state: atomic state object for the plane
996 * @fb: fb to use for the plane
997 *
998 * Changing the assigned framebuffer for a plane requires us to grab a reference
999 * to the new fb and drop the reference to the old fb, if there is one. This
1000 * function takes care of all these details besides updating the pointer in the
1001 * state object itself.
1002 */
1003 void
1004 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
1005 struct drm_framebuffer *fb)
1006 {
1007 if (plane_state->fb)
1008 drm_framebuffer_unreference(plane_state->fb);
1009 if (fb)
1010 drm_framebuffer_reference(fb);
1011 plane_state->fb = fb;
1012
1013 if (fb)
1014 DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
1015 fb->base.id, plane_state);
1016 else
1017 DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
1018 plane_state);
1019 }
1020 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
1021
1022 /**
1023 * drm_atomic_set_crtc_for_connector - set crtc for connector
1024 * @conn_state: atomic state object for the connector
1025 * @crtc: crtc to use for the connector
1026 *
1027 * Changing the assigned crtc for a connector requires us to grab the lock and
1028 * state for the new crtc, as needed. This function takes care of all these
1029 * details besides updating the pointer in the state object itself.
1030 *
1031 * Returns:
1032 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1033 * then the w/w mutex code has detected a deadlock and the entire atomic
1034 * sequence must be restarted. All other errors are fatal.
1035 */
1036 int
1037 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1038 struct drm_crtc *crtc)
1039 {
1040 struct drm_crtc_state *crtc_state;
1041
1042 if (crtc) {
1043 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1044 if (IS_ERR(crtc_state))
1045 return PTR_ERR(crtc_state);
1046 }
1047
1048 conn_state->crtc = crtc;
1049
1050 if (crtc)
1051 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
1052 conn_state, crtc->base.id);
1053 else
1054 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
1055 conn_state);
1056
1057 return 0;
1058 }
1059 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
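/*
 * Example (editorial sketch): routing a connector to a different CRTC as
 * part of a modeset. new_crtc and old_crtc are placeholder variables here;
 * pulling in the remaining connectors on the old CRTC lets its enable
 * state be recomputed, see drm_atomic_add_affected_connectors() below.
 *
 *	struct drm_connector_state *conn_state;
 *	int ret;
 *
 *	conn_state = drm_atomic_get_connector_state(state, connector);
 *	if (IS_ERR(conn_state))
 *		return PTR_ERR(conn_state);
 *
 *	ret = drm_atomic_set_crtc_for_connector(conn_state, new_crtc);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_atomic_add_affected_connectors(state, old_crtc);
 *	if (ret)
 *		return ret;
 */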
1060
1061 /**
1062 * drm_atomic_add_affected_connectors - add connectors for crtc
1063 * @state: atomic state
1064 * @crtc: DRM crtc
1065 *
1066 * This function walks the current configuration and adds all connectors
1067 * currently using @crtc to the atomic configuration @state. Note that this
1068 * function must acquire the connection mutex. This can potentially cause
1069 * unneeded serialization if the update is just for the planes on one crtc. Hence
1070 * drivers and helpers should only call this when really needed (e.g. when a
1071 * full modeset needs to happen due to some change).
1072 *
1073 * Returns:
1074 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1075 * then the w/w mutex code has detected a deadlock and the entire atomic
1076 * sequence must be restarted. All other errors are fatal.
1077 */
1078 int
1079 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1080 struct drm_crtc *crtc)
1081 {
1082 struct drm_mode_config *config = &state->dev->mode_config;
1083 struct drm_connector *connector;
1084 struct drm_connector_state *conn_state;
1085 int ret;
1086
1087 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1088 if (ret)
1089 return ret;
1090
1091 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
1092 crtc->base.id, state);
1093
1094 /*
1095 * Changed connectors are already in @state, so we only need to look at the
1096 * current configuration.
1097 */
1098 drm_for_each_connector(connector, state->dev) {
1099 if (connector->state->crtc != crtc)
1100 continue;
1101
1102 conn_state = drm_atomic_get_connector_state(state, connector);
1103 if (IS_ERR(conn_state))
1104 return PTR_ERR(conn_state);
1105 }
1106
1107 return 0;
1108 }
1109 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1110
1111 /**
1112 * drm_atomic_add_affected_planes - add planes for crtc
1113 * @state: atomic state
1114 * @crtc: DRM crtc
1115 *
1116 * This function walks the current configuration and adds all planes
1117 * currently used by @crtc to the atomic configuration @state. This is useful
1118 * when an atomic commit also needs to check all currently enabled planes on
1119 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1120 * to avoid special code to force-enable all planes.
1121 *
1122 * Since acquiring a plane state will always also acquire the w/w mutex of the
1123 * current CRTC for that plane (if there is any), adding all the plane states for
1124 * a CRTC will not reduce parallelism of atomic updates.
1125 *
1126 * Returns:
1127 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1128 * then the w/w mutex code has detected a deadlock and the entire atomic
1129 * sequence must be restarted. All other errors are fatal.
1130 */
1131 int
1132 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1133 struct drm_crtc *crtc)
1134 {
1135 struct drm_plane *plane;
1136
1137 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
1138
1139 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1140 struct drm_plane_state *plane_state =
1141 drm_atomic_get_plane_state(state, plane);
1142
1143 if (IS_ERR(plane_state))
1144 return PTR_ERR(plane_state);
1145 }
1146 return 0;
1147 }
1148 EXPORT_SYMBOL(drm_atomic_add_affected_planes);
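/*
 * Example (editorial sketch): when disabling a CRTC, the planes currently
 * on it usually need to be shut off as well. A loop doing that with the
 * iterators already used elsewhere in this file could look like this:
 *
 *	ret = drm_atomic_add_affected_planes(state, crtc);
 *	if (ret)
 *		return ret;
 *
 *	for_each_plane_in_state(state, plane, plane_state, i) {
 *		if (plane_state->crtc != crtc)
 *			continue;
 *
 *		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
 *		if (ret)
 *			return ret;
 *		drm_atomic_set_fb_for_plane(plane_state, NULL);
 *	}
 */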
1149
1150 /**
1151 * drm_atomic_connectors_for_crtc - count number of connected outputs
1152 * @state: atomic state
1153 * @crtc: DRM crtc
1154 *
1155 * This function counts all connectors which will be connected to @crtc
1156 * according to @state. Useful to recompute the enable state for @crtc.
1157 */
1158 int
1159 drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
1160 struct drm_crtc *crtc)
1161 {
1162 struct drm_connector *connector;
1163 struct drm_connector_state *conn_state;
1164
1165 int i, num_connected_connectors = 0;
1166
1167 for_each_connector_in_state(state, connector, conn_state, i) {
1168 if (conn_state->crtc == crtc)
1169 num_connected_connectors++;
1170 }
1171
1172 DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
1173 state, num_connected_connectors, crtc->base.id);
1174
1175 return num_connected_connectors;
1176 }
1177 EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
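/*
 * Example (editorial sketch): recomputing whether a CRTC should stay
 * enabled after its connectors have been rerouted, roughly what the atomic
 * helpers do when a connector is moved away from a CRTC.
 *
 *	if (drm_atomic_connectors_for_crtc(state, crtc) == 0) {
 *		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
 *		if (ret)
 *			return ret;
 *		crtc_state->active = false;
 *	}
 */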
1178
1179 /**
1180 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
1181 * @state: atomic state
1182 *
1183 * This function should be used by legacy entry points which don't understand
1184 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
1185 * the slowpath has completed.
1186 */
1187 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1188 {
1189 int ret;
1190
1191 retry:
1192 drm_modeset_backoff(state->acquire_ctx);
1193
1194 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
1195 state->acquire_ctx);
1196 if (ret)
1197 goto retry;
1198 ret = drm_modeset_lock_all_crtcs(state->dev,
1199 state->acquire_ctx);
1200 if (ret)
1201 goto retry;
1202 }
1203 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
1204
1205 /**
1206 * drm_atomic_check_only - check whether a given config would work
1207 * @state: atomic configuration to check
1208 *
1209 * Note that this function can return -EDEADLK if the driver needed to acquire
1210 * more locks but encountered a deadlock. The caller must then do the usual w/w
1211 * backoff dance and restart. All other errors are fatal.
1212 *
1213 * Returns:
1214 * 0 on success, negative error code on failure.
1215 */
1216 int drm_atomic_check_only(struct drm_atomic_state *state)
1217 {
1218 struct drm_device *dev = state->dev;
1219 struct drm_mode_config *config = &dev->mode_config;
1220 struct drm_plane *plane;
1221 struct drm_plane_state *plane_state;
1222 struct drm_crtc *crtc;
1223 struct drm_crtc_state *crtc_state;
1224 int i, ret = 0;
1225
1226 DRM_DEBUG_ATOMIC("checking %p\n", state);
1227
1228 for_each_plane_in_state(state, plane, plane_state, i) {
1229 ret = drm_atomic_plane_check(plane, plane_state);
1230 if (ret) {
1231 DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
1232 plane->base.id);
1233 return ret;
1234 }
1235 }
1236
1237 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1238 ret = drm_atomic_crtc_check(crtc, crtc_state);
1239 if (ret) {
1240 DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
1241 crtc->base.id);
1242 return ret;
1243 }
1244 }
1245
1246 if (config->funcs->atomic_check)
1247 ret = config->funcs->atomic_check(state->dev, state);
1248
1249 if (!state->allow_modeset) {
1250 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1251 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1252 DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
1253 crtc->base.id);
1254 return -EINVAL;
1255 }
1256 }
1257 }
1258
1259 return ret;
1260 }
1261 EXPORT_SYMBOL(drm_atomic_check_only);
1262
1263 /**
1264 * drm_atomic_commit - commit configuration atomically
1265 * @state: atomic configuration to check
1266 *
1267 * Note that this function can return -EDEADLK if the driver needed to acquire
1268 * more locks but encountered a deadlock. The caller must then do the usual w/w
1269 * backoff dance and restart. All other errors are fatal.
1270 *
1271 * Also note that on successful execution ownership of @state is transferred
1272 * from the caller of this function to the function itself. The caller must not
1273 * free or in any other way access @state. If the function fails then the caller
1274 * must clean up @state itself.
1275 *
1276 * Returns:
1277 * 0 on success, negative error code on failure.
1278 */
1279 int drm_atomic_commit(struct drm_atomic_state *state)
1280 {
1281 struct drm_mode_config *config = &state->dev->mode_config;
1282 int ret;
1283
1284 ret = drm_atomic_check_only(state);
1285 if (ret)
1286 return ret;
1287
1288 DRM_DEBUG_ATOMIC("committing %p\n", state);
1289
1290 return config->funcs->atomic_commit(state->dev, state, false);
1291 }
1292 EXPORT_SYMBOL(drm_atomic_commit);
1293
1294 /**
1295 * drm_atomic_async_commit - atomic&async configuration commit
1296 * @state: atomic configuration to check
1297 *
1298 * Note that this function can return -EDEADLK if the driver needed to acquire
1299 * more locks but encountered a deadlock. The caller must then do the usual w/w
1300 * backoff dance and restart. All other errors are fatal.
1301 *
1302 * Also note that on successful execution ownership of @state is transferred
1303 * from the caller of this function to the function itself. The caller must not
1304 * free or in any other way access @state. If the function fails then the caller
1305 * must clean up @state itself.
1306 *
1307 * Returns:
1308 * 0 on success, negative error code on failure.
1309 */
1310 int drm_atomic_async_commit(struct drm_atomic_state *state)
1311 {
1312 struct drm_mode_config *config = &state->dev->mode_config;
1313 int ret;
1314
1315 ret = drm_atomic_check_only(state);
1316 if (ret)
1317 return ret;
1318
1319 DRM_DEBUG_ATOMIC("committing %p asynchronously\n", state);
1320
1321 return config->funcs->atomic_commit(state->dev, state, true);
1322 }
1323 EXPORT_SYMBOL(drm_atomic_async_commit);
1324
1325 /*
1326 * The big monster ioctl
1327 */
1328
1329 static struct drm_pending_vblank_event *create_vblank_event(
1330 struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
1331 {
1332 struct drm_pending_vblank_event *e = NULL;
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&dev->event_lock, flags);
1336 if (file_priv->event_space < sizeof e->event) {
1337 spin_unlock_irqrestore(&dev->event_lock, flags);
1338 goto out;
1339 }
1340 file_priv->event_space -= sizeof e->event;
1341 spin_unlock_irqrestore(&dev->event_lock, flags);
1342
1343 e = kzalloc(sizeof *e, GFP_KERNEL);
1344 if (e == NULL) {
1345 spin_lock_irqsave(&dev->event_lock, flags);
1346 file_priv->event_space += sizeof e->event;
1347 spin_unlock_irqrestore(&dev->event_lock, flags);
1348 goto out;
1349 }
1350
1351 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
1352 e->event.base.length = sizeof e->event;
1353 e->event.user_data = user_data;
1354 e->base.event = &e->event.base;
1355 e->base.file_priv = file_priv;
1356 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
1357
1358 out:
1359 return e;
1360 }
1361
1362 static void destroy_vblank_event(struct drm_device *dev,
1363 struct drm_file *file_priv, struct drm_pending_vblank_event *e)
1364 {
1365 unsigned long flags;
1366
1367 spin_lock_irqsave(&dev->event_lock, flags);
1368 file_priv->event_space += sizeof e->event;
1369 spin_unlock_irqrestore(&dev->event_lock, flags);
1370 kfree(e);
1371 }
1372
1373 static int atomic_set_prop(struct drm_atomic_state *state,
1374 struct drm_mode_object *obj, struct drm_property *prop,
1375 uint64_t prop_value)
1376 {
1377 struct drm_mode_object *ref;
1378 int ret;
1379
1380 if (!drm_property_change_valid_get(prop, prop_value, &ref))
1381 return -EINVAL;
1382
1383 switch (obj->type) {
1384 case DRM_MODE_OBJECT_CONNECTOR: {
1385 struct drm_connector *connector = obj_to_connector(obj);
1386 struct drm_connector_state *connector_state;
1387
1388 connector_state = drm_atomic_get_connector_state(state, connector);
1389 if (IS_ERR(connector_state)) {
1390 ret = PTR_ERR(connector_state);
1391 break;
1392 }
1393
1394 ret = drm_atomic_connector_set_property(connector,
1395 connector_state, prop, prop_value);
1396 break;
1397 }
1398 case DRM_MODE_OBJECT_CRTC: {
1399 struct drm_crtc *crtc = obj_to_crtc(obj);
1400 struct drm_crtc_state *crtc_state;
1401
1402 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1403 if (IS_ERR(crtc_state)) {
1404 ret = PTR_ERR(crtc_state);
1405 break;
1406 }
1407
1408 ret = drm_atomic_crtc_set_property(crtc,
1409 crtc_state, prop, prop_value);
1410 break;
1411 }
1412 case DRM_MODE_OBJECT_PLANE: {
1413 struct drm_plane *plane = obj_to_plane(obj);
1414 struct drm_plane_state *plane_state;
1415
1416 plane_state = drm_atomic_get_plane_state(state, plane);
1417 if (IS_ERR(plane_state)) {
1418 ret = PTR_ERR(plane_state);
1419 break;
1420 }
1421
1422 ret = drm_atomic_plane_set_property(plane,
1423 plane_state, prop, prop_value);
1424 break;
1425 }
1426 default:
1427 ret = -EINVAL;
1428 break;
1429 }
1430
1431 drm_property_change_valid_put(prop, ref);
1432 return ret;
1433 }
1434
1435 int drm_mode_atomic_ioctl(struct drm_device *dev,
1436 void *data, struct drm_file *file_priv)
1437 {
1438 struct drm_mode_atomic *arg = data;
1439 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
1440 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
1441 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
1442 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
1443 unsigned int copied_objs, copied_props;
1444 struct drm_atomic_state *state;
1445 struct drm_modeset_acquire_ctx ctx;
1446 struct drm_plane *plane;
1447 struct drm_crtc *crtc;
1448 struct drm_crtc_state *crtc_state;
1449 unsigned plane_mask = 0;
1450 int ret = 0;
1451 unsigned int i, j;
1452
1453 /* disallow for drivers not supporting atomic: */
1454 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1455 return -EINVAL;
1456
1457 /* disallow for userspace that has not enabled atomic cap (even
1458 * though this may be a bit overkill, since legacy userspace
1459 * wouldn't know how to call this ioctl)
1460 */
1461 if (!file_priv->atomic)
1462 return -EINVAL;
1463
1464 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
1465 return -EINVAL;
1466
1467 if (arg->reserved)
1468 return -EINVAL;
1469
1470 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
1471 !dev->mode_config.async_page_flip)
1472 return -EINVAL;
1473
1474 /* can't test and expect an event at the same time. */
1475 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
1476 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1477 return -EINVAL;
1478
1479 drm_modeset_acquire_init(&ctx, 0);
1480
1481 state = drm_atomic_state_alloc(dev);
1482 if (!state)
1483 return -ENOMEM;
1484
1485 state->acquire_ctx = &ctx;
1486 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1487
1488 retry:
1489 copied_objs = 0;
1490 copied_props = 0;
1491
1492 for (i = 0; i < arg->count_objs; i++) {
1493 uint32_t obj_id, count_props;
1494 struct drm_mode_object *obj;
1495
1496 if (get_user(obj_id, objs_ptr + copied_objs)) {
1497 ret = -EFAULT;
1498 goto out;
1499 }
1500
1501 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
1502 if (!obj || !obj->properties) {
1503 ret = -ENOENT;
1504 goto out;
1505 }
1506
1507 if (get_user(count_props, count_props_ptr + copied_objs)) {
1508 ret = -EFAULT;
1509 goto out;
1510 }
1511
1512 copied_objs++;
1513
1514 for (j = 0; j < count_props; j++) {
1515 uint32_t prop_id;
1516 uint64_t prop_value;
1517 struct drm_property *prop;
1518
1519 if (get_user(prop_id, props_ptr + copied_props)) {
1520 ret = -EFAULT;
1521 goto out;
1522 }
1523
1524 prop = drm_property_find(dev, prop_id);
1525 if (!prop) {
1526 ret = -ENOENT;
1527 goto out;
1528 }
1529
1530 if (copy_from_user(&prop_value,
1531 prop_values_ptr + copied_props,
1532 sizeof(prop_value))) {
1533 ret = -EFAULT;
1534 goto out;
1535 }
1536
1537 ret = atomic_set_prop(state, obj, prop, prop_value);
1538 if (ret)
1539 goto out;
1540
1541 copied_props++;
1542 }
1543
1544 if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
1545 !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
1546 plane = obj_to_plane(obj);
1547 plane_mask |= (1 << drm_plane_index(plane));
1548 plane->old_fb = plane->fb;
1549 }
1550 }
1551
1552 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1553 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1554 struct drm_pending_vblank_event *e;
1555
1556 e = create_vblank_event(dev, file_priv, arg->user_data);
1557 if (!e) {
1558 ret = -ENOMEM;
1559 goto out;
1560 }
1561
1562 crtc_state->event = e;
1563 }
1564 }
1565
1566 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
1567 /*
1568 * Unlike commit, check_only does not clean up state.
1569 * Below we call drm_atomic_state_free for it.
1570 */
1571 ret = drm_atomic_check_only(state);
1572 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
1573 ret = drm_atomic_async_commit(state);
1574 } else {
1575 ret = drm_atomic_commit(state);
1576 }
1577
1578 out:
1579 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1580 * locks (ie. while it is still safe to deref plane->state). We
1581 * need to do this here because the driver entry points cannot
1582 * distinguish between legacy and atomic ioctls.
1583 */
1584 drm_for_each_plane_mask(plane, dev, plane_mask) {
1585 if (ret == 0) {
1586 struct drm_framebuffer *new_fb = plane->state->fb;
1587 if (new_fb)
1588 drm_framebuffer_reference(new_fb);
1589 plane->fb = new_fb;
1590 plane->crtc = plane->state->crtc;
1591
1592 if (plane->old_fb)
1593 drm_framebuffer_unreference(plane->old_fb);
1594 }
1595 plane->old_fb = NULL;
1596 }
1597
1598 if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1599 /*
1600 * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive;
1601 * if they weren't, this code would need to be called on
1602 * success for TEST_ONLY too.
1603 */
1604
1605 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1606 if (!crtc_state->event)
1607 continue;
1608
1609 destroy_vblank_event(dev, file_priv,
1610 crtc_state->event);
1611 }
1612 }
1613
1614 if (ret == -EDEADLK) {
1615 drm_atomic_state_clear(state);
1616 drm_modeset_backoff(&ctx);
1617 goto retry;
1618 }
1619
1620 if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
1621 drm_atomic_state_free(state);
1622
1623 drm_modeset_drop_locks(&ctx);
1624 drm_modeset_acquire_fini(&ctx);
1625
1626 return ret;
1627 }
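/*
 * Userspace view (editorial sketch, not part of the kernel file): the ioctl
 * above consumes four parallel arrays. For each object i, the next
 * count_props_ptr[i] entries of props_ptr/prop_values_ptr apply to
 * objs_ptr[i]. The IDs below (plane_id, fb_id_prop, crtc_id_prop, fb_id,
 * crtc_id) are placeholders that real code discovers through the resource
 * and property-enumeration ioctls.
 *
 *	uint32_t objs[] = { plane_id };
 *	uint32_t count_props[] = { 2 };
 *	uint32_t props[] = { fb_id_prop, crtc_id_prop };
 *	uint64_t prop_values[] = { fb_id, crtc_id };
 *
 *	struct drm_mode_atomic arg = {
 *		.flags = DRM_MODE_ATOMIC_ALLOW_MODESET,
 *		.count_objs = 1,
 *		.objs_ptr = (uintptr_t)objs,
 *		.count_props_ptr = (uintptr_t)count_props,
 *		.props_ptr = (uintptr_t)props,
 *		.prop_values_ptr = (uintptr_t)prop_values,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &arg);
 */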