/**
 * \file drm_irq.c
 * IRQ support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#include <linux/interrupt.h>    /* For task queue support */

/**
 * Get interrupt from bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_irq_busid structure.
 * \return zero on success or a negative number on failure.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device this DRM instance is attached to.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_irq_busid *p = data;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
            p->devnum != PCI_SLOT(dev->pdev->devfn) ||
            p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;

        p->irq = dev->pdev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);

        return 0;
}

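/*
 * Timer callback for deferred vblank interrupt disable.  For each CRTC whose
 * vblank reference count has dropped to zero, save the current hardware
 * counter in last_vblank[] (so drm_update_vblank_count() can account for the
 * time interrupts were off) and ask the driver to disable the interrupt.
 */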
static void vblank_disable_fn(unsigned long arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        unsigned long irqflags;
        int i;

        if (!dev->vblank_disable_allowed)
                return;

        for (i = 0; i < dev->num_crtcs; i++) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
                    dev->vblank_enabled[i]) {
                        DRM_DEBUG("disabling vblank on crtc %d\n", i);
                        dev->last_vblank[i] =
                                dev->driver->get_vblank_counter(dev, i);
                        dev->driver->disable_vblank(dev, i);
                        dev->vblank_enabled[i] = 0;
                }
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
        }
}

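/*
 * Undo drm_vblank_init(): stop the disable timer, turn off any vblank
 * interrupts that are still enabled, and free the per-CRTC vblank state.
 */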
static void drm_vblank_cleanup(struct drm_device *dev)
{
        /* Bail if the driver didn't call drm_vblank_init() */
        if (dev->num_crtcs == 0)
                return;

        del_timer(&dev->vblank_disable_timer);

        vblank_disable_fn((unsigned long)dev);

        drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
                 DRM_MEM_DRIVER);
        drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
                 DRM_MEM_DRIVER);
        drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
                 dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
                 dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
                 dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
                 DRM_MEM_DRIVER);
        drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
                 dev->num_crtcs, DRM_MEM_DRIVER);

        dev->num_crtcs = 0;
}

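/**
 * drm_vblank_init - initialize vblank support for a device
 * @dev: DRM device
 * @num_crtcs: number of CRTCs supported by @dev
 *
 * Allocates and initializes the per-CRTC vblank state: wait queues, signal
 * lists, counters and reference counts, plus the timer used to disable
 * vblank interrupts once they are no longer referenced.  Drivers that use
 * this vblank infrastructure call it with the number of CRTCs they support.
 *
 * RETURNS
 * Zero on success, -ENOMEM if any allocation fails (partially allocated
 * state is cleaned up before returning).
 */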
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
        int i, ret = -ENOMEM;

        setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
                    (unsigned long)dev);
        spin_lock_init(&dev->vbl_lock);
        atomic_set(&dev->vbl_signal_pending, 0);
        dev->num_crtcs = num_crtcs;

        dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
                                   DRM_MEM_DRIVER);
        if (!dev->vbl_queue)
                goto err;

        dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
                                  DRM_MEM_DRIVER);
        if (!dev->vbl_sigs)
                goto err;

        dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
                                       DRM_MEM_DRIVER);
        if (!dev->_vblank_count)
                goto err;

        dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
                                         DRM_MEM_DRIVER);
        if (!dev->vblank_refcount)
                goto err;

        dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
                                         DRM_MEM_DRIVER);
        if (!dev->vblank_enabled)
                goto err;

        dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
        if (!dev->last_vblank)
                goto err;

        dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
                                           DRM_MEM_DRIVER);
        if (!dev->vblank_inmodeset)
                goto err;

        /* Zero per-crtc vblank stuff */
        for (i = 0; i < num_crtcs; i++) {
                init_waitqueue_head(&dev->vbl_queue[i]);
                INIT_LIST_HEAD(&dev->vbl_sigs[i]);
                atomic_set(&dev->_vblank_count[i], 0);
                atomic_set(&dev->vblank_refcount[i], 0);
        }

        dev->vblank_disable_allowed = 0;

        return 0;

err:
        drm_vblank_cleanup(dev);
        return ret;
}
EXPORT_SYMBOL(drm_vblank_init);

/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 *
 * Initializes the IRQ-related data and installs the handler, calling the
 * driver's \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall()
 * functions before and after the installation.
 */
int drm_irq_install(struct drm_device *dev)
{
        int ret = 0;
        unsigned long sh_flags = 0;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        if (dev->pdev->irq == 0)
                return -EINVAL;

        mutex_lock(&dev->struct_mutex);

        /* Driver must have been initialized */
        if (!dev->dev_private) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (dev->irq_enabled) {
                mutex_unlock(&dev->struct_mutex);
                return -EBUSY;
        }
        dev->irq_enabled = 1;
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG("irq=%d\n", dev->pdev->irq);

        /* Before installing handler */
        dev->driver->irq_preinstall(dev);

        /* Install handler */
        if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
                sh_flags = IRQF_SHARED;

        ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
                          sh_flags, dev->devname, dev);
        /* Expose the device irq number to drivers that want to export it for
         * whatever reason.
         */
        dev->irq = dev->pdev->irq;
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
                dev->irq_enabled = 0;
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* After installing handler */
        ret = dev->driver->irq_postinstall(dev);
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
                dev->irq_enabled = 0;
                mutex_unlock(&dev->struct_mutex);
        }

        return ret;
}
EXPORT_SYMBOL(drm_irq_install);

/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 *
 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
 */
int drm_irq_uninstall(struct drm_device *dev)
{
        int irq_enabled;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        mutex_lock(&dev->struct_mutex);
        irq_enabled = dev->irq_enabled;
        dev->irq_enabled = 0;
        mutex_unlock(&dev->struct_mutex);

        if (!irq_enabled)
                return -EINVAL;

        DRM_DEBUG("irq=%d\n", dev->pdev->irq);

        dev->driver->irq_uninstall(dev);

        free_irq(dev->pdev->irq, dev);

        drm_vblank_cleanup(dev);

        dev->locked_tasklet_func = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);

/**
 * IRQ control ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_control structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls irq_install() or irq_uninstall() according to \p arg.
 */
int drm_control(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
{
        struct drm_control *ctl = data;

        /* If we don't have an IRQ we fall back for compatibility reasons;
         * this used to be a separate function in drm_dma.h.
         */

        switch (ctl->func) {
        case DRM_INST_HANDLER:
                if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                        return 0;
                if (dev->if_version < DRM_IF_VERSION(1, 2) &&
                    ctl->irq != dev->pdev->irq)
                        return -EINVAL;
                return drm_irq_install(dev);
        case DRM_UNINST_HANDLER:
                if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                        return 0;
                return drm_irq_uninstall(dev);
        default:
                return -EINVAL;
        }
}

/**
 * drm_vblank_count - retrieve "cooked" vblank counter value
 * @dev: DRM device
 * @crtc: which counter to retrieve
 *
 * Fetches the "cooked" vblank count value that represents the number of
 * vblank events since the system was booted, including lost events due to
 * modesetting activity.
 */
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
        return atomic_read(&dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_vblank_count);

/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
 * @crtc: counter to update
 *
 * Call back into the driver to update the appropriate vblank counter
 * (specified by @crtc).  Deal with wraparound, if it occurred, and
 * update the last read value so we can deal with wraparound on the next
 * call if necessary.
 *
 * Only necessary when going from off->on, to account for frames we
 * didn't get an interrupt for.
 *
 * Note: caller must hold dev->vbl_lock since this reads & writes
 * device vblank fields.
 */
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
        u32 cur_vblank, diff;

        /*
         * Interrupts were disabled prior to this call, so deal with counter
         * wrap if needed.
         * NOTE! It's possible we lost a full dev->max_vblank_count events
         * here if the register is small or we had vblank interrupts off for
         * a long time.
         */
        cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
        diff = cur_vblank - dev->last_vblank[crtc];
        if (cur_vblank < dev->last_vblank[crtc]) {
                diff += dev->max_vblank_count;

                DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
                          crtc, dev->last_vblank[crtc], cur_vblank, diff);
        }

        DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
                  crtc, diff);

        atomic_add(diff, &dev->_vblank_count[crtc]);
}

/**
 * drm_vblank_get - get a reference count on vblank events
 * @dev: DRM device
 * @crtc: which CRTC to own
 *
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.
 *
 * RETURNS
 * Zero on success, nonzero on failure.
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
        unsigned long irqflags;
        int ret = 0;

        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        /* Going from 0->1 means we have to enable interrupts again */
        if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
            !dev->vblank_enabled[crtc]) {
                ret = dev->driver->enable_vblank(dev, crtc);
                DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
                if (ret)
                        atomic_dec(&dev->vblank_refcount[crtc]);
                else {
                        dev->vblank_enabled[crtc] = 1;
                        drm_update_vblank_count(dev, crtc);
                }
        }
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

        return ret;
}
EXPORT_SYMBOL(drm_vblank_get);

/**
 * drm_vblank_put - give up ownership of vblank events
 * @dev: DRM device
 * @crtc: which counter to give up
 *
 * Release ownership of a given vblank counter, turning off interrupts
 * if possible.
 */
void drm_vblank_put(struct drm_device *dev, int crtc)
{
        /* Last user schedules interrupt disable */
        if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
                mod_timer(&dev->vblank_disable_timer, jiffies + 5 * DRM_HZ);
}
EXPORT_SYMBOL(drm_vblank_put);

/**
 * drm_modeset_ctl - handle vblank event counter changes across mode switch
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
 * ioctls around modesetting so that any lost vblank events are accounted for.
 *
 * Generally the counter will reset across mode sets.  If interrupts are
 * enabled around this call, we don't have to do anything since the counter
 * will have already been incremented.
 */
int drm_modeset_ctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_modeset_ctl *modeset = data;
        unsigned long irqflags;
        int crtc, ret = 0;

        /* If drm_vblank_init() hasn't been called yet, just no-op */
        if (!dev->num_crtcs)
                goto out;

        crtc = modeset->crtc;
        if (crtc >= dev->num_crtcs) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * To avoid all the problems that might happen if interrupts
         * were enabled/disabled around or between these calls, we just
         * have the kernel take a reference on the CRTC (just once though
         * to avoid corrupting the count if multiple, mismatched calls occur),
         * so that interrupts remain enabled in the interim.
         */
        switch (modeset->cmd) {
        case _DRM_PRE_MODESET:
                if (!dev->vblank_inmodeset[crtc]) {
                        dev->vblank_inmodeset[crtc] = 1;
                        drm_vblank_get(dev, crtc);
                }
                break;
        case _DRM_POST_MODESET:
                if (dev->vblank_inmodeset[crtc]) {
                        spin_lock_irqsave(&dev->vbl_lock, irqflags);
                        dev->vblank_disable_allowed = 1;
                        dev->vblank_inmodeset[crtc] = 0;
                        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
                        drm_vblank_put(dev, crtc);
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

out:
        return ret;
}

/**
 * Wait for VBLANK.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested, checks whether this task has already scheduled
 * the same signal for the same vblank sequence number; nothing needs to be
 * done in that case.  If the number of tasks waiting for the interrupt
 * exceeds 100, the function fails.  Otherwise a new entry is added to
 * drm_device::vbl_sigs for this task.
 *
 * If a signal is not requested, sleeps until the requested sequence number
 * has passed or the wait times out.
 */
int drm_wait_vblank(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        union drm_wait_vblank *vblwait = data;
        int ret = 0;
        unsigned int flags, seq, crtc;

        if ((!dev->pdev->irq) || (!dev->irq_enabled))
                return -EINVAL;

        if (vblwait->request.type &
            ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
                DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
                          vblwait->request.type,
                          (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
                return -EINVAL;
        }

        flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
        crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

        if (crtc >= dev->num_crtcs)
                return -EINVAL;

        ret = drm_vblank_get(dev, crtc);
        if (ret) {
                DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
                return ret;
        }
        seq = drm_vblank_count(dev, crtc);

        switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
        case _DRM_VBLANK_RELATIVE:
                vblwait->request.sequence += seq;
                vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
                /* fall through */
        case _DRM_VBLANK_ABSOLUTE:
                break;
        default:
                ret = -EINVAL;
                goto done;
        }

        /* The "(a - b) <= (1 << 23)" test treats sequence numbers as
         * wrapping; it is true when the requested sequence has already
         * been reached.
         */
        if ((flags & _DRM_VBLANK_NEXTONMISS) &&
            (seq - vblwait->request.sequence) <= (1 << 23)) {
                vblwait->request.sequence = seq + 1;
        }

        if (flags & _DRM_VBLANK_SIGNAL) {
                unsigned long irqflags;
                struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
                struct drm_vbl_sig *vbl_sig;

                spin_lock_irqsave(&dev->vbl_lock, irqflags);

                /* Check if this task has already scheduled the same signal
                 * for the same vblank sequence number; nothing to be done in
                 * that case
                 */
                list_for_each_entry(vbl_sig, vbl_sigs, head) {
                        if (vbl_sig->sequence == vblwait->request.sequence
                            && vbl_sig->info.si_signo ==
                            vblwait->request.signal
                            && vbl_sig->task == current) {
                                spin_unlock_irqrestore(&dev->vbl_lock,
                                                       irqflags);
                                vblwait->reply.sequence = seq;
                                goto done;
                        }
                }

                if (atomic_read(&dev->vbl_signal_pending) >= 100) {
                        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
                        ret = -EBUSY;
                        goto done;
                }

                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

                vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
                                     DRM_MEM_DRIVER);
                if (!vbl_sig) {
                        ret = -ENOMEM;
                        goto done;
                }

                ret = drm_vblank_get(dev, crtc);
                if (ret) {
                        drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
                                 DRM_MEM_DRIVER);
                        /* Drop the reference taken at the top of this
                         * function instead of returning directly.
                         */
                        goto done;
                }

                atomic_inc(&dev->vbl_signal_pending);

                vbl_sig->sequence = vblwait->request.sequence;
                vbl_sig->info.si_signo = vblwait->request.signal;
                vbl_sig->task = current;

                spin_lock_irqsave(&dev->vbl_lock, irqflags);

                list_add_tail(&vbl_sig->head, vbl_sigs);

                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

                vblwait->reply.sequence = seq;
        } else {
                DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
                          vblwait->request.sequence, crtc);
                DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
                            ((drm_vblank_count(dev, crtc)
                              - vblwait->request.sequence) <= (1 << 23)));

                if (ret != -EINTR) {
                        struct timeval now;

                        do_gettimeofday(&now);

                        vblwait->reply.tval_sec = now.tv_sec;
                        vblwait->reply.tval_usec = now.tv_usec;
                        vblwait->reply.sequence = drm_vblank_count(dev, crtc);
                        DRM_DEBUG("returning %d to client\n",
                                  vblwait->reply.sequence);
                } else {
                        DRM_DEBUG("vblank wait interrupted by signal\n");
                }
        }

done:
        drm_vblank_put(dev, crtc);
        return ret;
}

/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 * \param crtc CRTC where the vblank event occurred
 *
 * Sends a signal to each task in drm_device::vbl_sigs whose requested vblank
 * sequence has been reached, and removes that entry from the list.
 */
static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
        struct drm_vbl_sig *vbl_sig, *tmp;
        struct list_head *vbl_sigs;
        unsigned int vbl_seq;
        unsigned long flags;

        spin_lock_irqsave(&dev->vbl_lock, flags);

        vbl_sigs = &dev->vbl_sigs[crtc];
        vbl_seq = drm_vblank_count(dev, crtc);

        list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
                if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
                        vbl_sig->info.si_code = vbl_seq;
                        send_sig_info(vbl_sig->info.si_signo,
                                      &vbl_sig->info, vbl_sig->task);

                        list_del(&vbl_sig->head);

                        drm_free(vbl_sig, sizeof(*vbl_sig),
                                 DRM_MEM_DRIVER);
                        atomic_dec(&dev->vbl_signal_pending);
                        drm_vblank_put(dev, crtc);
                }
        }

        spin_unlock_irqrestore(&dev->vbl_lock, flags);
}

/**
 * drm_handle_vblank - handle a vblank event
 * @dev: DRM device
 * @crtc: where this event occurred
 *
 * Drivers should call this routine in their vblank interrupt handlers to
 * update the vblank counter and send any signals that may be pending.
 */
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
        atomic_inc(&dev->_vblank_count[crtc]);
        DRM_WAKEUP(&dev->vbl_queue[crtc]);
        drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);

/**
 * Tasklet wrapper function.
 *
 * \param data DRM device in disguise.
 *
 * Attempts to grab the HW lock and calls the driver callback on success. On
 * failure, leaves the lock marked as contended so the callback can be called
 * from drm_unlock().
 */
static void drm_locked_tasklet_func(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        unsigned long irqflags;
        void (*tasklet_func)(struct drm_device *);

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);
        tasklet_func = dev->locked_tasklet_func;
        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

        if (!tasklet_func ||
            !drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                return;
        }

        dev->lock.lock_time = jiffies;
        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);
        tasklet_func = dev->locked_tasklet_func;
        dev->locked_tasklet_func = NULL;
        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

        if (tasklet_func != NULL)
                tasklet_func(dev);

        drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}

/**
 * Schedule a tasklet to call back a driver hook with the HW lock held.
 *
 * \param dev DRM device.
 * \param func Driver callback.
 *
 * This is intended for triggering actions that require the HW lock from an
 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
 * completes. Note that the callback may be called from interrupt or process
 * context, so it must not make any assumptions about this. Also, the HW lock
 * will be held with the kernel context or any client context.
 */
void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
        unsigned long irqflags;
        static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
            test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
                return;

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);

        if (dev->locked_tasklet_func) {
                spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
                return;
        }

        dev->locked_tasklet_func = func;

        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

        drm_tasklet.data = (unsigned long)dev;

        tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);