/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>
14 static int rpm_resume(struct device
*dev
, int rpmflags
);
17 * update_pm_runtime_accounting - Update the time accounting of power states
18 * @dev: Device to update the accounting for
20 * In order to be able to have time accounting of the various power states
21 * (as used by programs such as PowerTOP to show the effectiveness of runtime
22 * PM), we need to track the time spent in each state.
23 * update_pm_runtime_accounting must be called each time before the
24 * runtime_status field is updated, to account the time in the old state
27 void update_pm_runtime_accounting(struct device
*dev
)
29 unsigned long now
= jiffies
;
32 delta
= now
- dev
->power
.accounting_timestamp
;
37 dev
->power
.accounting_timestamp
= now
;
39 if (dev
->power
.disable_depth
> 0)
42 if (dev
->power
.runtime_status
== RPM_SUSPENDED
)
43 dev
->power
.suspended_jiffies
+= delta
;
45 dev
->power
.active_jiffies
+= delta
;
48 static void __update_runtime_status(struct device
*dev
, enum rpm_status status
)
50 update_pm_runtime_accounting(dev
);
51 dev
->power
.runtime_status
= status
;
55 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
56 * @dev: Device to handle.
58 static void pm_runtime_deactivate_timer(struct device
*dev
)
60 if (dev
->power
.timer_expires
> 0) {
61 del_timer(&dev
->power
.suspend_timer
);
62 dev
->power
.timer_expires
= 0;
67 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
68 * @dev: Device to handle.
70 static void pm_runtime_cancel_pending(struct device
*dev
)
72 pm_runtime_deactivate_timer(dev
);
74 * In case there's a request pending, make sure its work function will
75 * return without doing anything.
77 dev
->power
.request
= RPM_REQ_NONE
;
81 * rpm_check_suspend_allowed - Test whether a device may be suspended.
82 * @dev: Device to test.
84 static int rpm_check_suspend_allowed(struct device
*dev
)
88 if (dev
->power
.runtime_error
)
90 else if (atomic_read(&dev
->power
.usage_count
) > 0
91 || dev
->power
.disable_depth
> 0)
93 else if (!pm_children_suspended(dev
))
96 /* Pending resume requests take precedence over suspends. */
97 else if ((dev
->power
.deferred_resume
98 && dev
->power
.status
== RPM_SUSPENDING
)
99 || (dev
->power
.request_pending
100 && dev
->power
.request
== RPM_REQ_RESUME
))
102 else if (dev
->power
.runtime_status
== RPM_SUSPENDED
)
110 * rpm_idle - Notify device bus type if the device can be suspended.
111 * @dev: Device to notify the bus type about.
112 * @rpmflags: Flag bits.
114 * Check if the device's run-time PM status allows it to be suspended. If
115 * another idle notification has been started earlier, return immediately. If
116 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
117 * run the ->runtime_idle() callback directly.
119 * This function must be called under dev->power.lock with interrupts disabled.
121 static int rpm_idle(struct device
*dev
, int rpmflags
)
122 __releases(&dev
->power
.lock
) __acquires(&dev
->power
.lock
)
126 retval
= rpm_check_suspend_allowed(dev
);
128 ; /* Conditions are wrong. */
130 /* Idle notifications are allowed only in the RPM_ACTIVE state. */
131 else if (dev
->power
.runtime_status
!= RPM_ACTIVE
)
135 * Any pending request other than an idle notification takes
136 * precedence over us, except that the timer may be running.
138 else if (dev
->power
.request_pending
&&
139 dev
->power
.request
> RPM_REQ_IDLE
)
142 /* Act as though RPM_NOWAIT is always set. */
143 else if (dev
->power
.idle_notification
)
144 retval
= -EINPROGRESS
;
148 /* Pending requests need to be canceled. */
149 dev
->power
.request
= RPM_REQ_NONE
;
151 /* Carry out an asynchronous or a synchronous idle notification. */
152 if (rpmflags
& RPM_ASYNC
) {
153 dev
->power
.request
= RPM_REQ_IDLE
;
154 if (!dev
->power
.request_pending
) {
155 dev
->power
.request_pending
= true;
156 queue_work(pm_wq
, &dev
->power
.work
);
161 dev
->power
.idle_notification
= true;
163 if (dev
->bus
&& dev
->bus
->pm
&& dev
->bus
->pm
->runtime_idle
) {
164 spin_unlock_irq(&dev
->power
.lock
);
166 dev
->bus
->pm
->runtime_idle(dev
);
168 spin_lock_irq(&dev
->power
.lock
);
169 } else if (dev
->type
&& dev
->type
->pm
&& dev
->type
->pm
->runtime_idle
) {
170 spin_unlock_irq(&dev
->power
.lock
);
172 dev
->type
->pm
->runtime_idle(dev
);
174 spin_lock_irq(&dev
->power
.lock
);
175 } else if (dev
->class && dev
->class->pm
176 && dev
->class->pm
->runtime_idle
) {
177 spin_unlock_irq(&dev
->power
.lock
);
179 dev
->class->pm
->runtime_idle(dev
);
181 spin_lock_irq(&dev
->power
.lock
);
184 dev
->power
.idle_notification
= false;
185 wake_up_all(&dev
->power
.wait_queue
);
192 * rpm_suspend - Carry out run-time suspend of given device.
193 * @dev: Device to suspend.
194 * @rpmflags: Flag bits.
196 * Check if the device's run-time PM status allows it to be suspended. If
197 * another suspend has been started earlier, either return immediately or wait
198 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
199 * pending idle notification. If the RPM_ASYNC flag is set then queue a
200 * suspend request; otherwise run the ->runtime_suspend() callback directly.
201 * If a deferred resume was requested while the callback was running then carry
202 * it out; otherwise send an idle notification for the device (if the suspend
203 * failed) or for its parent (if the suspend succeeded).
205 * This function must be called under dev->power.lock with interrupts disabled.
207 static int rpm_suspend(struct device
*dev
, int rpmflags
)
208 __releases(&dev
->power
.lock
) __acquires(&dev
->power
.lock
)
210 struct device
*parent
= NULL
;
214 dev_dbg(dev
, "%s flags 0x%x\n", __func__
, rpmflags
);
217 retval
= rpm_check_suspend_allowed(dev
);
220 ; /* Conditions are wrong. */
222 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
223 else if (dev
->power
.runtime_status
== RPM_RESUMING
&&
224 !(rpmflags
& RPM_ASYNC
))
229 /* Other scheduled or pending requests need to be canceled. */
230 pm_runtime_cancel_pending(dev
);
232 if (dev
->power
.runtime_status
== RPM_SUSPENDING
) {
235 if (rpmflags
& (RPM_ASYNC
| RPM_NOWAIT
)) {
236 retval
= -EINPROGRESS
;
240 /* Wait for the other suspend running in parallel with us. */
242 prepare_to_wait(&dev
->power
.wait_queue
, &wait
,
243 TASK_UNINTERRUPTIBLE
);
244 if (dev
->power
.runtime_status
!= RPM_SUSPENDING
)
247 spin_unlock_irq(&dev
->power
.lock
);
251 spin_lock_irq(&dev
->power
.lock
);
253 finish_wait(&dev
->power
.wait_queue
, &wait
);
257 /* Carry out an asynchronous or a synchronous suspend. */
258 if (rpmflags
& RPM_ASYNC
) {
259 dev
->power
.request
= RPM_REQ_SUSPEND
;
260 if (!dev
->power
.request_pending
) {
261 dev
->power
.request_pending
= true;
262 queue_work(pm_wq
, &dev
->power
.work
);
267 __update_runtime_status(dev
, RPM_SUSPENDING
);
268 dev
->power
.deferred_resume
= false;
270 if (dev
->bus
&& dev
->bus
->pm
&& dev
->bus
->pm
->runtime_suspend
) {
271 spin_unlock_irq(&dev
->power
.lock
);
273 retval
= dev
->bus
->pm
->runtime_suspend(dev
);
275 spin_lock_irq(&dev
->power
.lock
);
276 dev
->power
.runtime_error
= retval
;
277 } else if (dev
->type
&& dev
->type
->pm
278 && dev
->type
->pm
->runtime_suspend
) {
279 spin_unlock_irq(&dev
->power
.lock
);
281 retval
= dev
->type
->pm
->runtime_suspend(dev
);
283 spin_lock_irq(&dev
->power
.lock
);
284 dev
->power
.runtime_error
= retval
;
285 } else if (dev
->class && dev
->class->pm
286 && dev
->class->pm
->runtime_suspend
) {
287 spin_unlock_irq(&dev
->power
.lock
);
289 retval
= dev
->class->pm
->runtime_suspend(dev
);
291 spin_lock_irq(&dev
->power
.lock
);
292 dev
->power
.runtime_error
= retval
;
298 __update_runtime_status(dev
, RPM_ACTIVE
);
299 dev
->power
.deferred_resume
= 0;
300 if (retval
== -EAGAIN
|| retval
== -EBUSY
) {
301 if (dev
->power
.timer_expires
== 0)
303 dev
->power
.runtime_error
= 0;
305 pm_runtime_cancel_pending(dev
);
308 __update_runtime_status(dev
, RPM_SUSPENDED
);
309 pm_runtime_deactivate_timer(dev
);
312 parent
= dev
->parent
;
313 atomic_add_unless(&parent
->power
.child_count
, -1, 0);
316 wake_up_all(&dev
->power
.wait_queue
);
318 if (dev
->power
.deferred_resume
) {
327 if (parent
&& !parent
->power
.ignore_children
) {
328 spin_unlock_irq(&dev
->power
.lock
);
330 pm_request_idle(parent
);
332 spin_lock_irq(&dev
->power
.lock
);
336 dev_dbg(dev
, "%s returns %d\n", __func__
, retval
);
342 * rpm_resume - Carry out run-time resume of given device.
343 * @dev: Device to resume.
344 * @rpmflags: Flag bits.
346 * Check if the device's run-time PM status allows it to be resumed. Cancel
347 * any scheduled or pending requests. If another resume has been started
348 * earlier, either return imediately or wait for it to finish, depending on the
349 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
350 * parallel with this function, either tell the other process to resume after
351 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
352 * flag is set then queue a resume request; otherwise run the
353 * ->runtime_resume() callback directly. Queue an idle notification for the
354 * device if the resume succeeded.
356 * This function must be called under dev->power.lock with interrupts disabled.
358 static int rpm_resume(struct device
*dev
, int rpmflags
)
359 __releases(&dev
->power
.lock
) __acquires(&dev
->power
.lock
)
361 struct device
*parent
= NULL
;
364 dev_dbg(dev
, "%s flags 0x%x\n", __func__
, rpmflags
);
367 if (dev
->power
.runtime_error
)
369 else if (dev
->power
.disable_depth
> 0)
374 /* Other scheduled or pending requests need to be canceled. */
375 pm_runtime_cancel_pending(dev
);
377 if (dev
->power
.runtime_status
== RPM_ACTIVE
) {
382 if (dev
->power
.runtime_status
== RPM_RESUMING
383 || dev
->power
.runtime_status
== RPM_SUSPENDING
) {
386 if (rpmflags
& (RPM_ASYNC
| RPM_NOWAIT
)) {
387 if (dev
->power
.runtime_status
== RPM_SUSPENDING
)
388 dev
->power
.deferred_resume
= true;
390 retval
= -EINPROGRESS
;
394 /* Wait for the operation carried out in parallel with us. */
396 prepare_to_wait(&dev
->power
.wait_queue
, &wait
,
397 TASK_UNINTERRUPTIBLE
);
398 if (dev
->power
.runtime_status
!= RPM_RESUMING
399 && dev
->power
.runtime_status
!= RPM_SUSPENDING
)
402 spin_unlock_irq(&dev
->power
.lock
);
406 spin_lock_irq(&dev
->power
.lock
);
408 finish_wait(&dev
->power
.wait_queue
, &wait
);
412 /* Carry out an asynchronous or a synchronous resume. */
413 if (rpmflags
& RPM_ASYNC
) {
414 dev
->power
.request
= RPM_REQ_RESUME
;
415 if (!dev
->power
.request_pending
) {
416 dev
->power
.request_pending
= true;
417 queue_work(pm_wq
, &dev
->power
.work
);
423 if (!parent
&& dev
->parent
) {
425 * Increment the parent's resume counter and resume it if
428 parent
= dev
->parent
;
429 spin_unlock(&dev
->power
.lock
);
431 pm_runtime_get_noresume(parent
);
433 spin_lock(&parent
->power
.lock
);
435 * We can resume if the parent's run-time PM is disabled or it
436 * is set to ignore children.
438 if (!parent
->power
.disable_depth
439 && !parent
->power
.ignore_children
) {
440 rpm_resume(parent
, 0);
441 if (parent
->power
.runtime_status
!= RPM_ACTIVE
)
444 spin_unlock(&parent
->power
.lock
);
446 spin_lock(&dev
->power
.lock
);
452 __update_runtime_status(dev
, RPM_RESUMING
);
454 if (dev
->bus
&& dev
->bus
->pm
&& dev
->bus
->pm
->runtime_resume
) {
455 spin_unlock_irq(&dev
->power
.lock
);
457 retval
= dev
->bus
->pm
->runtime_resume(dev
);
459 spin_lock_irq(&dev
->power
.lock
);
460 dev
->power
.runtime_error
= retval
;
461 } else if (dev
->type
&& dev
->type
->pm
462 && dev
->type
->pm
->runtime_resume
) {
463 spin_unlock_irq(&dev
->power
.lock
);
465 retval
= dev
->type
->pm
->runtime_resume(dev
);
467 spin_lock_irq(&dev
->power
.lock
);
468 dev
->power
.runtime_error
= retval
;
469 } else if (dev
->class && dev
->class->pm
470 && dev
->class->pm
->runtime_resume
) {
471 spin_unlock_irq(&dev
->power
.lock
);
473 retval
= dev
->class->pm
->runtime_resume(dev
);
475 spin_lock_irq(&dev
->power
.lock
);
476 dev
->power
.runtime_error
= retval
;
482 __update_runtime_status(dev
, RPM_SUSPENDED
);
483 pm_runtime_cancel_pending(dev
);
485 __update_runtime_status(dev
, RPM_ACTIVE
);
487 atomic_inc(&parent
->power
.child_count
);
489 wake_up_all(&dev
->power
.wait_queue
);
492 rpm_idle(dev
, RPM_ASYNC
);
496 spin_unlock_irq(&dev
->power
.lock
);
498 pm_runtime_put(parent
);
500 spin_lock_irq(&dev
->power
.lock
);
503 dev_dbg(dev
, "%s returns %d\n", __func__
, retval
);
509 * pm_runtime_work - Universal run-time PM work function.
510 * @work: Work structure used for scheduling the execution of this function.
512 * Use @work to get the device object the work is to be done for, determine what
513 * is to be done and execute the appropriate run-time PM function.
515 static void pm_runtime_work(struct work_struct
*work
)
517 struct device
*dev
= container_of(work
, struct device
, power
.work
);
518 enum rpm_request req
;
520 spin_lock_irq(&dev
->power
.lock
);
522 if (!dev
->power
.request_pending
)
525 req
= dev
->power
.request
;
526 dev
->power
.request
= RPM_REQ_NONE
;
527 dev
->power
.request_pending
= false;
533 rpm_idle(dev
, RPM_NOWAIT
);
535 case RPM_REQ_SUSPEND
:
536 rpm_suspend(dev
, RPM_NOWAIT
);
539 rpm_resume(dev
, RPM_NOWAIT
);
544 spin_unlock_irq(&dev
->power
.lock
);
548 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
549 * @data: Device pointer passed by pm_schedule_suspend().
551 * Check if the time is right and queue a suspend request.
553 static void pm_suspend_timer_fn(unsigned long data
)
555 struct device
*dev
= (struct device
*)data
;
557 unsigned long expires
;
559 spin_lock_irqsave(&dev
->power
.lock
, flags
);
561 expires
= dev
->power
.timer_expires
;
562 /* If 'expire' is after 'jiffies' we've been called too early. */
563 if (expires
> 0 && !time_after(expires
, jiffies
)) {
564 dev
->power
.timer_expires
= 0;
565 rpm_suspend(dev
, RPM_ASYNC
);
568 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
572 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
573 * @dev: Device to suspend.
574 * @delay: Time to wait before submitting a suspend request, in milliseconds.
576 int pm_schedule_suspend(struct device
*dev
, unsigned int delay
)
581 spin_lock_irqsave(&dev
->power
.lock
, flags
);
584 retval
= rpm_suspend(dev
, RPM_ASYNC
);
588 retval
= rpm_check_suspend_allowed(dev
);
592 /* Other scheduled or pending requests need to be canceled. */
593 pm_runtime_cancel_pending(dev
);
595 dev
->power
.timer_expires
= jiffies
+ msecs_to_jiffies(delay
);
596 dev
->power
.timer_expires
+= !dev
->power
.timer_expires
;
597 mod_timer(&dev
->power
.suspend_timer
, dev
->power
.timer_expires
);
600 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
604 EXPORT_SYMBOL_GPL(pm_schedule_suspend
);
607 * __pm_runtime_idle - Entry point for run-time idle operations.
608 * @dev: Device to send idle notification for.
609 * @rpmflags: Flag bits.
611 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
612 * return immediately if it is larger than zero. Then carry out an idle
613 * notification, either synchronous or asynchronous.
615 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
617 int __pm_runtime_idle(struct device
*dev
, int rpmflags
)
622 if (rpmflags
& RPM_GET_PUT
) {
623 if (!atomic_dec_and_test(&dev
->power
.usage_count
))
627 spin_lock_irqsave(&dev
->power
.lock
, flags
);
628 retval
= rpm_idle(dev
, rpmflags
);
629 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
633 EXPORT_SYMBOL_GPL(__pm_runtime_idle
);
636 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
637 * @dev: Device to suspend.
638 * @rpmflags: Flag bits.
640 * Carry out a suspend, either synchronous or asynchronous.
642 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
644 int __pm_runtime_suspend(struct device
*dev
, int rpmflags
)
649 spin_lock_irqsave(&dev
->power
.lock
, flags
);
650 retval
= rpm_suspend(dev
, rpmflags
);
651 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
655 EXPORT_SYMBOL_GPL(__pm_runtime_suspend
);
658 * __pm_runtime_resume - Entry point for run-time resume operations.
659 * @dev: Device to resume.
660 * @rpmflags: Flag bits.
662 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
663 * carry out a resume, either synchronous or asynchronous.
665 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
667 int __pm_runtime_resume(struct device
*dev
, int rpmflags
)
672 if (rpmflags
& RPM_GET_PUT
)
673 atomic_inc(&dev
->power
.usage_count
);
675 spin_lock_irqsave(&dev
->power
.lock
, flags
);
676 retval
= rpm_resume(dev
, rpmflags
);
677 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
681 EXPORT_SYMBOL_GPL(__pm_runtime_resume
);
684 * __pm_runtime_set_status - Set run-time PM status of a device.
685 * @dev: Device to handle.
686 * @status: New run-time PM status of the device.
688 * If run-time PM of the device is disabled or its power.runtime_error field is
689 * different from zero, the status may be changed either to RPM_ACTIVE, or to
690 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
691 * However, if the device has a parent and the parent is not active, and the
692 * parent's power.ignore_children flag is unset, the device's status cannot be
693 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
695 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
696 * and the device parent's counter of unsuspended children is modified to
697 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
698 * notification request for the parent is submitted.
700 int __pm_runtime_set_status(struct device
*dev
, unsigned int status
)
702 struct device
*parent
= dev
->parent
;
704 bool notify_parent
= false;
707 if (status
!= RPM_ACTIVE
&& status
!= RPM_SUSPENDED
)
710 spin_lock_irqsave(&dev
->power
.lock
, flags
);
712 if (!dev
->power
.runtime_error
&& !dev
->power
.disable_depth
) {
717 if (dev
->power
.runtime_status
== status
)
720 if (status
== RPM_SUSPENDED
) {
721 /* It always is possible to set the status to 'suspended'. */
723 atomic_add_unless(&parent
->power
.child_count
, -1, 0);
724 notify_parent
= !parent
->power
.ignore_children
;
730 spin_lock_nested(&parent
->power
.lock
, SINGLE_DEPTH_NESTING
);
733 * It is invalid to put an active child under a parent that is
734 * not active, has run-time PM enabled and the
735 * 'power.ignore_children' flag unset.
737 if (!parent
->power
.disable_depth
738 && !parent
->power
.ignore_children
739 && parent
->power
.runtime_status
!= RPM_ACTIVE
)
741 else if (dev
->power
.runtime_status
== RPM_SUSPENDED
)
742 atomic_inc(&parent
->power
.child_count
);
744 spin_unlock(&parent
->power
.lock
);
751 __update_runtime_status(dev
, status
);
752 dev
->power
.runtime_error
= 0;
754 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
757 pm_request_idle(parent
);
761 EXPORT_SYMBOL_GPL(__pm_runtime_set_status
);
764 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
765 * @dev: Device to handle.
767 * Flush all pending requests for the device from pm_wq and wait for all
768 * run-time PM operations involving the device in progress to complete.
770 * Should be called under dev->power.lock with interrupts disabled.
772 static void __pm_runtime_barrier(struct device
*dev
)
774 pm_runtime_deactivate_timer(dev
);
776 if (dev
->power
.request_pending
) {
777 dev
->power
.request
= RPM_REQ_NONE
;
778 spin_unlock_irq(&dev
->power
.lock
);
780 cancel_work_sync(&dev
->power
.work
);
782 spin_lock_irq(&dev
->power
.lock
);
783 dev
->power
.request_pending
= false;
786 if (dev
->power
.runtime_status
== RPM_SUSPENDING
787 || dev
->power
.runtime_status
== RPM_RESUMING
788 || dev
->power
.idle_notification
) {
791 /* Suspend, wake-up or idle notification in progress. */
793 prepare_to_wait(&dev
->power
.wait_queue
, &wait
,
794 TASK_UNINTERRUPTIBLE
);
795 if (dev
->power
.runtime_status
!= RPM_SUSPENDING
796 && dev
->power
.runtime_status
!= RPM_RESUMING
797 && !dev
->power
.idle_notification
)
799 spin_unlock_irq(&dev
->power
.lock
);
803 spin_lock_irq(&dev
->power
.lock
);
805 finish_wait(&dev
->power
.wait_queue
, &wait
);
810 * pm_runtime_barrier - Flush pending requests and wait for completions.
811 * @dev: Device to handle.
813 * Prevent the device from being suspended by incrementing its usage counter and
814 * if there's a pending resume request for the device, wake the device up.
815 * Next, make sure that all pending requests for the device have been flushed
816 * from pm_wq and wait for all run-time PM operations involving the device in
817 * progress to complete.
820 * 1, if there was a resume request pending and the device had to be woken up,
823 int pm_runtime_barrier(struct device
*dev
)
827 pm_runtime_get_noresume(dev
);
828 spin_lock_irq(&dev
->power
.lock
);
830 if (dev
->power
.request_pending
831 && dev
->power
.request
== RPM_REQ_RESUME
) {
836 __pm_runtime_barrier(dev
);
838 spin_unlock_irq(&dev
->power
.lock
);
839 pm_runtime_put_noidle(dev
);
843 EXPORT_SYMBOL_GPL(pm_runtime_barrier
);
846 * __pm_runtime_disable - Disable run-time PM of a device.
847 * @dev: Device to handle.
848 * @check_resume: If set, check if there's a resume request for the device.
850 * Increment power.disable_depth for the device and if was zero previously,
851 * cancel all pending run-time PM requests for the device and wait for all
852 * operations in progress to complete. The device can be either active or
853 * suspended after its run-time PM has been disabled.
855 * If @check_resume is set and there's a resume request pending when
856 * __pm_runtime_disable() is called and power.disable_depth is zero, the
857 * function will wake up the device before disabling its run-time PM.
859 void __pm_runtime_disable(struct device
*dev
, bool check_resume
)
861 spin_lock_irq(&dev
->power
.lock
);
863 if (dev
->power
.disable_depth
> 0) {
864 dev
->power
.disable_depth
++;
869 * Wake up the device if there's a resume request pending, because that
870 * means there probably is some I/O to process and disabling run-time PM
871 * shouldn't prevent the device from processing the I/O.
873 if (check_resume
&& dev
->power
.request_pending
874 && dev
->power
.request
== RPM_REQ_RESUME
) {
876 * Prevent suspends and idle notifications from being carried
877 * out after we have woken up the device.
879 pm_runtime_get_noresume(dev
);
883 pm_runtime_put_noidle(dev
);
886 if (!dev
->power
.disable_depth
++)
887 __pm_runtime_barrier(dev
);
890 spin_unlock_irq(&dev
->power
.lock
);
892 EXPORT_SYMBOL_GPL(__pm_runtime_disable
);
895 * pm_runtime_enable - Enable run-time PM of a device.
896 * @dev: Device to handle.
898 void pm_runtime_enable(struct device
*dev
)
902 spin_lock_irqsave(&dev
->power
.lock
, flags
);
904 if (dev
->power
.disable_depth
> 0)
905 dev
->power
.disable_depth
--;
907 dev_warn(dev
, "Unbalanced %s!\n", __func__
);
909 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
911 EXPORT_SYMBOL_GPL(pm_runtime_enable
);
914 * pm_runtime_forbid - Block run-time PM of a device.
915 * @dev: Device to handle.
917 * Increase the device's usage count and clear its power.runtime_auto flag,
918 * so that it cannot be suspended at run time until pm_runtime_allow() is called
921 void pm_runtime_forbid(struct device
*dev
)
923 spin_lock_irq(&dev
->power
.lock
);
924 if (!dev
->power
.runtime_auto
)
927 dev
->power
.runtime_auto
= false;
928 atomic_inc(&dev
->power
.usage_count
);
932 spin_unlock_irq(&dev
->power
.lock
);
934 EXPORT_SYMBOL_GPL(pm_runtime_forbid
);
937 * pm_runtime_allow - Unblock run-time PM of a device.
938 * @dev: Device to handle.
940 * Decrease the device's usage count and set its power.runtime_auto flag.
942 void pm_runtime_allow(struct device
*dev
)
944 spin_lock_irq(&dev
->power
.lock
);
945 if (dev
->power
.runtime_auto
)
948 dev
->power
.runtime_auto
= true;
949 if (atomic_dec_and_test(&dev
->power
.usage_count
))
953 spin_unlock_irq(&dev
->power
.lock
);
955 EXPORT_SYMBOL_GPL(pm_runtime_allow
);
958 * pm_runtime_init - Initialize run-time PM fields in given device object.
959 * @dev: Device object to initialize.
961 void pm_runtime_init(struct device
*dev
)
963 dev
->power
.runtime_status
= RPM_SUSPENDED
;
964 dev
->power
.idle_notification
= false;
966 dev
->power
.disable_depth
= 1;
967 atomic_set(&dev
->power
.usage_count
, 0);
969 dev
->power
.runtime_error
= 0;
971 atomic_set(&dev
->power
.child_count
, 0);
972 pm_suspend_ignore_children(dev
, false);
973 dev
->power
.runtime_auto
= true;
975 dev
->power
.request_pending
= false;
976 dev
->power
.request
= RPM_REQ_NONE
;
977 dev
->power
.deferred_resume
= false;
978 dev
->power
.accounting_timestamp
= jiffies
;
979 INIT_WORK(&dev
->power
.work
, pm_runtime_work
);
981 dev
->power
.timer_expires
= 0;
982 setup_timer(&dev
->power
.suspend_timer
, pm_suspend_timer_fn
,
985 init_waitqueue_head(&dev
->power
.wait_queue
);
989 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
990 * @dev: Device object being removed from device hierarchy.
992 void pm_runtime_remove(struct device
*dev
)
994 __pm_runtime_disable(dev
, false);
996 /* Change the status back to 'suspended' to match the initial status. */
997 if (dev
->power
.runtime_status
== RPM_ACTIVE
)
998 pm_runtime_set_suspended(dev
);