/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;	/* Adjusted to be nonzero, see above. */
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
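
/*
 * Usage sketch (assumption, not part of this file): a driver that uses
 * autosuspend typically refreshes power.last_busy and drops its usage count
 * through the _autosuspend helpers, so the expiration time computed above
 * keeps moving forward while the device stays busy:
 *
 *	static void foo_io_complete(struct device *dev)	// hypothetical helper
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */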
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
		callback = dev->bus->pm->runtime_idle;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
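
/*
 * Callback sketch (assumption): a minimal ->runtime_idle() implementation
 * supplied by a bus type, device type or class usually just requests an
 * immediate suspend; returning without suspending leaves the device active:
 *
 *	static int foo_runtime_idle(struct device *dev)	// hypothetical
 *	{
 *		return pm_runtime_suspend(dev);
 *	}
 */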
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	spin_lock_irq(&dev->power.lock);
	dev->power.runtime_error = retval;

	return retval;
}
/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
		callback = dev->bus->pm->runtime_suspend;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = 0;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
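
/*
 * Callback sketch (assumption): the callback chosen above comes from the bus
 * type, device type or class dev_pm_ops.  A suspend/resume pair might look
 * like the following; returning -EBUSY or -EAGAIN from ->runtime_suspend()
 * vetoes the suspend without flagging a runtime error (see the error handling
 * above).  All foo_* names are hypothetical:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_device_is_busy(dev))
 *			return -EBUSY;
 *		foo_save_state_and_power_down(dev);
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		foo_power_up_and_restore_state(dev);
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   foo_runtime_idle)
 *	};
 */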
/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
		callback = dev->bus->pm->runtime_resume;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
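
/*
 * Usage sketch (assumption): a driver that wants the hardware to stay powered
 * for a while after its last operation can queue a delayed suspend request
 * instead of suspending right away, e.g. a hypothetical 5-second grace period:
 *
 *	retval = pm_schedule_suspend(dev, 5000);
 */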
/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
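
/*
 * Usage sketch (assumption): the common driver-facing idiom built on these
 * entry points (through the pm_runtime_get_sync()/pm_runtime_put() inline
 * wrappers) brackets I/O with a usage-count reference so the device is resumed
 * before it is touched.  foo_start_transfer() is a hypothetical helper:
 *
 *	static int foo_do_transfer(struct device *dev)
 *	{
 *		int error;
 *
 *		error = pm_runtime_get_sync(dev);
 *		if (error < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return error;
 *		}
 *		error = foo_start_transfer(dev);
 *		pm_runtime_put(dev);
 *		return error;
 *	}
 *
 * In atomic context (an interrupt handler, for example) pm_request_resume()
 * can be used instead, since it only queues an RPM_ASYNC resume request.
 */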
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and modifies the parent's counter of unsuspended children to reflect
 * the new status.  If the new status is RPM_SUSPENDED, an idle notification
 * request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
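
/*
 * Usage sketch (assumption): this function is normally reached through the
 * pm_runtime_set_active()/pm_runtime_set_suspended() wrappers while run-time
 * PM is still disabled, e.g. in a probe routine that leaves the hardware
 * powered up:
 *
 *	pm_runtime_set_active(dev);	// make the status match the hardware
 *	pm_runtime_enable(dev);		// allow run-time PM from here on
 */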
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time
	 * PM shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
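
/*
 * Usage sketch (assumption): drivers normally reach this through the
 * pm_runtime_disable() wrapper (which passes check_resume = true), typically
 * in a remove routine so that no more callbacks can run while the device is
 * being torn down.  foo_teardown() is a hypothetical helper:
 *
 *	pm_runtime_disable(dev);
 *	foo_teardown(dev);
 */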
/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
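
/*
 * Usage sketch (assumption): pm_runtime_forbid() and pm_runtime_allow() back
 * the user-visible power/control sysfs attribute, but a driver may also call
 * them directly to set the default policy, e.g. keeping the device powered
 * until user space explicitly allows run-time suspend:
 *
 *	pm_runtime_forbid(dev);		// default to "on"; sysfs can re-enable
 */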
/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
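
/*
 * Usage sketch (assumption): the two setters above are normally paired during
 * probe, together with enabling run-time PM:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// 2 s of idle time
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 */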
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}